1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase;
19  
20  import javax.annotation.Nonnull;
21  import javax.annotation.Nullable;
22  import java.io.Closeable;
23  import java.io.IOException;
24  import java.io.InterruptedIOException;
25  import java.util.ArrayList;
26  import java.util.Arrays;
27  import java.util.LinkedHashMap;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.NavigableMap;
31  import java.util.Set;
32  import java.util.SortedMap;
33  import java.util.TreeMap;
34  import java.util.regex.Matcher;
35  import java.util.regex.Pattern;
36  
37  import com.google.common.annotations.VisibleForTesting;
38  import com.google.protobuf.ServiceException;
39  import org.apache.commons.logging.Log;
40  import org.apache.commons.logging.LogFactory;
41  import org.apache.hadoop.conf.Configuration;
42  import org.apache.hadoop.hbase.classification.InterfaceAudience;
43  import org.apache.hadoop.hbase.client.Connection;
44  import org.apache.hadoop.hbase.client.ConnectionFactory;
45  import org.apache.hadoop.hbase.client.Consistency;
46  import org.apache.hadoop.hbase.client.Delete;
47  import org.apache.hadoop.hbase.client.Get;
48  import org.apache.hadoop.hbase.client.Mutation;
49  import org.apache.hadoop.hbase.client.Put;
50  import org.apache.hadoop.hbase.client.RegionLocator;
51  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
52  import org.apache.hadoop.hbase.client.Result;
53  import org.apache.hadoop.hbase.client.ResultScanner;
54  import org.apache.hadoop.hbase.client.Scan;
55  import org.apache.hadoop.hbase.client.Table;
56  import org.apache.hadoop.hbase.client.TableState;
57  import org.apache.hadoop.hbase.exceptions.DeserializationException;
58  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
59  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
60  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
61  import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
62  import org.apache.hadoop.hbase.util.Bytes;
63  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
64  import org.apache.hadoop.hbase.util.ExceptionUtil;
65  import org.apache.hadoop.hbase.util.Pair;
66  import org.apache.hadoop.hbase.util.PairOfSameType;
67  
68  /**
69   * Read/write operations on region and assignment information stored in
70   * <code>hbase:meta</code>.
71   *
72   * Some of the methods of this class take ZooKeeperWatcher as a param. The only reason
73   * for this is that when used on the client side (like from HBaseAdmin), we want a
74   * short-lived connection (opened before each operation and closed right after), while
75   * when used on the HM or HRS (like in AssignmentManager) we want a permanent connection.
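     *
     * <p>A minimal usage sketch for the read side (the {@code conf} object and the table
     * name below are hypothetical; error handling is omitted):
     * <pre>
     * try (Connection connection = ConnectionFactory.createConnection(conf)) {
     *   List&lt;HRegionInfo&gt; regions =
     *       MetaTableAccessor.getTableRegions(connection, TableName.valueOf("my_table"));
     * }
     * </pre>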
76   */
77  @InterfaceAudience.Private
78  public class MetaTableAccessor {
79  
80    /*
81     * HBASE-10070 adds a replicaId to HRI, meaning more than one HRI can be defined for the
82     * same table range (table, startKey, endKey). For every range, there will be at least one
83     * HRI defined which is called default replica.
84     *
85     * Meta layout (as of 0.98 + HBASE-10070) is like:
86     *
87     * For each table there is a single row whose key is the <tableName> (including
88     * namespace) in column family 'table'; its columns are:
89     * table: state             => contains table state
90     *
91     * For each table range, there is a single row, formatted like:
92     * <tableName>,<startKey>,<regionId>,<encodedRegionName>. This row corresponds to the regionName
93     * of the default region replica.
94     * Columns are:
95     * info:regioninfo         => contains serialized HRI for the default region replica
96     * info:server             => contains hostname:port (in string form) for the server hosting
97     *                            the default regionInfo replica
98     * info:server_<replicaId> => contains hostname:port (in string form) for the server hosting the
99     *                            regionInfo replica with replicaId
100    * info:serverstartcode    => contains server start code (in binary long form) for the server
101    *                            hosting the default regionInfo replica
102    * info:serverstartcode_<replicaId> => contains server start code (in binary long form) for the
103    *                                     server hosting the regionInfo replica with replicaId
104    * info:seqnumDuringOpen    => contains seqNum (in binary long form) for the region at the time
105    *                             the server opened the region with default replicaId
106    * info:seqnumDuringOpen_<replicaId> => contains seqNum (in binary long form) for the region at
107    *                             the time the server opened the region with replicaId
108    * info:splitA              => contains a serialized HRI for the first daughter region if the
109    *                             region is split
110    * info:splitB              => contains a serialized HRI for the second daughter region if the
111    *                             region is split
112    * info:mergeA              => contains a serialized HRI for the first parent region if the
113    *                             region is the result of a merge
114    * info:mergeB              => contains a serialized HRI for the second parent region if the
115    *                             region is the result of a merge
116    *
117    * The actual layout of meta should be encapsulated inside MetaTableAccessor methods,
118    * and should not leak out of it (through Result objects, etc)
119    */
120 
121   private static final Log LOG = LogFactory.getLog(MetaTableAccessor.class);
122   private static final Log METALOG = LogFactory.getLog("org.apache.hadoop.hbase.META");
123 
124   static final byte [] META_REGION_PREFIX;
125   static {
126     // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX.
127     // FIRST_META_REGIONINFO == 'hbase:meta,,1'.  META_REGION_PREFIX == 'hbase:meta,'
128     int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2;
129     META_REGION_PREFIX = new byte [len];
130     System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0,
131       META_REGION_PREFIX, 0, len);
132   }
133 
134   /**
135    * Lists all of the table regions currently in META.
136    * Deprecated; kept here until tests no longer use it.
137    * @param connection what we will use
138    * @param tableName table to list
139    * @return Map of all user-space regions to servers
140    * @throws java.io.IOException
141    * @deprecated use {@link #getTableRegionsAndLocations}; a region can have multiple locations
142    */
143   @Deprecated
144   public static NavigableMap<HRegionInfo, ServerName> allTableRegions(
145       Connection connection, final TableName tableName) throws IOException {
146     final NavigableMap<HRegionInfo, ServerName> regions =
147       new TreeMap<HRegionInfo, ServerName>();
148     Visitor visitor = new TableVisitorBase(tableName) {
149       @Override
150       public boolean visitInternal(Result result) throws IOException {
151         RegionLocations locations = getRegionLocations(result);
152         if (locations == null) return true;
153         for (HRegionLocation loc : locations.getRegionLocations()) {
154           if (loc != null) {
155             HRegionInfo regionInfo = loc.getRegionInfo();
156             regions.put(regionInfo, loc.getServerName());
157           }
158         }
159         return true;
160       }
161     };
162     scanMetaForTableRegions(connection, visitor, tableName);
163     return regions;
164   }
165 
166   @InterfaceAudience.Private
167   public enum QueryType {
168     ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY),
169     REGION(HConstants.CATALOG_FAMILY),
170     TABLE(HConstants.TABLE_FAMILY);
171 
172     private final byte[][] families;
173 
174     QueryType(byte[]... families) {
175       this.families = families;
176     }
177 
178     byte[][] getFamilies() {
179       return this.families;
180     }
181   }
182 
183   /** The delimiter for meta columns for replicaIds &gt; 0 */
184   protected static final char META_REPLICA_ID_DELIMITER = '_';
185 
186   /** A regex for parsing server columns from meta. See above javadoc for meta layout */
187   private static final Pattern SERVER_COLUMN_PATTERN
188     = Pattern.compile("^server(_[0-9a-fA-F]{4})?$");
189 
190   ////////////////////////
191   // Reading operations //
192   ////////////////////////
193 
194   /**
195    * Performs a full scan of <code>hbase:meta</code> for regions.
196    * @param connection connection we're using
197    * @param visitor Visitor invoked against each row in regions family.
198    * @throws IOException
199    */
200   public static void fullScanRegions(Connection connection,
201       final Visitor visitor)
202       throws IOException {
203     scanMeta(connection, null, null, QueryType.REGION, visitor);
204   }
205 
206   /**
207    * Performs a full scan of <code>hbase:meta</code> for regions.
208    * @param connection connection we're using
209    * @throws IOException
210    */
211   public static List<Result> fullScanRegions(Connection connection)
212       throws IOException {
213     return fullScan(connection, QueryType.REGION);
214   }
215 
216   /**
217    * Performs a full scan of <code>hbase:meta</code> for tables.
218    * @param connection connection we're using
219    * @param visitor Visitor invoked against each row in tables family.
220    * @throws IOException
221    */
222   public static void fullScanTables(Connection connection,
223       final Visitor visitor)
224       throws IOException {
225     scanMeta(connection, null, null, QueryType.TABLE, visitor);
226   }
227 
228   /**
229    * Performs a full scan of <code>hbase:meta</code>.
230    * @param connection connection we're using
231    * @param type scanned part of meta
232    * @return List of {@link Result}
233    * @throws IOException
234    */
235   public static List<Result> fullScan(Connection connection, QueryType type)
236     throws IOException {
237     CollectAllVisitor v = new CollectAllVisitor();
238     scanMeta(connection, null, null, type, v);
239     return v.getResults();
240   }
241 
242   /**
243    * Callers should call close on the returned {@link Table} instance.
244    * @param connection connection we're using to access Meta
245    * @return A {@link Table} for <code>hbase:meta</code>
246    * @throws IOException
247    */
248   static Table getMetaHTable(final Connection connection)
249   throws IOException {
250     // We used to pass whole CatalogTracker in here, now we just pass in Connection
251     if (connection == null) {
252       throw new NullPointerException("No connection");
253     } else if (connection.isClosed()) {
254       throw new IOException("connection is closed");
255     }
256     return connection.getTable(TableName.META_TABLE_NAME);
257   }
258 
259   /**
260    * @param t Table to use (will be closed when done).
261    * @param g Get to run
262    * @throws IOException
263    */
264   private static Result get(final Table t, final Get g) throws IOException {
265     if (t == null) return null;
266     try {
267       return t.get(g);
268     } finally {
269       t.close();
270     }
271   }
272 
273   /**
274    * Gets the region info and assignment for the specified region.
275    * @param connection connection we're using
276    * @param regionName Region to lookup.
277    * @return Location and HRegionInfo for <code>regionName</code>
278    * @throws IOException
279    * @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
280    */
281   @Deprecated
282   public static Pair<HRegionInfo, ServerName> getRegion(Connection connection, byte [] regionName)
283     throws IOException {
284     HRegionLocation location = getRegionLocation(connection, regionName);
285     return location == null
286       ? null
287       : new Pair<HRegionInfo, ServerName>(location.getRegionInfo(), location.getServerName());
288   }
289 
290   /**
291    * Returns the HRegionLocation from meta for the given region
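       *
       * <p>A minimal sketch (hypothetical names; the region name bytes would normally come
       * from an existing {@link HRegionInfo}):
       * <pre>
       * byte[] regionName = hri.getRegionName();
       * HRegionLocation location = MetaTableAccessor.getRegionLocation(connection, regionName);
       * ServerName server = location == null ? null : location.getServerName();
       * </pre>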
292    * @param connection connection we're using
293    * @param regionName region we're looking for
294    * @return HRegionLocation for the given region
295    * @throws IOException
296    */
297   public static HRegionLocation getRegionLocation(Connection connection,
298                                                   byte[] regionName) throws IOException {
299     byte[] row = regionName;
300     HRegionInfo parsedInfo = null;
301     try {
302       parsedInfo = parseRegionInfoFromRegionName(regionName);
303       row = getMetaKeyForRegion(parsedInfo);
304     } catch (Exception parseEx) {
305       ; // Ignore. This is used with tableName passed as regionName.
306     }
307     Get get = new Get(row);
308     get.addFamily(HConstants.CATALOG_FAMILY);
309     Result r = get(getMetaHTable(connection), get);
310     RegionLocations locations = getRegionLocations(r);
311     return locations == null
312       ? null
313       : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
314   }
315 
316   /**
317    * Returns the HRegionLocation from meta for the given region
318    * @param connection connection we're using
319    * @param regionInfo region information
320    * @return HRegionLocation for the given region
321    * @throws IOException
322    */
323   public static HRegionLocation getRegionLocation(Connection connection,
324                                                   HRegionInfo regionInfo) throws IOException {
325     byte[] row = getMetaKeyForRegion(regionInfo);
326     Get get = new Get(row);
327     get.addFamily(HConstants.CATALOG_FAMILY);
328     Result r = get(getMetaHTable(connection), get);
329     return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
330   }
331 
332   /** Returns the row key to use for this regionInfo */
333   public static byte[] getMetaKeyForRegion(HRegionInfo regionInfo) {
334     return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo).getRegionName();
335   }
336 
337   /** Returns an HRI parsed from this regionName. Not all of the fields of the HRI
338    * are stored in the name, so the returned object should only be used for the fields
339    * present in the regionName.
340    */
341   protected static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
342     throws IOException {
343     byte[][] fields = HRegionInfo.parseRegionName(regionName);
344     long regionId =  Long.parseLong(Bytes.toString(fields[2]));
345     int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
346     return new HRegionInfo(
347       TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
348   }
349 
350   /**
351    * Gets the result in hbase:meta for the specified region.
352    * @param connection connection we're using
353    * @param regionName region we're looking for
354    * @return result of the specified region
355    * @throws IOException
356    */
357   public static Result getRegionResult(Connection connection,
358       byte[] regionName) throws IOException {
359     Get get = new Get(regionName);
360     get.addFamily(HConstants.CATALOG_FAMILY);
361     return get(getMetaHTable(connection), get);
362   }
363 
364   /**
365    * Get regions from the merge qualifier of the specified merged region
366    * @return null if it doesn't contain merge qualifier, else two merge regions
367    * @throws IOException
368    */
369   @Nullable
370   public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
371       Connection connection, byte[] regionName) throws IOException {
372     Result result = getRegionResult(connection, regionName);
373     HRegionInfo mergeA = getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
374     HRegionInfo mergeB = getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
375     if (mergeA == null && mergeB == null) {
376       return null;
377     }
378     return new Pair<HRegionInfo, HRegionInfo>(mergeA, mergeB);
379  }
380 
381   /**
382    * Checks if the specified table exists. Looks at the table state stored in the
383    * hbase:meta table.
384    * @param connection connection we're using
385    * @param tableName table to check
386    * @return true if the table exists in meta, false if not
387    * @throws IOException
388    */
389   public static boolean tableExists(Connection connection,
390       final TableName tableName)
391   throws IOException {
392     // Catalog tables always exist.
393     return tableName.equals(TableName.META_TABLE_NAME)
394         || getTableState(connection, tableName) != null;
395   }
396 
397   /**
398    * Lists all of the regions currently in META.
399    *
400    * @param connection to connect with
401    * @param excludeOfflinedSplitParents false if we are to include offlined/split-parent regions;
402    *                                    true and we leave offlined regions out of the returned list
403    * @return List of all user-space regions.
404    * @throws IOException
405    */
406   @VisibleForTesting
407   public static List<HRegionInfo> getAllRegions(Connection connection,
408       boolean excludeOfflinedSplitParents)
409       throws IOException {
410     List<Pair<HRegionInfo, ServerName>> result;
411 
412     result = getTableRegionsAndLocations(connection, null,
413         excludeOfflinedSplitParents);
414 
415     return getListOfHRegionInfos(result);
416 
417   }
418 
419   /**
420    * Gets all of the regions of the specified table. Do not use this method
421    * to get meta table regions, use methods in MetaTableLocator instead.
422    * @param connection connection we're using
423    * @param tableName table we're looking for
424    * @return Ordered list of {@link HRegionInfo}.
425    * @throws IOException
426    */
427   public static List<HRegionInfo> getTableRegions(Connection connection, TableName tableName)
428   throws IOException {
429     return getTableRegions(connection, tableName, false);
430   }
431 
432   /**
433    * Gets all of the regions of the specified table. Do not use this method
434    * to get meta table regions, use methods in MetaTableLocator instead.
435    * @param connection connection we're using
436    * @param tableName table we're looking for
437    * @param excludeOfflinedSplitParents If true, do not include offlined split
438    * parents in the return.
439    * @return Ordered list of {@link HRegionInfo}.
440    * @throws IOException
441    */
442   public static List<HRegionInfo> getTableRegions(Connection connection,
443       TableName tableName, final boolean excludeOfflinedSplitParents)
444       throws IOException {
445     List<Pair<HRegionInfo, ServerName>> result;
446 
447     result = getTableRegionsAndLocations(connection, tableName,
448       excludeOfflinedSplitParents);
449 
450     return getListOfHRegionInfos(result);
451   }
452 
453   @Nullable
454   static List<HRegionInfo> getListOfHRegionInfos(final List<Pair<HRegionInfo, ServerName>> pairs) {
455     if (pairs == null || pairs.isEmpty()) return null;
456     List<HRegionInfo> result = new ArrayList<HRegionInfo>(pairs.size());
457     for (Pair<HRegionInfo, ServerName> pair: pairs) {
458       result.add(pair.getFirst());
459     }
460     return result;
461   }
462 
463   /**
464    * @param current region of current table we're working with
465    * @param tableName table we're checking against
466    * @return True if <code>current</code> tablename is equal to
467    * <code>tableName</code>
468    */
469   static boolean isInsideTable(final HRegionInfo current, final TableName tableName) {
470     return tableName.equals(current.getTable());
471   }
472 
473   /**
474    * @param tableName table we're working with
475    * @return start row for scanning META according to query type
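       *
       * <p>For example, for a table named {@code t1} and {@link QueryType#REGION} the start row
       * is the table name followed by two {@link HConstants#DELIMITER} bytes ({@code t1,,},
       * assuming the delimiter is the comma character); for {@link QueryType#TABLE} and
       * {@link QueryType#ALL} it is just the table name.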
476    */
477   public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type) {
478     if (tableName == null) {
479       return null;
480     }
481     switch (type) {
482     case REGION:
483       byte[] startRow = new byte[tableName.getName().length + 2];
484       System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
485       startRow[startRow.length - 2] = HConstants.DELIMITER;
486       startRow[startRow.length - 1] = HConstants.DELIMITER;
487       return startRow;
488     case ALL:
489     case TABLE:
490     default:
491       return tableName.getName();
492     }
493   }
494 
495   /**
496    * @param tableName table we're working with
497    * @return stop row for scanning META according to query type
498    */
499   public static byte[] getTableStopRowForMeta(TableName tableName, QueryType type) {
500     if (tableName == null) {
501       return null;
502     }
503     final byte[] stopRow;
504     switch (type) {
505     case REGION:
506       stopRow = new byte[tableName.getName().length + 3];
507       System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length);
508       stopRow[stopRow.length - 3] = ' ';
509       stopRow[stopRow.length - 2] = HConstants.DELIMITER;
510       stopRow[stopRow.length - 1] = HConstants.DELIMITER;
511       break;
512     case ALL:
513     case TABLE:
514     default:
515       stopRow = new byte[tableName.getName().length + 1];
516       System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length);
517       stopRow[stopRow.length - 1] = ' ';
518       break;
519     }
520     return stopRow;
521   }
522 
523   /**
524    * This method creates a Scan object that will only scan catalog rows that
525    * belong to the specified table. It doesn't specify any columns.
526    * This is a better alternative than just using a start row and scanning until
527    * it hits a new table, since that requires parsing the HRI to get the table
528    * name.
529    * @param tableName bytes of table's name
530    * @return configured Scan object
531    */
532   @Deprecated
533   public static Scan getScanForTableName(Connection connection, TableName tableName) {
534     // Start key is just the table name with delimiters
535     byte[] startKey = getTableStartRowForMeta(tableName, QueryType.REGION);
536     // Stop key appends the smallest possible char to the table name
537     byte[] stopKey = getTableStopRowForMeta(tableName, QueryType.REGION);
538 
539     Scan scan = getMetaScan(connection);
540     scan.setStartRow(startKey);
541     scan.setStopRow(stopKey);
542     return scan;
543   }
544 
545   private static Scan getMetaScan(Connection connection) {
546     return getMetaScan(connection, Integer.MAX_VALUE);
547   }
548 
549   private static Scan getMetaScan(Connection connection, int rowUpperLimit) {
550     Scan scan = new Scan();
551     int scannerCaching = connection.getConfiguration()
552         .getInt(HConstants.HBASE_META_SCANNER_CACHING,
553             HConstants.DEFAULT_HBASE_META_SCANNER_CACHING);
554     if (connection.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS,
555         HConstants.DEFAULT_USE_META_REPLICAS)) {
556       scan.setConsistency(Consistency.TIMELINE);
557     }
558     if (rowUpperLimit <= scannerCaching) {
559       scan.setSmall(true);
560     }
561     int rows = Math.min(rowUpperLimit, scannerCaching);
562     scan.setCaching(rows);
563     return scan;
564   }
565   /**
566    * Do not use this method to get meta table regions, use methods in MetaTableLocator instead.
567    * @param connection connection we're using
568    * @param tableName table we're looking for
569    * @return List of regioninfos and servers.
570    * @throws IOException
571    */
572   public static List<Pair<HRegionInfo, ServerName>>
573     getTableRegionsAndLocations(Connection connection, TableName tableName)
574       throws IOException {
575     return getTableRegionsAndLocations(connection, tableName, true);
576   }
577 
578   /**
579    * Do not use this method to get meta table regions, use methods in MetaTableLocator instead.
580    * @param connection connection we're using
581    * @param tableName table to work with, can be null for getting all regions
582    * @param excludeOfflinedSplitParents don't return split parents
583    * @return Return list of regioninfos and server addresses.
584    * @throws IOException
585    */
586   public static List<Pair<HRegionInfo, ServerName>> getTableRegionsAndLocations(
587       Connection connection, @Nullable final TableName tableName,
588       final boolean excludeOfflinedSplitParents) throws IOException {
589     if (tableName != null && tableName.equals(TableName.META_TABLE_NAME)) {
590       throw new IOException("This method can't be used to locate meta regions;"
591         + " use MetaTableLocator instead");
592     }
593     // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress
594     CollectingVisitor<Pair<HRegionInfo, ServerName>> visitor =
595       new CollectingVisitor<Pair<HRegionInfo, ServerName>>() {
596         private RegionLocations current = null;
597 
598         @Override
599         public boolean visit(Result r) throws IOException {
600           current = getRegionLocations(r);
601           if (current == null || current.getRegionLocation().getRegionInfo() == null) {
602             LOG.warn("No serialized HRegionInfo in " + r);
603             return true;
604           }
605           HRegionInfo hri = current.getRegionLocation().getRegionInfo();
606           if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
607           // Else call super and add this Result to the collection.
608           return super.visit(r);
609         }
610 
611         @Override
612         void add(Result r) {
613           if (current == null) {
614             return;
615           }
616           for (HRegionLocation loc : current.getRegionLocations()) {
617             if (loc != null) {
618               this.results.add(new Pair<HRegionInfo, ServerName>(
619                 loc.getRegionInfo(), loc.getServerName()));
620             }
621           }
622         }
623       };
624     scanMeta(connection,
625         getTableStartRowForMeta(tableName, QueryType.REGION),
626         getTableStopRowForMeta(tableName, QueryType.REGION),
627         QueryType.REGION, visitor);
628     return visitor.getResults();
629   }
630 
631   /**
632    * @param connection connection we're using
633    * @param serverName server whose regions we're interested in
634    * @return List of user regions installed on this server (does not include
635    * catalog regions).
636    * @throws IOException
637    */
638   public static NavigableMap<HRegionInfo, Result>
639   getServerUserRegions(Connection connection, final ServerName serverName)
640     throws IOException {
641     final NavigableMap<HRegionInfo, Result> hris = new TreeMap<HRegionInfo, Result>();
642     // Fill the above hris map with entries from hbase:meta that have the passed
643     // servername.
644     CollectingVisitor<Result> v = new CollectingVisitor<Result>() {
645       @Override
646       void add(Result r) {
647         if (r == null || r.isEmpty()) return;
648         RegionLocations locations = getRegionLocations(r);
649         if (locations == null) return;
650         for (HRegionLocation loc : locations.getRegionLocations()) {
651           if (loc != null) {
652             if (loc.getServerName() != null && loc.getServerName().equals(serverName)) {
653               hris.put(loc.getRegionInfo(), r);
654             }
655           }
656         }
657       }
658     };
659     scanMeta(connection, null, null, QueryType.REGION, v);
660     return hris;
661   }
662 
663   public static void fullScanMetaAndPrint(Connection connection)
664     throws IOException {
665     Visitor v = new Visitor() {
666       @Override
667       public boolean visit(Result r) throws IOException {
668         if (r ==  null || r.isEmpty()) return true;
669         LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
670         TableState state = getTableState(r);
671         if (state != null) {
672           LOG.info("Table State: " + state);
673         } else {
674           RegionLocations locations = getRegionLocations(r);
675           if (locations == null) return true;
676           for (HRegionLocation loc : locations.getRegionLocations()) {
677             if (loc != null) {
678               LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo());
679             }
680           }
681         }
682         return true;
683       }
684     };
685     scanMeta(connection, null, null, QueryType.ALL, v);
686   }
687 
688   public static void scanMetaForTableRegions(Connection connection,
689       Visitor visitor, TableName tableName) throws IOException {
690     scanMeta(connection, tableName, QueryType.REGION, Integer.MAX_VALUE, visitor);
691   }
692 
693   public static void scanMeta(Connection connection, TableName table,
694       QueryType type, int maxRows, final Visitor visitor) throws IOException {
695     scanMeta(connection, getTableStartRowForMeta(table, type), getTableStopRowForMeta(table, type),
696         type, maxRows, visitor);
697   }
698 
699   public static void scanMeta(Connection connection,
700       @Nullable final byte[] startRow, @Nullable final byte[] stopRow,
701       QueryType type, final Visitor visitor) throws IOException {
702     scanMeta(connection, startRow, stopRow, type, Integer.MAX_VALUE, visitor);
703   }
704 
705   /**
706    * Performs a scan of META table for given table starting from
707    * given row.
708    *
709    * @param connection connection we're using
710    * @param visitor    visitor to call
711    * @param tableName  table within which we scan
712    * @param row        start scan from this row
713    * @param rowLimit   max number of rows to return
714    * @throws IOException
715    */
716   public static void scanMeta(Connection connection,
717       final Visitor visitor, final TableName tableName,
718       final byte[] row, final int rowLimit)
719       throws IOException {
720 
721     byte[] startRow = null;
722     byte[] stopRow = null;
723     if (tableName != null) {
724       startRow =
725           getTableStartRowForMeta(tableName, QueryType.REGION);
726       if (row != null) {
727         HRegionInfo closestRi =
728             getClosestRegionInfo(connection, tableName, row);
729         startRow = HRegionInfo
730             .createRegionName(tableName, closestRi.getStartKey(), HConstants.ZEROES, false);
731       }
732       stopRow =
733           getTableStopRowForMeta(tableName, QueryType.REGION);
734     }
735     scanMeta(connection, startRow, stopRow, QueryType.REGION, rowLimit, visitor);
736   }
737 
738 
739   /**
740    * Performs a scan of META table.
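       *
       * <p>A minimal sketch (assumes an open {@code connection}; collects at most 10 region
       * rows across all tables):
       * <pre>
       * final List&lt;Result&gt; rows = new ArrayList&lt;Result&gt;();
       * MetaTableAccessor.scanMeta(connection, null, null, QueryType.REGION, 10,
       *     new Visitor() {
       *       &#64;Override
       *       public boolean visit(Result r) throws IOException {
       *         rows.add(r);
       *         return true; // keep scanning
       *       }
       *     });
       * </pre>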
741    * @param connection connection we're using
742    * @param startRow Where to start the scan. Pass null to begin the scan
743    *                 at the first row.
744    * @param stopRow Where to stop the scan. Pass null to scan all rows
745    *                from the start row on
746    * @param type scanned part of meta
747    * @param maxRows maximum rows to return
748    * @param visitor Visitor invoked against each row.
749    * @throws IOException
750    */
751   public static void scanMeta(Connection connection,
752       @Nullable final byte[] startRow, @Nullable final byte[] stopRow,
753       QueryType type, int maxRows, final Visitor visitor)
754   throws IOException {
755     int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE;
756     Scan scan = getMetaScan(connection, rowUpperLimit);
757 
758     for (byte[] family : type.getFamilies()) {
759       scan.addFamily(family);
760     }
761     if (startRow != null) scan.setStartRow(startRow);
762     if (stopRow != null) scan.setStopRow(stopRow);
763 
764     if (LOG.isTraceEnabled()) {
765       LOG.trace("Scanning META"
766           + " starting at row=" + Bytes.toStringBinary(startRow)
767           + " stopping at row=" + Bytes.toStringBinary(stopRow)
768           + " for max=" + rowUpperLimit
769           + " with caching=" + scan.getCaching());
770     }
771 
772     int currentRow = 0;
773     try (Table metaTable = getMetaHTable(connection)) {
774       try (ResultScanner scanner = metaTable.getScanner(scan)) {
775         Result data;
776         while ((data = scanner.next()) != null) {
777           if (data.isEmpty()) continue;
778           // Break if visit returns false.
779           if (!visitor.visit(data)) break;
780           if (++currentRow >= rowUpperLimit) break;
781         }
782       }
783     }
784     if (visitor != null && visitor instanceof Closeable) {
785       try {
786         ((Closeable) visitor).close();
787       } catch (Throwable t) {
788         ExceptionUtil.rethrowIfInterrupt(t);
789         LOG.debug("Got exception in closing the meta scanner visitor", t);
790       }
791     }
792   }
793 
794   /**
795    * @return Closest meta table region row to the passed <code>row</code>
796    * @throws java.io.IOException
797    */
798   @Nonnull
799   public static HRegionInfo getClosestRegionInfo(Connection connection,
800       @Nonnull final TableName tableName,
801       @Nonnull final byte[] row)
802       throws IOException {
803     byte[] searchRow = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
804     Scan scan = getMetaScan(connection, 1);
805     scan.setReversed(true);
806     scan.setStartRow(searchRow);
807     try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) {
808       Result result = resultScanner.next();
809       if (result == null) {
810         throw new TableNotFoundException("Cannot find row in META " +
811             " for table: " + tableName + ", row=" + Bytes.toStringBinary(row));
812       }
813       HRegionInfo regionInfo = getHRegionInfo(result);
814       if (regionInfo == null) {
815         throw new IOException("HRegionInfo was null or empty in Meta for " +
816             tableName + ", row=" + Bytes.toStringBinary(row));
817       }
818       return regionInfo;
819     }
820   }
821 
822   /**
823    * Returns the column family used for meta columns.
824    * @return HConstants.CATALOG_FAMILY.
825    */
826   protected static byte[] getCatalogFamily() {
827     return HConstants.CATALOG_FAMILY;
828   }
829 
830   /**
831    * Returns the column family used for table columns.
832    * @return HConstants.TABLE_FAMILY.
833    */
834   protected static byte[] getTableFamily() {
835     return HConstants.TABLE_FAMILY;
836   }
837 
838   /**
839    * Returns the column qualifier for serialized region info
840    * @return HConstants.REGIONINFO_QUALIFIER
841    */
842   protected static byte[] getRegionInfoColumn() {
843     return HConstants.REGIONINFO_QUALIFIER;
844   }
845 
846   /**
847    * Returns the column qualifier for serialized table state
848    *
849    * @return HConstants.TABLE_STATE_QUALIFIER
850    */
851   protected static byte[] getStateColumn() {
852     return HConstants.TABLE_STATE_QUALIFIER;
853   }
854 
855   /**
856    * Returns the column qualifier for server column for replicaId
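       *
       * <p>For example, replicaId 0 yields {@code server}, while replicaId 1 yields
       * {@code server_0001} (the suffix is the replicaId formatted as four hex digits; see
       * {@link #parseReplicaIdFromServerColumn(byte[])}).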
857    * @param replicaId the replicaId of the region
858    * @return a byte[] for server column qualifier
859    */
860   @VisibleForTesting
861   public static byte[] getServerColumn(int replicaId) {
862     return replicaId == 0
863       ? HConstants.SERVER_QUALIFIER
864       : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
865       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
866   }
867 
868   /**
869    * Returns the column qualifier for server start code column for replicaId
870    * @param replicaId the replicaId of the region
871    * @return a byte[] for server start code column qualifier
872    */
873   @VisibleForTesting
874   public static byte[] getStartCodeColumn(int replicaId) {
875     return replicaId == 0
876       ? HConstants.STARTCODE_QUALIFIER
877       : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
878       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
879   }
880 
881   /**
882    * Returns the column qualifier for seqNum column for replicaId
883    * @param replicaId the replicaId of the region
884    * @return a byte[] for seqNum column qualifier
885    */
886   @VisibleForTesting
887   public static byte[] getSeqNumColumn(int replicaId) {
888     return replicaId == 0
889       ? HConstants.SEQNUM_QUALIFIER
890       : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
891       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
892   }
893 
894   /**
895    * Parses the replicaId from the server column qualifier. See top of the class javadoc
896    * for the actual meta layout
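       *
       * <p>For example, {@code server} parses to replicaId 0, {@code server_0001} parses to
       * replicaId 1, and a qualifier that does not match the server column pattern yields -1.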
897    * @param serverColumn the column qualifier
898    * @return an int for the replicaId
899    */
900   @VisibleForTesting
901   static int parseReplicaIdFromServerColumn(byte[] serverColumn) {
902     String serverStr = Bytes.toString(serverColumn);
903 
904     Matcher matcher = SERVER_COLUMN_PATTERN.matcher(serverStr);
905     if (matcher.matches() && matcher.groupCount() > 0) {
906       String group = matcher.group(1);
907       if (group != null && group.length() > 0) {
908         return Integer.parseInt(group.substring(1), 16);
909       } else {
910         return 0;
911       }
912     }
913     return -1;
914   }
915 
916   /**
917    * Returns a {@link ServerName} from catalog table {@link Result}.
918    * @param r Result to pull from
919    * @return A ServerName instance or null if necessary fields not found or empty.
920    */
921   @Nullable
922   @InterfaceAudience.Private // for use by HMaster#getTableRegionRow which is used for testing only
923   public static ServerName getServerName(final Result r, final int replicaId) {
924     byte[] serverColumn = getServerColumn(replicaId);
925     Cell cell = r.getColumnLatestCell(getCatalogFamily(), serverColumn);
926     if (cell == null || cell.getValueLength() == 0) return null;
927     String hostAndPort = Bytes.toString(
928       cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
929     byte[] startcodeColumn = getStartCodeColumn(replicaId);
930     cell = r.getColumnLatestCell(getCatalogFamily(), startcodeColumn);
931     if (cell == null || cell.getValueLength() == 0) return null;
932     try {
933       return ServerName.valueOf(hostAndPort,
934           Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
935     } catch (IllegalArgumentException e) {
936       LOG.error("Ignoring invalid region for server " + hostAndPort + "; cell=" + cell, e);
937       return null;
938     }
939   }
940 
941   /**
942    * The latest seqnum that the server writing to meta observed when opening the region.
943    * E.g. the seqNum when the result of {@link #getServerName(Result, int)} was written.
944    * @param r Result to pull the seqNum from
945    * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
946    */
947   private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
948     Cell cell = r.getColumnLatestCell(getCatalogFamily(), getSeqNumColumn(replicaId));
949     if (cell == null || cell.getValueLength() == 0) return HConstants.NO_SEQNUM;
950     return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
951   }
952 
953   /**
954    * Returns a {@link RegionLocations} object extracted from the result.
955    * @return a RegionLocations object containing all locations for the region range, or null if
956    *  we can't deserialize the result.
957    */
958   @Nullable
959   public static RegionLocations getRegionLocations(final Result r) {
960     if (r == null) return null;
961     HRegionInfo regionInfo = getHRegionInfo(r, getRegionInfoColumn());
962     if (regionInfo == null) return null;
963 
964     List<HRegionLocation> locations = new ArrayList<HRegionLocation>(1);
965     NavigableMap<byte[],NavigableMap<byte[],byte[]>> familyMap = r.getNoVersionMap();
966 
967     locations.add(getRegionLocation(r, regionInfo, 0));
968 
969     NavigableMap<byte[], byte[]> infoMap = familyMap.get(getCatalogFamily());
970     if (infoMap == null) return new RegionLocations(locations);
971 
972     // iterate until all serverName columns are seen
973     int replicaId = 0;
974     byte[] serverColumn = getServerColumn(replicaId);
975     SortedMap<byte[], byte[]> serverMap = infoMap.tailMap(serverColumn, false);
976     if (serverMap.isEmpty()) return new RegionLocations(locations);
977 
978     for (Map.Entry<byte[], byte[]> entry : serverMap.entrySet()) {
979       replicaId = parseReplicaIdFromServerColumn(entry.getKey());
980       if (replicaId < 0) {
981         break;
982       }
983       HRegionLocation location = getRegionLocation(r, regionInfo, replicaId);
984       // In case the region replica is newly created, its location might be null. We usually do not
985       // have HRLs with a null ServerName in the RegionLocations object; they are handled as null HRLs.
986       if (location == null || location.getServerName() == null) {
987         locations.add(null);
988       } else {
989         locations.add(location);
990       }
991     }
992 
993     return new RegionLocations(locations);
994   }
995 
996   /**
997    * Returns the HRegionLocation parsed from the given meta row Result
998    * for the given regionInfo and replicaId. The regionInfo can be the default region info
999    * for the replica.
1000    * @param r the meta row result
1001    * @param regionInfo RegionInfo for default replica
1002    * @param replicaId the replicaId for the HRegionLocation
1003    * @return HRegionLocation parsed from the given meta row Result for the given replicaId
1004    */
1005   private static HRegionLocation getRegionLocation(final Result r, final HRegionInfo regionInfo,
1006                                                    final int replicaId) {
1007     ServerName serverName = getServerName(r, replicaId);
1008     long seqNum = getSeqNumDuringOpen(r, replicaId);
1009     HRegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId);
1010     return new HRegionLocation(replicaInfo, serverName, seqNum);
1011   }
1012 
1013   /**
1014    * Returns HRegionInfo object from the column
1015    * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog
1016    * table Result.
1017    * @param data a Result object from the catalog table scan
1018    * @return HRegionInfo or null
1019    */
1020   public static HRegionInfo getHRegionInfo(Result data) {
1021     return getHRegionInfo(data, HConstants.REGIONINFO_QUALIFIER);
1022   }
1023 
1024   /**
1025    * Returns the HRegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and
1026    * <code>qualifier</code> of the catalog table result.
1027    * @param r a Result object from the catalog table scan
1028    * @param qualifier Column qualifier
1029    * @return An HRegionInfo instance or null.
1030    */
1031   @Nullable
1032   private static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier) {
1033     Cell cell = r.getColumnLatestCell(getCatalogFamily(), qualifier);
1034     if (cell == null) return null;
1035     return HRegionInfo.parseFromOrNull(cell.getValueArray(),
1036       cell.getValueOffset(), cell.getValueLength());
1037   }
1038 
1039   /**
1040    * Returns the daughter regions by reading the corresponding columns of the catalog table
1041    * Result.
1042    * @param data a Result object from the catalog table scan
1043    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
1044    * parent
1045    */
1046   public static PairOfSameType<HRegionInfo> getDaughterRegions(Result data) {
1047     HRegionInfo splitA = getHRegionInfo(data, HConstants.SPLITA_QUALIFIER);
1048     HRegionInfo splitB = getHRegionInfo(data, HConstants.SPLITB_QUALIFIER);
1049 
1050     return new PairOfSameType<HRegionInfo>(splitA, splitB);
1051   }
1052 
1053   /**
1054    * Returns the merge regions by reading the corresponding columns of the catalog table
1055    * Result.
1056    * @param data a Result object from the catalog table scan
1057    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not the
1058    * result of a merge
1059    */
1060   public static PairOfSameType<HRegionInfo> getMergeRegions(Result data) {
1061     HRegionInfo mergeA = getHRegionInfo(data, HConstants.MERGEA_QUALIFIER);
1062     HRegionInfo mergeB = getHRegionInfo(data, HConstants.MERGEB_QUALIFIER);
1063 
1064     return new PairOfSameType<HRegionInfo>(mergeA, mergeB);
1065   }
1066 
1067   /**
1068    * Fetch table state for given table from META table
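       *
       * <p>A minimal sketch (hypothetical table name; assumes an open {@code conn}):
       * <pre>
       * TableState state = MetaTableAccessor.getTableState(conn, TableName.valueOf("my_table"));
       * // state is null if no state is stored in meta for the table
       * </pre>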
1069    * @param conn connection to use
1070    * @param tableName table to fetch state for
1071    * @return state
1072    * @throws IOException
1073    */
1074   @Nullable
1075   public static TableState getTableState(Connection conn, TableName tableName)
1076       throws IOException {
1077     Table metaHTable = getMetaHTable(conn);
1078     Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getStateColumn());
1079     long time = EnvironmentEdgeManager.currentTime();
1080     get.setTimeRange(0, time);
1081     Result result =
1082         metaHTable.get(get);
1083     return getTableState(result);
1084   }
1085 
1086   /**
1087    * Fetch table states from META table
1088    * @param conn connection to use
1089    * @return map {tableName -&gt; state}
1090    * @throws IOException
1091    */
1092   public static Map<TableName, TableState> getTableStates(Connection conn)
1093       throws IOException {
1094     final Map<TableName, TableState> states = new LinkedHashMap<>();
1095     Visitor collector = new Visitor() {
1096       @Override
1097       public boolean visit(Result r) throws IOException {
1098         TableState state = getTableState(r);
1099         if (state != null)
1100           states.put(state.getTableName(), state);
1101         return true;
1102       }
1103     };
1104     fullScanTables(conn, collector);
1105     return states;
1106   }
1107 
1108   /**
1109    * Updates state in META
1110    * @param conn connection to use
1111    * @param tableName table to update the state of
1112    * @throws IOException
1113    */
1114   public static void updateTableState(Connection conn, TableName tableName,
1115       TableState.State actual) throws IOException {
1116     updateTableState(conn, new TableState(tableName, actual));
1117   }
1118 
1119   /**
1120    * Decode table state from META Result.
1121    * The Result should contain a cell from HConstants.TABLE_FAMILY.
1122    * @param r result
1123    * @return null if not found
1124    * @throws IOException
1125    */
1126   @Nullable
1127   public static TableState getTableState(Result r)
1128       throws IOException {
1129     Cell cell = r.getColumnLatestCell(getTableFamily(), getStateColumn());
1130     if (cell == null) return null;
1131     try {
1132       return TableState.parseFrom(TableName.valueOf(r.getRow()),
1133           Arrays.copyOfRange(cell.getValueArray(),
1134           cell.getValueOffset(), cell.getValueOffset() + cell.getValueLength()));
1135     } catch (DeserializationException e) {
1136       throw new IOException(e);
1137     }
1138 
1139   }
1140 
1141   /**
1142    * Implementations 'visit' a catalog table row.
1143    */
1144   public interface Visitor {
1145     /**
1146      * Visit the catalog table row.
1147      * @param r A row from catalog table
1148      * @return True if we are to proceed scanning the table, else false if
1149      * we are to stop now.
1150      */
1151     boolean visit(final Result r) throws IOException;
1152   }
1153 
1154   /**
1155    * Implementations 'visit' a catalog table row but with close() at the end.
1156    */
1157   public interface CloseableVisitor extends Visitor, Closeable {
1158   }
1159 
1160   /**
1161    * A {@link Visitor} that collects content out of passed {@link Result}.
1162    */
1163   static abstract class CollectingVisitor<T> implements Visitor {
1164     final List<T> results = new ArrayList<T>();
1165     @Override
1166     public boolean visit(Result r) throws IOException {
1167       if (r ==  null || r.isEmpty()) return true;
1168       add(r);
1169       return true;
1170     }
1171 
1172     abstract void add(Result r);
1173 
1174     /**
1175      * @return Collected results; wait till visits complete to collect all
1176      * possible results
1177      */
1178     List<T> getResults() {
1179       return this.results;
1180     }
1181   }
1182 
1183   /**
1184    * Collects all returned.
1185    */
1186   static class CollectAllVisitor extends CollectingVisitor<Result> {
1187     @Override
1188     void add(Result r) {
1189       this.results.add(r);
1190     }
1191   }
1192 
1193   /**
1194    * A Visitor that skips offline regions and split parents
1195    */
1196   public static abstract class DefaultVisitorBase implements Visitor {
1197 
1198     public DefaultVisitorBase() {
1199       super();
1200     }
1201 
1202     public abstract boolean visitInternal(Result rowResult) throws IOException;
1203 
1204     @Override
1205     public boolean visit(Result rowResult) throws IOException {
1206       HRegionInfo info = getHRegionInfo(rowResult);
1207       if (info == null) {
1208         return true;
1209       }
1210 
1211       //skip over offline and split regions
1212       if (!(info.isOffline() || info.isSplit())) {
1213         return visitInternal(rowResult);
1214       }
1215       return true;
1216     }
1217   }
1218 
1219   /**
1220    * A Visitor for a table. Provides a consistent view of the table's
1221    * hbase:meta entries during concurrent splits (see HBASE-5986 for details). This class
1222    * does not guarantee ordered traversal of meta entries, and can block until the
1223    * hbase:meta entries for daughters are available during splits.
1224    */
1225   public static abstract class TableVisitorBase extends DefaultVisitorBase {
1226     private TableName tableName;
1227 
1228     public TableVisitorBase(TableName tableName) {
1229       super();
1230       this.tableName = tableName;
1231     }
1232 
1233     @Override
1234     public final boolean visit(Result rowResult) throws IOException {
1235       HRegionInfo info = getHRegionInfo(rowResult);
1236       if (info == null) {
1237         return true;
1238       }
1239       if (!(info.getTable().equals(tableName))) {
1240         return false;
1241       }
1242       return super.visit(rowResult);
1243     }
1244   }
1245 
1246   /**
1247    * Count regions in <code>hbase:meta</code> for passed table.
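       *
       * <p>For example (hypothetical table name):
       * <pre>
       * int count = MetaTableAccessor.getRegionCount(conf, TableName.valueOf("my_table"));
       * </pre>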
1248    * @param c Configuration object
1249    * @param tableName table name to count regions for
1250    * @return Count of regions in table <code>tableName</code>
1251    * @throws IOException
1252    */
1253   public static int getRegionCount(final Configuration c, final TableName tableName)
1254   throws IOException {
1255     try (Connection connection = ConnectionFactory.createConnection(c)) {
1256       return getRegionCount(connection, tableName);
1257     }
1258   }
1259 
1260   /**
1261    * Count regions in <code>hbase:meta</code> for passed table.
1262    * @param connection Connection object
1263    * @param tableName table name to count regions for
1264    * @return Count of regions in table <code>tableName</code>
1265    * @throws IOException
1266    */
1267   public static int getRegionCount(final Connection connection, final TableName tableName)
1268   throws IOException {
1269     try (RegionLocator locator = connection.getRegionLocator(tableName)) {
1270       List<HRegionLocation> locations = locator.getAllRegionLocations();
1271       return locations == null? 0: locations.size();
1272     }
1273   }
1274 
1275   ////////////////////////
1276   // Editing operations //
1277   ////////////////////////
1278 
1279   /**
1280    * Generates and returns a Put containing the region info for the catalog table
1281    */
1282   public static Put makePutFromRegionInfo(HRegionInfo regionInfo)
1283     throws IOException {
1284     return makePutFromRegionInfo(regionInfo, EnvironmentEdgeManager.currentTime());
1285   }
1286 
1287   /**
1288    * Generates and returns a Put containing the region info for the catalog table
1289    */
1290   public static Put makePutFromRegionInfo(HRegionInfo regionInfo, long ts)
1291     throws IOException {
1292     Put put = new Put(regionInfo.getRegionName(), ts);
1293     addRegionInfo(put, regionInfo);
1294     return put;
1295   }
1296 
1297   /**
1298    * Generates and returns a Delete containing the region info for the catalog
1299    * table
1300    */
1301   public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo) {
1302     long now = EnvironmentEdgeManager.currentTime();
1303     return makeDeleteFromRegionInfo(regionInfo, now);
1304   }
1305 
1306   /**
1307    * Generates and returns a Delete containing the region info for the catalog
1308    * table
1309    */
1310   public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo, long ts) {
1311     if (regionInfo == null) {
1312       throw new IllegalArgumentException("Can't make a delete for null region");
1313     }
1314     Delete delete = new Delete(regionInfo.getRegionName());
1315     delete.addFamily(getCatalogFamily(), ts);
1316     return delete;
1317   }
1318 
1319   /**
1320    * Adds split daughters to the Put
1321    */
1322   public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
1323     if (splitA != null) {
1324       put.addImmutable(
1325         HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
1326     }
1327     if (splitB != null) {
1328       put.addImmutable(
1329         HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
1330     }
1331     return put;
1332   }
1333 
1334   /**
1335    * Put the passed <code>p</code> to the <code>hbase:meta</code> table.
1336    * @param connection connection we're using
1337    * @param p Put to add to hbase:meta
1338    * @throws IOException
1339    */
1340   static void putToMetaTable(final Connection connection, final Put p)
1341     throws IOException {
1342     put(getMetaHTable(connection), p);
1343   }
1344 
1345   /**
1346    * @param t Table to use (will be closed when done).
1347    * @param p put to make
1348    * @throws IOException
1349    */
1350   private static void put(final Table t, final Put p) throws IOException {
1351     try {
1352       if (METALOG.isDebugEnabled()) {
1353         METALOG.debug(mutationToString(p));
1354       }
1355       t.put(p);
1356     } finally {
1357       t.close();
1358     }
1359   }
1360 
1361   /**
1362    * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
1363    * @param connection connection we're using
1364    * @param ps Puts to add to hbase:meta
1365    * @throws IOException
1366    */
1367   public static void putsToMetaTable(final Connection connection, final List<Put> ps)
1368     throws IOException {
1369     Table t = getMetaHTable(connection);
1370     try {
1371       if (METALOG.isDebugEnabled()) {
1372         METALOG.debug(mutationsToString(ps));
1373       }
1374       t.put(ps);
1375     } finally {
1376       t.close();
1377     }
1378   }
1379 
1380   /**
1381    * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
1382    * @param connection connection we're using
1383    * @param d Delete to add to hbase:meta
1384    * @throws IOException
1385    */
1386   static void deleteFromMetaTable(final Connection connection, final Delete d)
1387     throws IOException {
1388     List<Delete> dels = new ArrayList<Delete>(1);
1389     dels.add(d);
1390     deleteFromMetaTable(connection, dels);
1391   }
1392 
1393   /**
1394    * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
1395    * @param connection connection we're using
1396    * @param deletes Deletes to add to hbase:meta. This list should support #remove.
1397    * @throws IOException
1398    */
1399   public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
1400     throws IOException {
1401     Table t = getMetaHTable(connection);
1402     try {
1403       if (METALOG.isDebugEnabled()) {
1404         METALOG.debug(mutationsToString(deletes));
1405       }
1406       t.delete(deletes);
1407     } finally {
1408       t.close();
1409     }
1410   }
1411 
1412   /**
1413    * Deletes the location columns (server, startcode and seqnum) for a range of replicas from the passed hbase:meta rows
1414    * @param metaRows rows in hbase:meta
1415    * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
1416    * @param numReplicasToRemove how many replicas to remove
1417    * @param connection connection we're using to access meta table
1418    * @throws IOException
1419    */
1420   public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
1421     int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
1422       throws IOException {
1423     int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
1424     for (byte[] row : metaRows) {
1425       long now = EnvironmentEdgeManager.currentTime();
1426       Delete deleteReplicaLocations = new Delete(row);
1427       for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
1428         deleteReplicaLocations.addColumns(getCatalogFamily(),
1429           getServerColumn(i), now);
1430         deleteReplicaLocations.addColumns(getCatalogFamily(),
1431           getSeqNumColumn(i), now);
1432         deleteReplicaLocations.addColumns(getCatalogFamily(),
1433           getStartCodeColumn(i), now);
1434       }
1435       deleteFromMetaTable(connection, deleteReplicaLocations);
1436     }
1437   }
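
  /*
   * Illustrative usage sketch: if a table's region replication is lowered from 3 to 1, the
   * server, startcode and seqnum columns of replicas 1 and 2 can be cleared from the
   * affected rows. "metaRows" (a Set of meta row keys) and "conn" are assumed
   * caller-supplied values.
   *
   *   removeRegionReplicasFromMeta(metaRows, 1, 2, conn);
   */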
1438 
1439   /**
1440    * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
1441    * @param connection connection we're using
1442    * @param mutations Puts and Deletes to execute on hbase:meta
1443    * @throws IOException
1444    */
1445   public static void mutateMetaTable(final Connection connection,
1446                                      final List<Mutation> mutations)
1447     throws IOException {
1448     Table t = getMetaHTable(connection);
1449     try {
1450       if (METALOG.isDebugEnabled()) {
1451         METALOG.debug(mutationsToString(mutations));
1452       }
1453       t.batch(mutations, null);
1454     } catch (InterruptedException e) {
1455       InterruptedIOException ie = new InterruptedIOException(e.getMessage());
1456       ie.initCause(e);
1457       throw ie;
1458     } finally {
1459       t.close();
1460     }
1461   }
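
  /*
   * Illustrative usage sketch: Puts and Deletes may be mixed in a single batch, though the
   * batch is not atomic across rows (see multiMutate below for the atomic variant). "conn",
   * "hriToAdd" and "hriToRemove" are assumed caller-supplied values.
   *
   *   List<Mutation> mutations = new ArrayList<Mutation>();
   *   mutations.add(makePutFromRegionInfo(hriToAdd));
   *   mutations.add(makeDeleteFromRegionInfo(hriToRemove));
   *   mutateMetaTable(conn, mutations);
   */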
1462 
1463   /**
1464    * Adds a hbase:meta row for the specified new region.
1465    * @param connection connection we're using
1466    * @param regionInfo region information
1467    * @throws IOException if problem connecting or updating meta
1468    */
1469   public static void addRegionToMeta(Connection connection,
1470                                      HRegionInfo regionInfo)
1471     throws IOException {
1472     putToMetaTable(connection, makePutFromRegionInfo(regionInfo));
1473     LOG.info("Added " + regionInfo.getRegionNameAsString());
1474   }
1475 
1476   /**
1477    * Adds a hbase:meta row for the specified new region to the given catalog table. The
1478    * Table is not flushed or closed.
1479    * @param meta the Table for META
1480    * @param regionInfo region information
1481    * @throws IOException if problem connecting or updating meta
1482    */
1483   public static void addRegionToMeta(Table meta, HRegionInfo regionInfo) throws IOException {
1484     addRegionToMeta(meta, regionInfo, null, null);
1485   }
1486 
1487   /**
1488    * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
1489    * does not add its daughters as separate rows, but adds information about the daughters
1490    * in the same row as the parent. Use
1491    * {@link #splitRegion(Connection, HRegionInfo, HRegionInfo, HRegionInfo, ServerName, int)}
1492    * if you want to do that.
1493    * @param meta the Table for META
1494    * @param regionInfo region information
1495    * @param splitA first split daughter of the parent regionInfo
1496    * @param splitB second split daughter of the parent regionInfo
1497    * @throws IOException if problem connecting or updating meta
1498    */
1499   public static void addRegionToMeta(Table meta, HRegionInfo regionInfo,
1500                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
1501     Put put = makePutFromRegionInfo(regionInfo);
1502     addDaughtersToPut(put, splitA, splitB);
1503     meta.put(put);
1504     if (METALOG.isDebugEnabled()) {
1505       METALOG.debug(mutationToString(put));
1506     }
1507     if (LOG.isDebugEnabled()) {
1508       LOG.debug("Added " + regionInfo.getRegionNameAsString());
1509     }
1510   }
1511 
1512   /**
1513    * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
1514    * does not add its daughters as separate rows, but adds information about the daughters
1515    * in the same row as the parent. Use
1516    * {@link #splitRegion(Connection, HRegionInfo, HRegionInfo, HRegionInfo, ServerName, int)}
1517    * if you want to do that.
1518    * @param connection connection we're using
1519    * @param regionInfo region information
1520    * @param splitA first split daughter of the parent regionInfo
1521    * @param splitB second split daughter of the parent regionInfo
1522    * @throws IOException if problem connecting or updating meta
1523    */
1524   public static void addRegionToMeta(Connection connection, HRegionInfo regionInfo,
1525                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
1526     Table meta = getMetaHTable(connection);
1527     try {
1528       addRegionToMeta(meta, regionInfo, splitA, splitB);
1529     } finally {
1530       meta.close();
1531     }
1532   }
1533 
1534   /**
1535    * Adds a hbase:meta row for each of the specified new regions.
1536    * @param connection connection we're using
1537    * @param regionInfos region information list
1538    * @throws IOException if problem connecting or updating meta
1539    */
1540   public static void addRegionsToMeta(Connection connection,
1541                                       List<HRegionInfo> regionInfos, int regionReplication)
1542     throws IOException {
1543     addRegionsToMeta(connection, regionInfos, regionReplication, HConstants.LATEST_TIMESTAMP);
1544   }
1545   /**
1546    * Adds a hbase:meta row for each of the specified new regions.
1547    * @param connection connection we're using
1548    * @param regionInfos region information list
1549    * @param regionReplication the number of replicas per region, including the primary
1550    * @param ts desired timestamp
1551    * @throws IOException if problem connecting or updating meta
1552    */
1553   public static void addRegionsToMeta(Connection connection,
1554       List<HRegionInfo> regionInfos, int regionReplication, long ts)
1555           throws IOException {
1556     List<Put> puts = new ArrayList<Put>();
1557     for (HRegionInfo regionInfo : regionInfos) {
1558       if (RegionReplicaUtil.isDefaultReplica(regionInfo)) {
1559         Put put = makePutFromRegionInfo(regionInfo, ts);
1560         // Add empty locations for region replicas so that number of replicas can be cached
1561         // whenever the primary region is looked up from meta
1562         for (int i = 1; i < regionReplication; i++) {
1563           addEmptyLocation(put, i);
1564         }
1565         puts.add(put);
1566       }
1567     }
1568     putsToMetaTable(connection, puts);
1569     LOG.info("Added " + puts.size() + " regions to meta.");
1570   }
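
  /*
   * Illustrative usage sketch: for a table created with two replicas per region, each
   * default replica gets a catalog row plus empty location columns for replica 1. "conn"
   * and "newRegions" are assumed caller-supplied values.
   *
   *   addRegionsToMeta(conn, newRegions, 2);
   */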
1571 
1572   /**
1573    * Adds a daughter region entry to meta.
1574    * @param regionInfo the region to put
1575    * @param sn the location of the region
1576    * @param openSeqNum the latest sequence number obtained when the region was open
1577    */
1578   public static void addDaughter(final Connection connection,
1579       final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
1580       throws NotAllMetaRegionsOnlineException, IOException {
1581     long now = EnvironmentEdgeManager.currentTime();
1582     Put put = new Put(regionInfo.getRegionName(), now);
1583     addRegionInfo(put, regionInfo);
1584     if (sn != null) {
1585       addLocation(put, sn, openSeqNum, -1, regionInfo.getReplicaId());
1586     }
1587     putToMetaTable(connection, put);
1588     LOG.info("Added daughter " + regionInfo.getEncodedName() +
1589       (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
1590   }
1591 
1592   /**
1593    * Merge the two regions into one in an atomic operation. Deletes the two
1594    * merging regions from hbase:meta and adds the merged region, recording references to
1595    * the two merging regions in its row.
1596    * @param connection connection we're using
1597    * @param mergedRegion the merged region
1598    * @param regionA merge parent region A
1599    * @param regionB merge parent region B
1600    * @param sn the location of the merged region
1601    * @param masterSystemTime wall clock time from master if passed in the RPC, or -1
1602    * @throws IOException
1603    */
1604   public static void mergeRegions(final Connection connection, HRegionInfo mergedRegion,
1605       HRegionInfo regionA, HRegionInfo regionB, ServerName sn, int regionReplication,
1606       long masterSystemTime)
1607           throws IOException {
1608     Table meta = getMetaHTable(connection);
1609     try {
1610       HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
1611 
1612       // use the maximum of what master passed us vs local time.
1613       long time = Math.max(EnvironmentEdgeManager.currentTime(), masterSystemTime);
1614 
1615       // Put for parent
1616       Put putOfMerged = makePutFromRegionInfo(copyOfMerged, time);
1617       putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
1618         regionA.toByteArray());
1619       putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
1620         regionB.toByteArray());
1621 
1622       // Deletes for merging regions
1623       Delete deleteA = makeDeleteFromRegionInfo(regionA, time);
1624       Delete deleteB = makeDeleteFromRegionInfo(regionB, time);
1625 
1626       // The merged is a new region, openSeqNum = 1 is fine.
1627       addLocation(putOfMerged, sn, 1, -1, mergedRegion.getReplicaId());
1628 
1629       // Add empty locations for region replicas of the merged region so that number of replicas can
1630       // be cached whenever the primary region is looked up from meta
1631       for (int i = 1; i < regionReplication; i++) {
1632         addEmptyLocation(putOfMerged, i);
1633       }
1634 
1635       byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
1636         + HConstants.DELIMITER);
1637       multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
1638     } finally {
1639       meta.close();
1640     }
1641   }
1642 
1643   /**
1644    * Splits the region into two in an atomic operation. Offlines the parent
1645    * region with the information that it is split into two, and also adds
1646    * the daughter regions. Does not add the location information to the daughter
1647    * regions since they are not open yet.
1648    * @param connection connection we're using
1649    * @param parent the parent region which is split
1650    * @param splitA Split daughter region A
1651    * @param splitB Split daughter region B
1652    * @param sn the location of the region
1653    */
1654   public static void splitRegion(final Connection connection,
1655                                  HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
1656                                  ServerName sn, int regionReplication) throws IOException {
1657     Table meta = getMetaHTable(connection);
1658     try {
1659       HRegionInfo copyOfParent = new HRegionInfo(parent);
1660       copyOfParent.setOffline(true);
1661       copyOfParent.setSplit(true);
1662 
1663       //Put for parent
1664       Put putParent = makePutFromRegionInfo(copyOfParent);
1665       addDaughtersToPut(putParent, splitA, splitB);
1666 
1667       //Puts for daughters
1668       Put putA = makePutFromRegionInfo(splitA);
1669       Put putB = makePutFromRegionInfo(splitB);
1670 
1671       addLocation(putA, sn, 1, -1, splitA.getReplicaId()); //new regions, openSeqNum = 1 is fine.
1672       addLocation(putB, sn, 1, -1, splitB.getReplicaId());
1673 
1674       // Add empty locations for region replicas of daughters so that number of replicas can be
1675       // cached whenever the primary region is looked up from meta
1676       for (int i = 1; i < regionReplication; i++) {
1677         addEmptyLocation(putA, i);
1678         addEmptyLocation(putB, i);
1679       }
1680 
1681       byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
1682       multiMutate(meta, tableRow, putParent, putA, putB);
1683     } finally {
1684       meta.close();
1685     }
1686   }
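
  /*
   * Illustrative usage sketch: a split is recorded by offlining the parent row and adding
   * the two daughter rows in one atomic multi-row mutation. "conn", "parent", "daughterA",
   * "daughterB" and "serverName" are assumed caller-supplied values; the final argument of
   * 1 means the table has no additional replicas.
   *
   *   splitRegion(conn, parent, daughterA, daughterB, serverName, 1);
   */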
1687 
1688   /**
1689    * Update state of the table in meta.
1690    * @param connection what we use for update
1691    * @param state new state
1692    * @throws IOException
1693    */
1694   public static void updateTableState(Connection connection, TableState state)
1695       throws IOException {
1696     Put put = makePutFromTableState(state);
1697     putToMetaTable(connection, put);
1698     LOG.info(
1699         "Updated table " + state.getTableName() + " state to " + state.getState() + " in META");
1700   }
1701 
1702   /**
1703    * Constructs a Put for the given table state
1704    * @param state new state
1705    */
1706   public static Put makePutFromTableState(TableState state) {
1707     long time = EnvironmentEdgeManager.currentTime();
1708     Put put = new Put(state.getTableName().getName(), time);
1709     put.addColumn(getTableFamily(), getStateColumn(), state.convert().toByteArray());
1710     return put;
1711   }
1712 
1713   /**
1714    * Remove state for table from meta
1715    * @param connection to use for deletion
1716    * @param table to delete state for
1717    */
1718   public static void deleteTableState(Connection connection, TableName table)
1719       throws IOException {
1720     long time = EnvironmentEdgeManager.currentTime();
1721     Delete delete = new Delete(table.getName());
1722     delete.addColumns(getTableFamily(), getStateColumn(), time);
1723     deleteFromMetaTable(connection, delete);
1724     LOG.info("Deleted table " + table + " state from META");
1725   }
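
  /*
   * Illustrative usage sketch: the table state lives in the table family of the table's own
   * row in hbase:meta. "conn", "state" (a TableState) and "table" (a TableName) are assumed
   * caller-supplied values.
   *
   *   updateTableState(conn, state);   // write or overwrite the state
   *   deleteTableState(conn, table);   // drop the state column again
   */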
1726 
1727   /**
1728    * Performs an atomic multi-Mutate operation against the given table.
1729    */
1730   private static void multiMutate(Table table, byte[] row, Mutation... mutations)
1731       throws IOException {
1732     CoprocessorRpcChannel channel = table.coprocessorService(row);
1733     MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder
1734       = MultiRowMutationProtos.MutateRowsRequest.newBuilder();
1735     if (METALOG.isDebugEnabled()) {
1736       METALOG.debug(mutationsToString(mutations));
1737     }
1738     for (Mutation mutation : mutations) {
1739       if (mutation instanceof Put) {
1740         mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
1741           ClientProtos.MutationProto.MutationType.PUT, mutation));
1742       } else if (mutation instanceof Delete) {
1743         mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
1744           ClientProtos.MutationProto.MutationType.DELETE, mutation));
1745       } else {
1746         throw new DoNotRetryIOException("multiMutate doesn't support "
1747           + mutation.getClass().getName());
1748       }
1749     }
1750 
1751     MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
1752       MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
1753     try {
1754       service.mutateRows(null, mmrBuilder.build());
1755     } catch (ServiceException ex) {
1756       throw ProtobufUtil.toIOException(ex);
1757     }
1758   }
1759 
1760   /**
1761    * Updates the location of the specified region in hbase:meta to be the specified
1762    * server hostname and startcode.
1763    * <p>
1764    * Uses the passed connection to reach the server hosting
1765    * hbase:meta and makes edits to that region.
1766    *
1767    * @param connection connection we're using
1768    * @param regionInfo region to update location of
1769    * @param openSeqNum the latest sequence number obtained when the region was open
1770    * @param sn Server name
1771    * @param masterSystemTime wall clock time from master if passed in the open region RPC or -1
1772    * @throws IOException
1773    */
1774   public static void updateRegionLocation(Connection connection,
1775                                           HRegionInfo regionInfo, ServerName sn, long openSeqNum,
1776                                           long masterSystemTime)
1777     throws IOException {
1778     updateLocation(connection, regionInfo, sn, openSeqNum, masterSystemTime);
1779   }
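
  /*
   * Illustrative usage sketch: after a region opens on a server, its meta row is updated
   * with the new location. "conn", "hri", "serverName" and "openSeqNum" are assumed
   * caller-supplied values; -1 means no master timestamp was passed, so local time is used.
   *
   *   updateRegionLocation(conn, hri, serverName, openSeqNum, -1);
   */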
1780 
1781   /**
1782    * Updates the location of the specified region to be the specified server.
1783    * <p>
1784    * Connects to the specified server which should be hosting the specified
1785    * catalog region name to perform the edit.
1786    *
1787    * @param connection connection we're using
1788    * @param regionInfo region to update location of
1789    * @param sn Server name
1790    * @param openSeqNum the latest sequence number obtained when the region was open
1791    * @param masterSystemTime wall clock time from master if passed in the open region RPC or -1
1792    * @throws IOException In particular could throw {@link java.net.ConnectException}
1793    * if the server on the other end is down.
1794    */
1795   private static void updateLocation(final Connection connection,
1796                                      HRegionInfo regionInfo, ServerName sn, long openSeqNum,
1797                                      long masterSystemTime)
1798     throws IOException {
1799 
1800     // use the maximum of what master passed us vs local time.
1801     long time = Math.max(EnvironmentEdgeManager.currentTime(), masterSystemTime);
1802 
1803     // region replicas are kept in the primary region's row
1804     Put put = new Put(getMetaKeyForRegion(regionInfo), time);
1805     addLocation(put, sn, openSeqNum, time, regionInfo.getReplicaId());
1806     putToMetaTable(connection, put);
1807     LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
1808       " with server=" + sn);
1809   }
1810 
1811   /**
1812    * Deletes the specified region from META.
1813    * @param connection connection we're using
1814    * @param regionInfo region to be deleted from META
1815    * @throws IOException
1816    */
1817   public static void deleteRegion(Connection connection,
1818                                   HRegionInfo regionInfo)
1819     throws IOException {
1820     long time = EnvironmentEdgeManager.currentTime();
1821     Delete delete = new Delete(regionInfo.getRegionName());
1822     delete.addFamily(getCatalogFamily(), time);
1823     deleteFromMetaTable(connection, delete);
1824     LOG.info("Deleted " + regionInfo.getRegionNameAsString());
1825   }
1826 
1827   /**
1828    * Deletes the specified regions from META.
1829    * @param connection connection we're using
1830    * @param regionsInfo list of regions to be deleted from META
1831    * @throws IOException
1832    */
1833   public static void deleteRegions(Connection connection,
1834                                    List<HRegionInfo> regionsInfo) throws IOException {
1835     deleteRegions(connection, regionsInfo, EnvironmentEdgeManager.currentTime());
1836   }
1837   /**
1838    * Deletes the specified regions from META.
1839    * @param connection connection we're using
1840    * @param regionsInfo list of regions to be deleted from META
1841    * @throws IOException
1842    */
1843   public static void deleteRegions(Connection connection,
1844                                    List<HRegionInfo> regionsInfo, long ts) throws IOException {
1845     List<Delete> deletes = new ArrayList<Delete>(regionsInfo.size());
1846     for (HRegionInfo hri: regionsInfo) {
1847       Delete e = new Delete(hri.getRegionName());
1848       e.addFamily(getCatalogFamily(), ts);
1849       deletes.add(e);
1850     }
1851     deleteFromMetaTable(connection, deletes);
1852     LOG.info("Deleted " + regionsInfo);
1853   }
1854 
1855   /**
1856    * Adds and removes the specified regions in hbase:meta
1857    * @param connection connection we're using
1858    * @param regionsToRemove list of regions to be deleted from META
1859    * @param regionsToAdd list of regions to be added to META
1860    * @throws IOException
1861    */
1862   public static void mutateRegions(Connection connection,
1863                                    final List<HRegionInfo> regionsToRemove,
1864                                    final List<HRegionInfo> regionsToAdd)
1865     throws IOException {
1866     List<Mutation> mutations = new ArrayList<Mutation>();
1867     if (regionsToRemove != null) {
1868       for (HRegionInfo hri: regionsToRemove) {
1869         mutations.add(makeDeleteFromRegionInfo(hri));
1870       }
1871     }
1872     if (regionsToAdd != null) {
1873       for (HRegionInfo hri: regionsToAdd) {
1874         mutations.add(makePutFromRegionInfo(hri));
1875       }
1876     }
1877     mutateMetaTable(connection, mutations);
1878     if (regionsToRemove != null && regionsToRemove.size() > 0) {
1879       LOG.debug("Deleted " + regionsToRemove);
1880     }
1881     if (regionsToAdd != null && regionsToAdd.size() > 0) {
1882       LOG.debug("Added " + regionsToAdd);
1883     }
1884   }
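
  /*
   * Illustrative usage sketch: a region change can be applied as a single batch of deletes
   * followed by puts. "conn", "oldRegions" (to remove) and "newRegions" (to add) are
   * assumed caller-supplied values.
   *
   *   mutateRegions(conn, oldRegions, newRegions);
   */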
1885 
1886   /**
1887    * Overwrites the specified regions in hbase:meta
1888    * @param connection connection we're using
1889    * @param regionInfos list of regions to be added to META
1890    * @throws IOException
1891    */
1892   public static void overwriteRegions(Connection connection,
1893       List<HRegionInfo> regionInfos, int regionReplication) throws IOException {
1894     // use master time for delete marker and the Put
1895     long now = EnvironmentEdgeManager.currentTime();
1896     deleteRegions(connection, regionInfos, now);
1897     // The puts are written at now + 1 so that the preceding delete markers (written at
1898     // now) cannot eclipse them when both land in the same millisecond on the server.
1899     // See HBASE-9906 and HBASE-9879. Once HBASE-9879, HBASE-8770 or HBASE-9905 is fixed
1900     // and meta uses seqIds, this workaround will not be needed.
1901     //
1902     // HBASE-13875 uses the master timestamp for the mutations, so no 20ms sleep is needed.
1903     addRegionsToMeta(connection, regionInfos, regionReplication, now + 1);
1904     LOG.info("Overwritten " + regionInfos);
1905   }
1906 
1907   /**
1908    * Deletes merge qualifiers for the specified merged region.
1909    * @param connection connection we're using
1910    * @param mergedRegion the merged region whose merge qualifiers should be removed
1911    * @throws IOException
1912    */
1913   public static void deleteMergeQualifiers(Connection connection,
1914                                            final HRegionInfo mergedRegion) throws IOException {
1915     long time = EnvironmentEdgeManager.currentTime();
1916     Delete delete = new Delete(mergedRegion.getRegionName());
1917     delete.addColumns(getCatalogFamily(), HConstants.MERGEA_QUALIFIER, time);
1918     delete.addColumns(getCatalogFamily(), HConstants.MERGEB_QUALIFIER, time);
1919     deleteFromMetaTable(connection, delete);
1920     LOG.info("Deleted references in merged region "
1921       + mergedRegion.getRegionNameAsString() + ", qualifier="
1922       + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
1923       + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
1924   }
1925 
1926   private static Put addRegionInfo(final Put p, final HRegionInfo hri)
1927     throws IOException {
1928     p.addImmutable(getCatalogFamily(), HConstants.REGIONINFO_QUALIFIER,
1929       hri.toByteArray());
1930     return p;
1931   }
1932 
1933   public static Put addLocation(final Put p, final ServerName sn, long openSeqNum,
1934       long time, int replicaId) {
1935     if (time <= 0) {
1936       time = EnvironmentEdgeManager.currentTime();
1937     }
1938     p.addImmutable(getCatalogFamily(), getServerColumn(replicaId), time,
1939       Bytes.toBytes(sn.getHostAndPort()));
1940     p.addImmutable(getCatalogFamily(), getStartCodeColumn(replicaId), time,
1941       Bytes.toBytes(sn.getStartcode()));
1942     p.addImmutable(getCatalogFamily(), getSeqNumColumn(replicaId), time,
1943       Bytes.toBytes(openSeqNum));
1944     return p;
1945   }
1946 
1947   public static Put addEmptyLocation(final Put p, int replicaId) {
1948     long now = EnvironmentEdgeManager.currentTime();
1949     p.addImmutable(getCatalogFamily(), getServerColumn(replicaId), now, null);
1950     p.addImmutable(getCatalogFamily(), getStartCodeColumn(replicaId), now, null);
1951     p.addImmutable(getCatalogFamily(), getSeqNumColumn(replicaId), now, null);
1952     return p;
1953   }
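
  /*
   * Illustrative usage sketch: a freshly built catalog Put can be given a real location for
   * the primary replica and empty location columns for the others. "hri" and "serverName"
   * are assumed caller-supplied values.
   *
   *   Put put = makePutFromRegionInfo(hri);
   *   addLocation(put, serverName, 1, -1, 0);   // primary replica, openSeqNum = 1
   *   addEmptyLocation(put, 1);                 // placeholder for replica 1
   */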
1954 
1955   private static String mutationsToString(Mutation ... mutations) throws IOException {
1956     StringBuilder sb = new StringBuilder();
1957     String prefix = "";
1958     for (Mutation mutation : mutations) {
1959       sb.append(prefix).append(mutationToString(mutation));
1960       prefix = ", ";
1961     }
1962     return sb.toString();
1963   }
1964 
1965   private static String mutationsToString(List<? extends Mutation> mutations) throws IOException {
1966     StringBuilder sb = new StringBuilder();
1967     String prefix = "";
1968     for (Mutation mutation : mutations) {
1969       sb.append(prefix).append(mutationToString(mutation));
1970       prefix = ", ";
1971     }
1972     return sb.toString();
1973   }
1974 
1975   private static String mutationToString(Mutation p) throws IOException {
1976     return p.getClass().getSimpleName() + p.toJSON();
1977   }
1978 }