1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 package org.apache.hadoop.hbase.regionserver;
21
22 import java.io.FileNotFoundException;
23 import java.io.IOException;
24 import java.io.InterruptedIOException;
25 import java.util.ArrayList;
26 import java.util.Collection;
27 import java.util.List;
28 import java.util.Map;
29 import java.util.UUID;
30
31 import org.apache.commons.logging.Log;
32 import org.apache.commons.logging.LogFactory;
33 import org.apache.hadoop.hbase.classification.InterfaceAudience;
34 import org.apache.hadoop.conf.Configuration;
35 import org.apache.hadoop.fs.FSDataInputStream;
36 import org.apache.hadoop.fs.FSDataOutputStream;
37 import org.apache.hadoop.fs.FileStatus;
38 import org.apache.hadoop.fs.FileSystem;
39 import org.apache.hadoop.fs.FileUtil;
40 import org.apache.hadoop.fs.Path;
41 import org.apache.hadoop.fs.permission.FsPermission;
42 import org.apache.hadoop.hbase.Cell;
43 import org.apache.hadoop.hbase.HColumnDescriptor;
44 import org.apache.hadoop.hbase.HConstants;
45 import org.apache.hadoop.hbase.HRegionInfo;
46 import org.apache.hadoop.hbase.HTableDescriptor;
47 import org.apache.hadoop.hbase.KeyValue;
48 import org.apache.hadoop.hbase.KeyValueUtil;
49 import org.apache.hadoop.hbase.backup.HFileArchiver;
50 import org.apache.hadoop.hbase.fs.HFileSystem;
51 import org.apache.hadoop.hbase.io.Reference;
52 import org.apache.hadoop.hbase.util.Bytes;
53 import org.apache.hadoop.hbase.util.FSHDFSUtils;
54 import org.apache.hadoop.hbase.util.FSUtils;
55 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
56
57
58
59
60
61 @InterfaceAudience.Private
62 public class HRegionFileSystem {
  private static final Log LOG = LogFactory.getLog(HRegionFileSystem.class);

  /** Name of the region info file that resides just under the region directory. */
  public final static String REGION_INFO_FILE = ".regioninfo";

  /** Temporary subdirectory of the region directory used for merges. */
  public static final String REGION_MERGES_DIR = ".merges";

  /** Temporary subdirectory of the region directory used for splits. */
  public static final String REGION_SPLITS_DIR = ".splits";

  /** Temporary subdirectory of the region directory used for scratch files (see createTempName()). */
  private static final String REGION_TEMP_DIR = ".tmp";

  /** The region served by this instance. */
  private final HRegionInfo regionInfo;

  // Region info to use for on-disk paths; obtained via ServerRegionReplicaUtil.getRegionInfoForFs,
  // presumably so replica regions resolve to the primary's directory layout — confirm against that util.
  private final HRegionInfo regionInfoForFs;
  private final Configuration conf;
  private final Path tableDir;
  private final FileSystem fs;

  // To handle NameNode connectivity hiccups, createDir/rename/deleteDir retry their filesystem
  // operation a configurable number of times with a configurable pause between attempts.
  private final int hdfsClientRetriesNumber;
  private final int baseSleepBeforeRetries;
  private static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
  private static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;
92
93
94
95
96
97
98
99
  /**
   * Create a view over the on-disk layout of a region.
   * @param conf the {@link Configuration} to use (also supplies the retry settings)
   * @param fs {@link FileSystem} that contains the region
   * @param tableDir {@link Path} to where the table data is stored
   * @param regionInfo {@link HRegionInfo} for the region
   */
  HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir,
      final HRegionInfo regionInfo) {
    this.fs = fs;
    this.conf = conf;
    this.tableDir = tableDir;
    this.regionInfo = regionInfo;
    this.regionInfoForFs = ServerRegionReplicaUtil.getRegionInfoForFs(regionInfo);
    this.hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
    this.baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
  }
112
113
  /** @return the underlying {@link FileSystem} */
  public FileSystem getFileSystem() {
    return this.fs;
  }

  /** @return the {@link HRegionInfo} that describes this on-disk region view */
  public HRegionInfo getRegionInfo() {
    return this.regionInfo;
  }

  /** @return the region info used to build filesystem paths (may differ from getRegionInfo()) */
  public HRegionInfo getRegionInfoForFS() {
    return this.regionInfoForFs;
  }

  /** @return {@link Path} to the region's root directory's parent (the table directory) */
  public Path getTableDir() {
    return this.tableDir;
  }

  /** @return {@link Path} to the region directory: {@code <tableDir>/<encodedRegionName>} */
  public Path getRegionDir() {
    return new Path(this.tableDir, this.regionInfoForFs.getEncodedName());
  }

  // ===========================================================================
  //  Temp Helpers
  // ===========================================================================
  /** @return {@link Path} to the region's temp directory, used for file creations */
  Path getTempDir() {
    return new Path(getRegionDir(), REGION_TEMP_DIR);
  }

  /**
   * Clean up any temp detritus that may have been left around from previous operation attempts.
   */
  void cleanupTempDir() throws IOException {
    deleteDir(getTempDir());
  }
151
152
153
154
155
156
157
158
159
  /**
   * Returns the directory path of the specified family.
   * @param familyName Column Family Name
   * @return {@link Path} to the directory of the specified family:
   *         {@code <regionDir>/<familyName>}
   */
  public Path getStoreDir(final String familyName) {
    return new Path(this.getRegionDir(), familyName);
  }

  /**
   * Create the store directory for the specified family name, if it does not exist yet.
   * @param familyName Column Family Name
   * @return {@link Path} to the directory of the specified family
   * @throws IOException if the directory creation fails
   */
  Path createStoreDir(final String familyName) throws IOException {
    Path storeDir = getStoreDir(familyName);
    if(!fs.exists(storeDir) && !createDir(storeDir))
      throw new IOException("Failed creating "+storeDir);
    return storeDir;
  }
176
177
178
179
180
181
182
  /**
   * Returns the store files available for the family.
   * This methods performs the filtering based on the valid store files.
   * @param familyName Column Family Name
   * @return a set of {@link StoreFileInfo} for the specified family,
   *         or null if the family directory does not exist or is empty
   */
  public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
    return getStoreFiles(Bytes.toString(familyName));
  }

  /** Same as {@link #getStoreFiles(String, boolean)} with validation enabled. */
  public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
    return getStoreFiles(familyName, true);
  }

  /**
   * Returns the store files available for the family.
   * @param familyName Column Family Name
   * @param validate if true, skip (and warn about) files rejected by
   *        {@link StoreFileInfo#isValid(FileStatus)}
   * @return a set of {@link StoreFileInfo} for the specified family,
   *         or null if the family directory does not exist or is empty
   */
  public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
      throws IOException {
    Path familyDir = getStoreDir(familyName);
    FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
    if (files == null) {
      // NOTE: callers are expected to handle the null return (no listing available).
      LOG.debug("No StoreFiles for: " + familyDir);
      return null;
    }

    ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
    for (FileStatus status: files) {
      if (validate && !StoreFileInfo.isValid(status)) {
        LOG.warn("Invalid StoreFile: " + status.getPath());
        continue;
      }
      StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
        regionInfoForFs, familyName, status.getPath());
      storeFiles.add(info);

    }
    return storeFiles;
  }
219
220
221
222
223
224
225
226
  /**
   * Return the fully-qualified {@link Path} of the specified family/file.
   * @param familyName Column Family Name
   * @param fileName File Name
   * @return the qualified Path for the specified family/file
   */
  Path getStoreFilePath(final String familyName, final String fileName) {
    Path familyDir = getStoreDir(familyName);
    return new Path(familyDir, fileName).makeQualified(this.fs);
  }

  /**
   * Return the store file information of the specified family/file.
   * @param familyName Column Family Name
   * @param fileName File Name
   * @return The {@link StoreFileInfo} for the specified family/file
   */
  StoreFileInfo getStoreFileInfo(final String familyName, final String fileName)
      throws IOException {
    Path familyDir = getStoreDir(familyName);
    return ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
      regionInfoForFs, familyName, new Path(familyDir, fileName));
  }
245
246
247
248
249
250
251
252 public boolean hasReferences(final String familyName) throws IOException {
253 FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName),
254 new FSUtils.ReferenceFileFilter(fs));
255 return files != null && files.length > 0;
256 }
257
258
259
260
261
262
263
264 public boolean hasReferences(final HTableDescriptor htd) throws IOException {
265 for (HColumnDescriptor family : htd.getFamilies()) {
266 if (hasReferences(family.getNameAsString())) {
267 return true;
268 }
269 }
270 return false;
271 }
272
273
274
275
276
277 public Collection<String> getFamilies() throws IOException {
278 FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
279 if (fds == null) return null;
280
281 ArrayList<String> families = new ArrayList<String>(fds.length);
282 for (FileStatus status: fds) {
283 families.add(status.getPath().getName());
284 }
285
286 return families;
287 }
288
289
290
291
292
293
  /**
   * Remove the region family from disk: the family's files are first moved to
   * the archive via {@link HFileArchiver}, then the (now empty) family directory
   * is deleted with the retrying deleteDir() helper.
   * @param familyName Column Family Name
   * @throws IOException if an error occurs during the archiving or deletion
   */
  public void deleteFamily(final String familyName) throws IOException {
    // archive family store files
    HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, Bytes.toBytes(familyName));

    // delete the family folder
    Path familyDir = getStoreDir(familyName);
    if(fs.exists(familyDir) && !deleteDir(familyDir))
      throw new IOException("Could not delete family " + familyName
        + " from FileSystem for region " + regionInfoForFs.getRegionNameAsString() + "("
        + regionInfoForFs.getEncodedName() + ")");
  }
305
306
307
308
309
310
311 private static String generateUniqueName(final String suffix) {
312 String name = UUID.randomUUID().toString().replaceAll("-", "");
313 if (suffix != null) name += suffix;
314 return name;
315 }
316
317
318
319
320
321
322
323
324
325
326
327
  /**
   * Generate a unique temporary Path under the region's .tmp directory.
   * Used during file creation: the file is written in the tmp location and
   * later moved into place via commitStoreFile().
   * @return a unique {@link Path} inside the region's .tmp directory
   */
  public Path createTempName() {
    return createTempName(null);
  }

  /**
   * Generate a unique temporary Path under the region's .tmp directory,
   * with extra information appended to the generated name.
   * @param suffix extra information to append to the generated name, or null for none
   * @return a unique {@link Path} inside the region's .tmp directory
   */
  public Path createTempName(final String suffix) {
    return new Path(getTempDir(), generateUniqueName(suffix));
  }
347
348
349
350
351
352
353
354
  /**
   * Move the file from a build/temp location to the main family store directory,
   * keeping the file's existing name.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit
   * @return the new {@link Path} of the committed file
   * @throws IOException if the store dir cannot be created, the source is missing,
   *         or the rename fails
   */
  public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
    return commitStoreFile(familyName, buildPath, -1, false);
  }

  /**
   * Move the file from a build/temp location to the main family store directory.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit
   * @param seqNum Sequence Number to append to the file name (less than 0 to avoid it)
   * @param generateNewName if true, a fresh unique name is generated instead of
   *        keeping {@code buildPath}'s file name
   * @return The new {@link Path} of the committed file
   * @throws IOException on failure
   */
  private Path commitStoreFile(final String familyName, final Path buildPath,
      final long seqNum, final boolean generateNewName) throws IOException {
    Path storeDir = getStoreDir(familyName);
    if(!fs.exists(storeDir) && !createDir(storeDir))
      throw new IOException("Failed creating " + storeDir);

    String name = buildPath.getName();
    if (generateNewName) {
      // bulk loads encode the sequence id into the name so readers can recover it
      name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
    }
    Path dstPath = new Path(storeDir, name);
    if (!fs.exists(buildPath)) {
      throw new FileNotFoundException(buildPath.toString());
    }
    LOG.debug("Committing store file " + buildPath + " as " + dstPath);
    // rename() is the retrying helper; buildPath exists, so a failure is a real error
    if (!rename(buildPath, dstPath)) {
      throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
    }
    return dstPath;
  }
389
390
391
392
393
394
395
396 void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
397 for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
398 String familyName = Bytes.toString(es.getKey());
399 for (StoreFile sf: es.getValue()) {
400 commitStoreFile(familyName, sf.getPath());
401 }
402 }
403 }
404
405
406
407
408
409
410
  /**
   * Archives the specified store file from the specified family.
   * The file is moved to the archive location by {@link HFileArchiver}, not deleted.
   * @param familyName Family that contains the store file
   * @param filePath {@link Path} to the store file to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFile(final String familyName, final Path filePath)
      throws IOException {
    HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfoForFs,
        this.tableDir, Bytes.toBytes(familyName), filePath);
  }

  /**
   * Closes and archives the specified store files from the specified family.
   * @param familyName Family that contains the store files
   * @param storeFiles set of store files to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
      throws IOException {
    HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfoForFs,
        this.tableDir, Bytes.toBytes(familyName), storeFiles);
  }
428
429
430
431
432
433
434
435
436
437
438
439
  /**
   * Bulk load: add a specified store file to the specified family. If the source file is on a
   * different file system than this region's, the file is first copied into the region's temp
   * directory; it is then committed with a freshly generated name carrying the sequence id.
   * @param familyName Family that will gain the file
   * @param srcPath {@link Path} to the file to import
   * @param seqNum Bulk Load sequence number (encoded into the committed file name)
   * @return The destination {@link Path} of the imported file
   * @throws IOException on copy or commit failure
   */
  Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
      throws IOException {
    // Copy the file if it's on another filesystem
    FileSystem srcFs = srcPath.getFileSystem(conf);
    // unwrap HFileSystem so the HDFS-identity check below sees the real backing fs
    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

    // We can't compare FileSystem instances as equals() includes UGI instance
    // as part of the comparison and won't work when doing SecureBulkLoad
    // TODO deal with viewFS
    if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
      LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
          "the destination store. Copying file over to destination filesystem.");
      Path tmpPath = createTempName();
      FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
      LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
      srcPath = tmpPath;
    }

    return commitStoreFile(familyName, srcPath, seqNum, true);
  }
460
461
462
463
464
  /** @return {@link Path} to the temp directory used during split operations */
  Path getSplitsDir() {
    return new Path(getRegionDir(), REGION_SPLITS_DIR);
  }

  /** @return {@link Path} to the daughter region's staging area inside the splits directory */
  Path getSplitsDir(final HRegionInfo hri) {
    return new Path(getSplitsDir(), hri.getEncodedName());
  }

  /**
   * Clean up any split detritus that may have been left around from previous split attempts.
   */
  void cleanupSplitsDir() throws IOException {
    deleteDir(getSplitsDir());
  }
479
480
481
482
483
484
485
  /**
   * Clean up any split detritus that may have been left around from previous
   * split attempts: for each daughter staged under the splits directory, delete
   * the corresponding daughter region directory under the table dir (if any),
   * then delete the splits directory itself.
   * @throws IOException if a daughter region directory cannot be deleted
   */
  void cleanupAnySplitDetritus() throws IOException {
    Path splitdir = this.getSplitsDir();
    if (!fs.exists(splitdir)) return;
    // Look at the splitdir.  It could have the encoded names of the daughter
    // regions we tried to make.  See if the daughter regions actually got made
    // out under the table dir.  If here under splitdir still, then the split did
    // not complete; they are safe to delete.
    FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
    if (daughters != null) {
      for (FileStatus daughter: daughters) {
        Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
        if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
          throw new IOException("Failed delete of " + daughterDir);
        }
      }
    }
    // remove the staging area last, so a failure above leaves evidence behind
    cleanupSplitsDir();
    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
  }
508
509
510
511
512
513
514 void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
515 Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
516 if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
517 throw new IOException("Failed delete of " + regionDir);
518 }
519 }
520
521
522
523
524
525
526
527
  /**
   * Commit a daughter region, moving it from the split temporary directory
   * to the proper location in the filesystem. A .regioninfo file is written
   * into the staged directory before the move so the committed region is complete.
   * If the staged directory does not exist, nothing is moved and the target
   * path is simply returned.
   * @param regionInfo daughter {@link HRegionInfo}
   * @return the committed daughter region directory under the table dir
   * @throws IOException if the rename fails
   */
  Path commitDaughterRegion(final HRegionInfo regionInfo)
      throws IOException {
    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
    Path daughterTmpDir = this.getSplitsDir(regionInfo);

    if (fs.exists(daughterTmpDir)) {

      // Write HRI to a file in case we need to recover hbase:meta
      Path regionInfoFile = new Path(daughterTmpDir, REGION_INFO_FILE);
      byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);

      // Move the daughter temp dir to the table dir (retrying rename helper)
      if (!rename(daughterTmpDir, regionDir)) {
        throw new IOException("Unable to rename " + daughterTmpDir + " to " + regionDir);
      }
    }

    return regionDir;
  }
548
549
550
551
552 void createSplitsDir() throws IOException {
553 Path splitdir = getSplitsDir();
554 if (fs.exists(splitdir)) {
555 LOG.info("The " + splitdir + " directory exists. Hence deleting it to recreate it");
556 if (!deleteDir(splitdir)) {
557 throw new IOException("Failed deletion of " + splitdir
558 + " before creating them again.");
559 }
560 }
561
562 if (!createDir(splitdir)) {
563 throw new IOException("Failed create of " + splitdir);
564 }
565 }
566
567
568
569
570
571
572
573
574
575
576
577
578
  /**
   * Write out a split reference for the given store file into the daughter's
   * staging directory under the splits dir. Unless the split policy skips the
   * range check for this family, the method first verifies that the split row
   * actually falls inside the file's key range; if it does not, no reference
   * is needed and null is returned.
   * @param hri daughter {@link HRegionInfo}
   * @param familyName Column Family Name
   * @param f the store file to split
   * @param splitRow split row
   * @param top true to create a top (upper-half) reference, false for a bottom reference
   * @param splitPolicy policy that may skip the store-file range check; may be null
   * @return Path to the created reference file, or null if the file does not
   *         overlap the requested half and no reference was written
   * @throws IOException on filesystem error
   */
  Path splitStoreFile(final HRegionInfo hri, final String familyName, final StoreFile f,
      final byte[] splitRow, final boolean top, RegionSplitPolicy splitPolicy)
      throws IOException {

    if (splitPolicy == null || !splitPolicy.skipStoreFileRangeCheck(familyName)) {
      // Check whether the split row lies in the range of the store file
      // If it is outside the range, return directly.
      try {
        if (top) {
          // check if larger than last key.
          KeyValue splitKey = KeyValueUtil.createFirstOnRow(splitRow);
          Cell lastKey = f.getLastKey();
          // If lastKey is null means storefile is empty.
          if (lastKey == null) {
            return null;
          }
          if (f.getComparator().compare(splitKey, lastKey) > 0) {
            // split row is beyond the end of the file: top half would be empty
            return null;
          }
        } else {
          // check if smaller than first key
          KeyValue splitKey = KeyValueUtil.createLastOnRow(splitRow);
          Cell firstKey = f.getFirstKey();
          // If firstKey is null means storefile is empty.
          if (firstKey == null) {
            return null;
          }
          if (f.getComparator().compare(splitKey, firstKey) < 0) {
            // split row is before the start of the file: bottom half would be empty
            return null;
          }
        }
      } finally {
        // the reader was only needed for the range check
        f.closeReader(true);
      }
    }

    Path splitDir = new Path(getSplitsDir(hri), familyName);
    // A reference to the bottom half of the hsf store file.
    Reference r =
      top ? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
    // Add the referred-to regions name as a dot separated suffix.
    // See REF_NAME_REGEX regex above.  The referred-to regions name is
    // up in the path of the passed in <code>f</code> -- parentdir is family,
    // then the directory above is the region name.
    String parentRegionName = regionInfoForFs.getEncodedName();
    // Write reference with same file id only with the other region name as
    // suffix and into the new region location (under same family).
    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
    return r.write(fs, p);
  }
629
630
631
632
633
  /** @return {@link Path} to the temp directory used during merge operations */
  Path getMergesDir() {
    return new Path(getRegionDir(), REGION_MERGES_DIR);
  }

  /** @return {@link Path} to the merged region's staging area inside the merges directory */
  Path getMergesDir(final HRegionInfo hri) {
    return new Path(getMergesDir(), hri.getEncodedName());
  }

  /**
   * Clean up any merge detritus that may have been left around from previous merge attempts.
   */
  void cleanupMergesDir() throws IOException {
    deleteDir(getMergesDir());
  }
648
649
650
651
652
653
654 void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {
655 Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
656 if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
657 throw new IOException("Failed delete of " + regionDir);
658 }
659 }
660
661
662
663
664
665
666 void createMergesDir() throws IOException {
667 Path mergesdir = getMergesDir();
668 if (fs.exists(mergesdir)) {
669 LOG.info("The " + mergesdir
670 + " directory exists. Hence deleting it to recreate it");
671 if (!fs.delete(mergesdir, true)) {
672 throw new IOException("Failed deletion of " + mergesdir
673 + " before creating them again.");
674 }
675 }
676 if (!fs.mkdirs(mergesdir))
677 throw new IOException("Failed create of " + mergesdir);
678 }
679
680
681
682
683
684
685
686
687
688
689
  /**
   * Write out a merge reference for the given store file under the merged
   * region's family directory (inside {@code mergedDir}). The reference is a
   * top reference anchored at this (merging) region's start key, and the file
   * name carries this region's encoded name as a dot-separated suffix.
   * @param mergedRegion {@link HRegionInfo} of the merged region
   * @param familyName Column Family Name
   * @param f the store file to reference
   * @param mergedDir directory that contains the merged region's staging area
   * @return Path to the created reference file
   * @throws IOException on filesystem error
   */
  Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
      final StoreFile f, final Path mergedDir)
      throws IOException {
    Path referenceDir = new Path(new Path(mergedDir,
        mergedRegion.getEncodedName()), familyName);
    // A whole reference to the store file.
    Reference r = Reference.createTopReference(regionInfoForFs.getStartKey());
    // Add the referred-to regions name as a dot separated suffix.
    // See REF_NAME_REGEX regex above. The referred-to regions name is
    // up in the path of the passed in <code>f</code> -- parentdir is family,
    // then the directory above is the region name.
    String mergingRegionName = regionInfoForFs.getEncodedName();
    // Write reference with same file id only with the other region name as
    // suffix and into the new region location (under same family).
    Path p = new Path(referenceDir, f.getPath().getName() + "."
        + mergingRegionName);
    return r.write(fs, p);
  }
708
709
710
711
712
713
714
715 void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
716 Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
717 Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
718
719 if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
720 if (!fs.rename(mergedRegionTmpDir, regionDir)) {
721 throw new IOException("Unable to rename " + mergedRegionTmpDir + " to "
722 + regionDir);
723 }
724 }
725 }
726
727
728
729
730
731
732
733
734
  /**
   * Log the current state of the region's directory tree.
   * @param LOG log to output information to
   * @throws IOException if an unexpected exception occurs
   */
  void logFileSystemState(final Log LOG) throws IOException {
    FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
  }

  /**
   * @param hri region to serialize
   * @return the serialized content of the .regioninfo file for the given region
   * @throws IOException on serialization failure
   */
  private static byte[] getRegionInfoFileContent(final HRegionInfo hri) throws IOException {
    return hri.toDelimitedByteArray();
  }
747
748
749
750
751
752
753
754
755 public static HRegionInfo loadRegionInfoFileContent(final FileSystem fs, final Path regionDir)
756 throws IOException {
757 FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE));
758 try {
759 return HRegionInfo.parseFrom(in);
760 } finally {
761 in.close();
762 }
763 }
764
765
766
767
768 private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs,
769 final Path regionInfoFile, final byte[] content) throws IOException {
770
771 FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
772
773 FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null);
774 try {
775 out.write(content);
776 } finally {
777 out.close();
778 }
779 }
780
781
782
783
784
  /**
   * Ensure a usable .regioninfo file is present in the region directory,
   * rewriting it (via the temp-dir path) when it is missing or its length does
   * not match the expected serialized content.
   * NOTE(review): only the file length is compared against the expected
   * content, not the bytes themselves — a same-length stale file would be kept.
   * @throws IOException on filesystem error
   */
  void checkRegionInfoOnFilesystem() throws IOException {
    // Compose the content of the file so we can compare to length in filesystem. If not same,
    // rewrite it (it may have been written in the old format).
    byte[] content = getRegionInfoFileContent(regionInfoForFs);
    try {
      Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);

      FileStatus status = fs.getFileStatus(regionInfoFile);
      if (status != null && status.getLen() == content.length) {
        // Then assume the content good and move on.
        // NOTE: that the length is not sufficient to define the the content matches.
        return;
      }

      LOG.info("Rewriting .regioninfo file at: " + regionInfoFile);
      if (!fs.delete(regionInfoFile, false)) {
        throw new IOException("Unable to remove existing " + regionInfoFile);
      }
    } catch (FileNotFoundException e) {
      // getFileStatus throws when the file is absent; fall through and (re)write it
      LOG.warn(REGION_INFO_FILE + " file not found for region: " + regionInfoForFs.getEncodedName() +
          " on table " + regionInfo.getTable());
    }

    // Write HRI to a file in case we need to recover hbase:meta
    writeRegionInfoOnFilesystem(content, true);
  }
814
815
816
817
818
  /**
   * Write out the serialized .regioninfo file for this region.
   * @param useTempDir indicate whether or not to write the region info to a temp
   *        directory first and then move it into place (safer against partial writes)
   * @throws IOException on write failure
   */
  private void writeRegionInfoOnFilesystem(boolean useTempDir) throws IOException {
    byte[] content = getRegionInfoFileContent(regionInfoForFs);
    writeRegionInfoOnFilesystem(content, useTempDir);
  }
823
824
825
826
827
828
  /**
   * Write out the given .regioninfo content for this region, either directly
   * into the region dir or staged through the region's temp directory.
   * @param regionInfoContent serialized content of the .regioninfo file
   * @param useTempDir if true, write to .tmp first and then rename into place
   * @throws IOException on write or rename failure
   */
  private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent,
      final boolean useTempDir) throws IOException {
    Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
    if (useTempDir) {
      // Create in tmpDir and then move into place in case we crash after
      // create but before close. If we don't successfully close the file,
      // subsequent region reopens will fail the below because create is
      // registered in NN.

      // And then create the file
      Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);

      // If something is left over from a previous attempt, clean it up first
      if (FSUtils.isExists(fs, tmpPath)) {
        FSUtils.delete(fs, tmpPath, true);
      }

      // Write HRI to a file in case we need to recover hbase:meta
      writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);

      // Move the created file to the original path (retrying rename helper)
      if (fs.exists(tmpPath) && !rename(tmpPath, regionInfoFile)) {
        throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
      }
    } else {
      // Write HRI to a file in case we need to recover hbase:meta
      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
    }
  }
861
862
863
864
865
866
867
868
869
  /**
   * Create a new Region on file-system: create the region directory and write
   * its .regioninfo file. Fails if the region directory already exists.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} from which to add the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for region to be added
   * @return the new {@link HRegionFileSystem}
   * @throws IOException if the region already exists or creation fails
   */
  public static HRegionFileSystem createRegionOnFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (fs.exists(regionDir)) {
      LOG.warn("Trying to create a region that already exists on disk: " + regionDir);
      throw new IOException("The specified region already exists on disk: " + regionDir);
    }

    // Create the region directory
    if (!createDirOnFileSystem(fs, conf, regionDir)) {
      LOG.warn("Unable to create the region directory: " + regionDir);
      throw new IOException("Unable to create region directory: " + regionDir);
    }

    // Write HRI to a file directly (no temp staging needed: the dir is brand new)
    regionFs.writeRegionInfoOnFilesystem(false);
    return regionFs;
  }
890
891
892
893
894
895
896
897
898
899
900 public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
901 final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
902 throws IOException {
903 HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
904 Path regionDir = regionFs.getRegionDir();
905
906 if (!fs.exists(regionDir)) {
907 LOG.warn("Trying to open a region that do not exists on disk: " + regionDir);
908 throw new IOException("The specified region do not exists on disk: " + regionDir);
909 }
910
911 if (!readOnly) {
912
913 regionFs.cleanupTempDir();
914 regionFs.cleanupSplitsDir();
915 regionFs.cleanupMergesDir();
916
917
918 regionFs.checkRegionInfoOnFilesystem();
919 }
920
921 return regionFs;
922 }
923
924
925
926
927
928
929
930
931
932 public static void deleteRegionFromFileSystem(final Configuration conf,
933 final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
934 HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
935 Path regionDir = regionFs.getRegionDir();
936
937 if (!fs.exists(regionDir)) {
938 LOG.warn("Trying to delete a region that do not exists on disk: " + regionDir);
939 return;
940 }
941
942 if (LOG.isDebugEnabled()) {
943 LOG.debug("DELETING region " + regionDir);
944 }
945
946
947 Path rootDir = FSUtils.getRootDir(conf);
948 HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);
949
950
951 if (!fs.delete(regionDir, true)) {
952 LOG.warn("Failed delete of " + regionDir);
953 }
954 }
955
956
957
958
959
960
961
962
  /**
   * Creates a directory, retrying up to hdfsClientRetriesNumber times on
   * IOException (to ride over transient NameNode hiccups). If mkdirs throws
   * but the directory turns out to exist, that attempt counts as success.
   * @param dir directory path to create
   * @return the result of fs.mkdirs(), or true if the dir exists after a failed attempt
   * @throws IOException wrapping the last failure once retries are exhausted
   */
  boolean createDir(Path dir) throws IOException {
    int i = 0;
    IOException lastIOE = null;
    do {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (fs.exists(dir)) return true; // directory is present
        try {
          sleepBeforeRetry("Create Directory", i+1);
        } catch (InterruptedException e) {
          // preserve interrupt status by rethrowing as InterruptedIOException
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in createDir", lastIOE);
  }
981
982
983
984
985
986
987
988
989 boolean rename(Path srcpath, Path dstPath) throws IOException {
990 IOException lastIOE = null;
991 int i = 0;
992 do {
993 try {
994 return fs.rename(srcpath, dstPath);
995 } catch (IOException ioe) {
996 lastIOE = ioe;
997 if (!fs.exists(srcpath) && fs.exists(dstPath)) return true;
998
999 try {
1000 sleepBeforeRetry("Rename Directory", i+1);
1001 } catch (InterruptedException e) {
1002 throw (InterruptedIOException)new InterruptedIOException().initCause(e);
1003 }
1004 }
1005 } while (++i <= hdfsClientRetriesNumber);
1006
1007 throw new IOException("Exception in rename", lastIOE);
1008 }
1009
1010
1011
1012
1013
1014
1015
  /**
   * Deletes a directory recursively, retrying up to hdfsClientRetriesNumber
   * times on IOException. If after a failed attempt the directory no longer
   * exists, the delete is treated as having succeeded.
   * @param dir directory path to delete
   * @return the result of fs.delete(), or true if the dir is gone after a failed attempt
   * @throws IOException wrapping the last failure once retries are exhausted
   */
  boolean deleteDir(Path dir) throws IOException {
    IOException lastIOE = null;
    int i = 0;
    do {
      try {
        return fs.delete(dir, true);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (!fs.exists(dir)) return true; // directory is already gone
        try {
          sleepBeforeRetry("Delete Directory", i+1);
        } catch (InterruptedException e) {
          // preserve interrupt status by rethrowing as InterruptedIOException
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in DeleteDir", lastIOE);
  }
1036
1037
1038
1039
  /**
   * Sleep for this instance's configured retry interval, scaled by the attempt number.
   * @param msg operation description used in log output
   * @param sleepMultiplier attempt number (1-based); sleep time grows linearly with it
   */
  private void sleepBeforeRetry(String msg, int sleepMultiplier) throws InterruptedException {
    sleepBeforeRetry(msg, sleepMultiplier, baseSleepBeforeRetries, hdfsClientRetriesNumber);
  }
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
  /**
   * Static variant of createDir() usable before an HRegionFileSystem instance
   * exists; reads the retry settings directly from the configuration. Creates
   * a directory, retrying on IOException; if mkdirs throws but the directory
   * exists, that attempt counts as success.
   * @param fs filesystem to create the directory on
   * @param conf configuration supplying "hdfs.client.retries.number" and
   *        "hdfs.client.sleep.before.retries"
   * @param dir directory path to create
   * @return the result of fs.mkdirs(), or true if the dir exists after a failed attempt
   * @throws IOException wrapping the last failure once retries are exhausted
   */
  private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
      throws IOException {
    int i = 0;
    IOException lastIOE = null;
    int hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
    int baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
    do {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (fs.exists(dir)) return true; // directory is present
        try {
          sleepBeforeRetry("Create Directory", i+1, baseSleepBeforeRetries, hdfsClientRetriesNumber);
        } catch (InterruptedException e) {
          // preserve interrupt status by rethrowing as InterruptedIOException
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
      }
    } while (++i <= hdfsClientRetriesNumber);

    throw new IOException("Exception in createDir", lastIOE);
  }
1078
1079
1080
1081
1082
  /**
   * Sleep for the given base interval scaled linearly by the attempt number;
   * does nothing (beyond a debug log) once the attempt number exceeds the
   * configured retry count.
   * @param msg operation description used in log output
   * @param sleepMultiplier attempt number (1-based)
   * @param baseSleepBeforeRetries base sleep in milliseconds
   * @param hdfsClientRetriesNumber maximum number of retries
   */
  private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries,
      int hdfsClientRetriesNumber) throws InterruptedException {
    if (sleepMultiplier > hdfsClientRetriesNumber) {
      LOG.debug(msg + ", retries exhausted");
      return;
    }
    LOG.debug(msg + ", sleeping " + baseSleepBeforeRetries + " times " + sleepMultiplier);
    // cast to long before multiplying to avoid int overflow for large settings
    Thread.sleep((long)baseSleepBeforeRetries * sleepMultiplier);
  }
1092 }