Formatting using clusterid: testClusterID
2014-09-03 14:59:28,775 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:649)] fsLock is fair:true
2014-09-03 14:59:28,776 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.HostFileManager.refresh(HostFileManager.java:304)] read includes: HostSet( )
2014-09-03 14:59:28,777 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.HostFileManager.refresh(HostFileManager.java:311)] read excludes: HostSet( )
2014-09-03 14:59:28,777 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager.<init>(DatanodeManager.java:229)] dfs.block.invalidate.limit=1000
2014-09-03 14:59:28,777 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager.<init>(DatanodeManager.java:235)] dfs.namenode.datanode.registration.ip-hostname-check=true
2014-09-03 14:59:28,777 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:349)] Computing capacity for map BlocksMap
2014-09-03 14:59:28,778 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:350)] VM type = 64-bit
2014-09-03 14:59:28,778 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:351)] 2.0% max memory 455 MB = 9.1 MB
2014-09-03 14:59:28,778 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:356)] capacity = 2^20 = 1048576 entries
2014-09-03 14:59:28,779 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.createBlockTokenSecretManager(BlockManager.java:328)] dfs.block.access.token.enable=false
2014-09-03 14:59:28,780 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:313)] defaultReplication = 1
2014-09-03 14:59:28,780 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:314)] maxReplication = 512
2014-09-03 14:59:28,781 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:315)] minReplication = 1
2014-09-03 14:59:28,781 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:316)] maxReplicationStreams = 2
2014-09-03 14:59:28,781 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:317)] shouldCheckForEnoughRacks = false
2014-09-03 14:59:28,781 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:318)] replicationRecheckInterval = 3000
2014-09-03 14:59:28,782 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:319)] encryptDataTransfer = false
2014-09-03 14:59:28,782 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:320)] maxNumBlocksToLog = 1000
2014-09-03 14:59:28,782 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:666)] fsOwner = cloudera (auth:SIMPLE)
2014-09-03 14:59:28,783 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:667)] supergroup = supergroup
2014-09-03 14:59:28,783 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:668)] isPermissionEnabled = true
2014-09-03 14:59:28,783 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:679)] HA Enabled: false
2014-09-03 14:59:28,783 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:716)] Append Enabled: true
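
Note on the LightWeightGSet capacity lines above: they follow a simple rule, namely take the configured percentage of the JVM's max heap, divide by the per-entry reference size (8 bytes on a 64-bit VM), and round down to a power of two. A minimal sketch of that arithmetic, reproducing the BlocksMap numbers from this log; the class and method names here are illustrative, not Hadoop's actual code, which lives in org.apache.hadoop.util.LightWeightGSet:

    // Sketch only: reproduces "2.0% max memory 455 MB = 9.1 MB ... capacity = 2^20".
    public class CapacityDemo {
        static int computeCapacity(long maxMemoryBytes, double percentage, int refSizeBytes) {
            double memory = maxMemoryBytes * (percentage / 100.0);  // 2% of 455 MB ~= 9.1 MB
            long entries = (long) (memory / refSizeBytes);          // 9.1 MB / 8 B ~= 1.19M entries
            int exponent = 63 - Long.numberOfLeadingZeros(entries); // largest power of two <= entries
            return 1 << exponent;                                   // 2^20
        }

        public static void main(String[] args) {
            long maxMemory = 455L * 1024 * 1024;                    // the 455 MB heap from the log
            System.out.println(computeCapacity(maxMemory, 2.0, 8)); // prints 1048576
        }
    }

The same formula accounts for the INodeMap (1.0% -> 2^19), cachedBlocks (0.25% -> 2^17), and retry cache (0.03% -> 2^14) lines below.
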
2014-09-03 14:59:28,783 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:349)] Computing capacity for map INodeMap
2014-09-03 14:59:28,783 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:350)] VM type = 64-bit
2014-09-03 14:59:28,784 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:351)] 1.0% max memory 455 MB = 4.5 MB
2014-09-03 14:59:28,784 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:356)] capacity = 2^19 = 524288 entries
2014-09-03 14:59:28,784 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSDirectory.<init>(FSDirectory.java:189)] Caching file names occuring more than 10 times
2014-09-03 14:59:28,785 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:349)] Computing capacity for map cachedBlocks
2014-09-03 14:59:28,785 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:350)] VM type = 64-bit
2014-09-03 14:59:28,785 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:351)] 0.25% max memory 455 MB = 1.1 MB
2014-09-03 14:59:28,786 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:356)] capacity = 2^17 = 131072 entries
2014-09-03 14:59:28,786 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem$SafeModeInfo.<init>(FSNamesystem.java:4614)] dfs.namenode.safemode.threshold-pct = 0.9990000128746033
2014-09-03 14:59:28,786 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem$SafeModeInfo.<init>(FSNamesystem.java:4615)] dfs.namenode.safemode.min.datanodes = 0
2014-09-03 14:59:28,786 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem$SafeModeInfo.<init>(FSNamesystem.java:4616)] dfs.namenode.safemode.extension = 0
2014-09-03 14:59:28,786 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.initRetryCache(FSNamesystem.java:787)] Retry cache on namenode is enabled
2014-09-03 14:59:28,786 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.initRetryCache(FSNamesystem.java:795)] Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
2014-09-03 14:59:28,786 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:349)] Computing capacity for map Namenode Retry Cache
2014-09-03 14:59:28,786 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:350)] VM type = 64-bit
2014-09-03 14:59:28,786 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:351)] 0.029999999329447746% max memory 455 MB = 139.8 KB
2014-09-03 14:59:28,787 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:356)] capacity = 2^14 = 16384 entries
2014-09-03 14:59:28,792 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:527)] Storage directory /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name1 has been successfully formatted.
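
Two of the values above look odd at first glance: dfs.namenode.safemode.threshold-pct is configured as 0.999 and the retry cache uses 0.03 of the heap, yet the log prints 0.9990000128746033 and 0.029999999329447746. These are almost certainly float-to-double widening artifacts, since neither decimal is exactly representable in binary floating point and Hadoop reads these settings as Java floats. A one-file demonstration:

    // Widening a float to a double (which the log formatting does implicitly)
    // exposes the binary rounding of the configured decimal values.
    public class FloatWidening {
        public static void main(String[] args) {
            float threshold = 0.999f;
            float retryCachePct = 0.03f;
            System.out.println((double) threshold);      // 0.9990000128746033
            System.out.println((double) retryCachePct);  // 0.029999999329447746
        }
    }

So the odd decimals are cosmetic, not a misconfiguration.
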
2014-09-03 14:59:28,794 (DataNode: [[[DISK]file:/home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/data/data1/, [DISK]file:/home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/data/data2/]] heartbeating to quickstart.cloudera:36446) [WARN - org.apache.hadoop.hdfs.server.datanode.BPServiceActor.retrieveNamespaceInfo(BPServiceActor.java:174)] Problem connecting to server: quickstart.cloudera:36446
2014-09-03 14:59:28,798 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:527)] Storage directory /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name2 has been successfully formatted.
2014-09-03 14:59:28,806 (FSImageSaver for /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name2 of type IMAGE_AND_EDITS) [INFO - org.apache.hadoop.hdfs.server.namenode.FSImageFormat$Saver.save(FSImageFormat.java:1039)] Saving image file /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name2/current/fsimage.ckpt_0000000000000000000 using no compression
2014-09-03 14:59:28,806 (FSImageSaver for /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name1 of type IMAGE_AND_EDITS) [INFO - org.apache.hadoop.hdfs.server.namenode.FSImageFormat$Saver.save(FSImageFormat.java:1039)] Saving image file /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name1/current/fsimage.ckpt_0000000000000000000 using no compression
2014-09-03 14:59:28,816 (FSImageSaver for /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name2 of type IMAGE_AND_EDITS) [INFO - org.apache.hadoop.hdfs.server.namenode.FSImageFormat$Saver.save(FSImageFormat.java:1077)] Image file /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name2/current/fsimage.ckpt_0000000000000000000 of size 220 bytes saved in 0 seconds.
2014-09-03 14:59:28,828 (FSImageSaver for /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name1 of type IMAGE_AND_EDITS) [INFO - org.apache.hadoop.hdfs.server.namenode.FSImageFormat$Saver.save(FSImageFormat.java:1077)] Image file /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name1/current/fsimage.ckpt_0000000000000000000 of size 220 bytes saved in 0 seconds.
2014-09-03 14:59:28,832 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.getImageTxIdToRetain(NNStorageRetentionManager.java:177)] Going to retain 1 images with txid >= 0
2014-09-03 14:59:28,833 (main) [INFO - org.apache.hadoop.metrics2.impl.MetricsSystemImpl.init(MetricsSystemImpl.java:151)] NameNode metrics system started (again)
2014-09-03 14:59:28,844 (main) [INFO - org.apache.hadoop.hdfs.DFSUtil.httpServerTemplateForNNAndJN(DFSUtil.java:1581)] Starting web server as: ${dfs.web.authentication.kerberos.principal}
2014-09-03 14:59:28,844 (main) [INFO - org.apache.hadoop.hdfs.DFSUtil.httpServerTemplateForNNAndJN(DFSUtil.java:1592)] Starting Web-server for hdfs at: http://127.0.0.1:0
2014-09-03 14:59:28,844 (main) [INFO - org.apache.hadoop.http.HttpRequestLog.getRequestLog(HttpRequestLog.java:80)] Http request log for http.requests.namenode is not defined
2014-09-03 14:59:28,845 (main) [INFO - org.apache.hadoop.http.HttpServer2.addGlobalFilter(HttpServer2.java:658)] Added global filter 'safety' (class=org.apache.hadoop.http.HttpServer2$QuotingInputFilter)
2014-09-03 14:59:28,845 (main) [INFO - org.apache.hadoop.http.HttpServer2.addFilter(HttpServer2.java:636)] Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context hdfs
2014-09-03 14:59:28,845 (main) [INFO - org.apache.hadoop.http.HttpServer2.addFilter(HttpServer2.java:643)] Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context static
2014-09-03 14:59:28,846 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer.initWebHdfs(NameNodeHttpServer.java:81)] Added filter 'SPNEGO' (class=org.apache.hadoop.hdfs.web.AuthFilter)
2014-09-03 14:59:28,846 (main) [INFO - org.apache.hadoop.http.HttpServer2.addJerseyResourcePackage(HttpServer2.java:562)] addJerseyResourcePackage: packageName=org.apache.hadoop.hdfs.server.namenode.web.resources;org.apache.hadoop.hdfs.web.resources, pathSpec=/webhdfs/v1/*
2014-09-03 14:59:28,846 (main) [INFO - org.apache.hadoop.http.HttpServer2.openListeners(HttpServer2.java:846)] Jetty bound to port 43458
2014-09-03 14:59:28,846 (main) [INFO - org.mortbay.log.Slf4jLog.info(Slf4jLog.java:67)] jetty-6.1.26
2014-09-03 14:59:28,850 (main) [INFO - org.mortbay.log.Slf4jLog.info(Slf4jLog.java:67)] Extract jar:file:/home/cloudera/.m2/repository/org/apache/hadoop/hadoop-hdfs/2.3.0/hadoop-hdfs-2.3.0-tests.jar!/webapps/hdfs to /tmp/Jetty_127_0_0_1_43458_hdfs____fc29jt/webapp
2014-09-03 14:59:28,912 (main) [WARN - org.apache.hadoop.security.authentication.server.AuthenticationFilter.init(AuthenticationFilter.java:158)] 'signature.secret' configuration not set, using a random value as secret
2014-09-03 14:59:28,914 (main) [INFO - org.mortbay.log.Slf4jLog.info(Slf4jLog.java:67)] Started SelectChannelConnector@127.0.0.1:43458
2014-09-03 14:59:28,916 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:649)] fsLock is fair:true
2014-09-03 14:59:28,917 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.HostFileManager.refresh(HostFileManager.java:304)] read includes: HostSet( )
2014-09-03 14:59:28,917 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.HostFileManager.refresh(HostFileManager.java:311)] read excludes: HostSet( )
2014-09-03 14:59:28,917 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager.<init>(DatanodeManager.java:229)] dfs.block.invalidate.limit=1000
2014-09-03 14:59:28,917 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager.<init>(DatanodeManager.java:235)] dfs.namenode.datanode.registration.ip-hostname-check=true
2014-09-03 14:59:28,918 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:349)] Computing capacity for map BlocksMap
2014-09-03 14:59:28,918 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:350)] VM type = 64-bit
2014-09-03 14:59:28,918 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:351)] 2.0% max memory 455 MB = 9.1 MB
2014-09-03 14:59:28,918 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:356)] capacity = 2^20 = 1048576 entries
2014-09-03 14:59:28,919 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.createBlockTokenSecretManager(BlockManager.java:328)] dfs.block.access.token.enable=false
2014-09-03 14:59:28,919 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:313)] defaultReplication = 1
2014-09-03 14:59:28,919 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:314)] maxReplication = 512
2014-09-03 14:59:28,919 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:315)] minReplication = 1
2014-09-03 14:59:28,920 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:316)] maxReplicationStreams = 2
2014-09-03 14:59:28,920 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:317)] shouldCheckForEnoughRacks = false
2014-09-03 14:59:28,920 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:318)] replicationRecheckInterval = 3000
2014-09-03 14:59:28,920 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:319)] encryptDataTransfer = false
2014-09-03 14:59:28,920 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.<init>(BlockManager.java:320)] maxNumBlocksToLog = 1000
2014-09-03 14:59:28,920 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:666)] fsOwner = cloudera (auth:SIMPLE)
2014-09-03 14:59:28,921 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:667)] supergroup = supergroup
2014-09-03 14:59:28,921 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:668)] isPermissionEnabled = true
2014-09-03 14:59:28,921 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:679)] HA Enabled: false
2014-09-03 14:59:28,921 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:716)] Append Enabled: true
2014-09-03 14:59:28,922 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:349)] Computing capacity for map INodeMap
2014-09-03 14:59:28,922 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:350)] VM type = 64-bit
2014-09-03 14:59:28,922 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:351)] 1.0% max memory 455 MB = 4.5 MB
2014-09-03 14:59:28,923 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:356)] capacity = 2^19 = 524288 entries
2014-09-03 14:59:28,923 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSDirectory.<init>(FSDirectory.java:189)] Caching file names occuring more than 10 times
2014-09-03 14:59:28,923 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:349)] Computing capacity for map cachedBlocks
2014-09-03 14:59:28,923 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:350)] VM type = 64-bit
2014-09-03 14:59:28,923 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:351)] 0.25% max memory 455 MB = 1.1 MB
2014-09-03 14:59:28,924 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:356)] capacity = 2^17 = 131072 entries
2014-09-03 14:59:28,924 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem$SafeModeInfo.<init>(FSNamesystem.java:4614)] dfs.namenode.safemode.threshold-pct = 0.9990000128746033
2014-09-03 14:59:28,924 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem$SafeModeInfo.<init>(FSNamesystem.java:4615)] dfs.namenode.safemode.min.datanodes = 0
2014-09-03 14:59:28,924 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem$SafeModeInfo.<init>(FSNamesystem.java:4616)] dfs.namenode.safemode.extension = 0
2014-09-03 14:59:28,925 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.initRetryCache(FSNamesystem.java:787)] Retry cache on namenode is enabled
2014-09-03 14:59:28,925 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.initRetryCache(FSNamesystem.java:795)] Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
2014-09-03 14:59:28,925 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:349)] Computing capacity for map Namenode Retry Cache
2014-09-03 14:59:28,925 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:350)] VM type = 64-bit
2014-09-03 14:59:28,925 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:351)] 0.029999999329447746% max memory 455 MB = 139.8 KB
2014-09-03 14:59:28,926 (main) [INFO - org.apache.hadoop.util.LightWeightGSet.computeCapacity(LightWeightGSet.java:356)] capacity = 2^14 = 16384 entries
2014-09-03 14:59:28,929 (main) [INFO - org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.tryLock(Storage.java:675)] Lock on /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name1/in_use.lock acquired by nodename 10389@quickstart.cloudera
2014-09-03 14:59:28,931 (main) [INFO - org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.tryLock(Storage.java:675)] Lock on /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name2/in_use.lock acquired by nodename 10389@quickstart.cloudera
2014-09-03 14:59:28,932 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FileJournalManager.recoverUnfinalizedSegments(FileJournalManager.java:294)] Recovering unfinalized segments in /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name1/current
2014-09-03 14:59:28,932 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FileJournalManager.recoverUnfinalizedSegments(FileJournalManager.java:294)] Recovering unfinalized segments in /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name2/current
2014-09-03 14:59:28,932 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:631)] No edit log streams selected.
2014-09-03 14:59:28,933 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSImageFormat$Loader.load(FSImageFormat.java:336)] Loading image file /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name1/current/fsimage_0000000000000000000 using no compression
2014-09-03 14:59:28,933 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSImageFormat$Loader.load(FSImageFormat.java:339)] Number of files = 1
2014-09-03 14:59:28,933 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSImageFormat$Loader.loadFilesUnderConstruction(FSImageFormat.java:837)] Number of files under construction = 0
2014-09-03 14:59:28,933 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSImageFormat$Loader.load(FSImageFormat.java:373)] Image file /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name1/current/fsimage_0000000000000000000 of size 220 bytes loaded in 0 seconds.
2014-09-03 14:59:28,934 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:841)] Loaded image for txid 0 from /home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/name1/current/fsimage_0000000000000000000
2014-09-03 14:59:28,934 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSEditLog.startLogSegment(FSEditLog.java:1074)] Starting log segment at 1
2014-09-03 14:59:28,951 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.NameCache.initialized(NameCache.java:143)] initialized with 0 entries 0 lookups
2014-09-03 14:59:28,952 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:617)] Finished loading FSImage in 26 msecs
2014-09-03 14:59:28,952 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.<init>(NameNodeRpcServer.java:279)] RPC server is binding to 127.0.0.1:0
2014-09-03 14:59:28,958 (Socket Reader #1 for port 51099) [INFO - org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:551)] Starting Socket Reader #1 for port 51099
2014-09-03 14:59:28,964 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.registerMBean(FSNamesystem.java:5560)] Registered FSNamesystemState MBean
2014-09-03 14:59:28,972 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getCompleteBlocksTotal(FSNamesystem.java:5217)] Number of blocks under construction: 0
2014-09-03 14:59:28,972 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getCompleteBlocksTotal(FSNamesystem.java:5217)] Number of blocks under construction: 0
2014-09-03 14:59:28,972 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem$SafeModeInfo.initializeReplQueues(FSNamesystem.java:4727)] initializing replication queues
2014-09-03 14:59:28,973 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.processMisReplicatedBlocks(BlockManager.java:2380)] Total number of blocks = 0
2014-09-03 14:59:28,974 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.processMisReplicatedBlocks(BlockManager.java:2381)] Number of invalid blocks = 0
2014-09-03 14:59:28,974 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.processMisReplicatedBlocks(BlockManager.java:2382)] Number of under-replicated blocks = 0
2014-09-03 14:59:28,974 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.processMisReplicatedBlocks(BlockManager.java:2383)] Number of over-replicated blocks = 0
2014-09-03 14:59:28,974 (main) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.processMisReplicatedBlocks(BlockManager.java:2385)] Number of blocks being written = 0
2014-09-03 14:59:28,974 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem$SafeModeInfo.initializeReplQueues(FSNamesystem.java:4732)] STATE* Replication Queue initialization scan for invalid, over- and under-replicated blocks completed in 1 msec
2014-09-03 14:59:28,974 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem$SafeModeInfo.leave(FSNamesystem.java:4696)] STATE* Leaving safe mode after 0 secs
2014-09-03 14:59:28,974 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem$SafeModeInfo.leave(FSNamesystem.java:4707)] STATE* Network topology has 0 racks and 0 datanodes
2014-09-03 14:59:28,975 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem$SafeModeInfo.leave(FSNamesystem.java:4710)] STATE* UnderReplicatedBlocks has 0 blocks
2014-09-03 14:59:28,982 (IPC Server Responder) [INFO - org.apache.hadoop.ipc.Server$Responder.run(Server.java:773)] IPC Server Responder: starting
2014-09-03 14:59:28,983 (IPC Server listener on 51099) [INFO - org.apache.hadoop.ipc.Server$Listener.run(Server.java:620)] IPC Server listener on 51099: starting
2014-09-03 14:59:28,989 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.NameNode.startCommonServices(NameNode.java:544)] NameNode RPC up at: quickstart.cloudera/127.0.0.1:51099
2014-09-03 14:59:28,993 (main) [INFO - org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startActiveServices(FSNamesystem.java:949)] Starting services required for active state
2014-09-03 14:59:29,000 (CacheReplicationMonitor(1259954454)) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:158)] Starting CacheReplicationMonitor with interval 300000 milliseconds
2014-09-03 14:59:29,000 (CacheReplicationMonitor(1259954454)) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:171)] Rescanning because of pending operations
2014-09-03 14:59:29,000 (main) [INFO - org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:1190)] Starting DataNode 0 with dfs.datanode.data.dir: [DISK]file:/home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/data/data1,[DISK]file:/home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/data/data2
2014-09-03 14:59:29,000 (CacheReplicationMonitor(1259954454)) [INFO - org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:201)] Scanned 0 directive(s) and 0 block(s) in 0 millisecond(s).
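
For context on what the test harness is doing at this point: the stack trace at the end of this log shows org.kitesdk.data.MiniDFSTest.setupFS constructing a MiniDFSCluster, which formats and starts the NameNode (everything above) and then starts DataNode 0 (everything below). A minimal sketch of that lifecycle using the Builder API from the hadoop-hdfs test jar; the Kite harness calls an older MiniDFSCluster constructor, per the trace, but the steps are the same:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Sketch of an in-process HDFS cluster bring-up like the one in this log.
    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .numDataNodes(1)       // the log shows "Starting DataNode 0"
                .build();
            try {
                cluster.waitActive();  // the call that fails below with UnknownHostException
                FileSystem fs = cluster.getFileSystem();
                System.out.println("HDFS up at: " + fs.getUri());
            } finally {
                cluster.shutdown();
            }
        }
    }
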
2014-09-03 14:59:29,011 (main) [INFO - org.apache.hadoop.metrics2.impl.MetricsSystemImpl.init(MetricsSystemImpl.java:151)] DataNode metrics system started (again)
2014-09-03 14:59:29,011 (main) [INFO - org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:315)] Configured hostname is 127.0.0.1
2014-09-03 14:59:29,012 (main) [INFO - org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:763)] Starting DataNode with maxLockedMemory = 0
2014-09-03 14:59:29,012 (main) [INFO - org.apache.hadoop.hdfs.server.datanode.DataNode.initDataXceiver(DataNode.java:564)] Opened streaming server at /127.0.0.1:37965
2014-09-03 14:59:29,013 (main) [INFO - org.apache.hadoop.hdfs.server.datanode.DataXceiverServer$BlockBalanceThrottler.<init>(DataXceiverServer.java:73)] Balancing bandwith is 1048576 bytes/s
2014-09-03 14:59:29,013 (main) [INFO - org.apache.hadoop.http.HttpRequestLog.getRequestLog(HttpRequestLog.java:80)] Http request log for http.requests.datanode is not defined
2014-09-03 14:59:29,014 (main) [INFO - org.apache.hadoop.http.HttpServer2.addGlobalFilter(HttpServer2.java:658)] Added global filter 'safety' (class=org.apache.hadoop.http.HttpServer2$QuotingInputFilter)
2014-09-03 14:59:29,014 (main) [INFO - org.apache.hadoop.http.HttpServer2.addFilter(HttpServer2.java:636)] Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context datanode
2014-09-03 14:59:29,014 (main) [INFO - org.apache.hadoop.http.HttpServer2.addFilter(HttpServer2.java:643)] Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context static
2014-09-03 14:59:29,015 (main) [INFO - org.apache.hadoop.http.HttpServer2.addJerseyResourcePackage(HttpServer2.java:562)] addJerseyResourcePackage: packageName=org.apache.hadoop.hdfs.server.datanode.web.resources;org.apache.hadoop.hdfs.web.resources, pathSpec=/webhdfs/v1/*
2014-09-03 14:59:29,015 (main) [INFO - org.apache.hadoop.http.HttpServer2.openListeners(HttpServer2.java:846)] Jetty bound to port 56017
2014-09-03 14:59:29,015 (main) [INFO - org.mortbay.log.Slf4jLog.info(Slf4jLog.java:67)] jetty-6.1.26
2014-09-03 14:59:29,019 (main) [INFO - org.mortbay.log.Slf4jLog.info(Slf4jLog.java:67)] Extract jar:file:/home/cloudera/.m2/repository/org/apache/hadoop/hadoop-hdfs/2.3.0/hadoop-hdfs-2.3.0-tests.jar!/webapps/datanode to /tmp/Jetty_127_0_0_1_56017_datanode____.kk55n1/webapp
2014-09-03 14:59:29,110 (main) [INFO - org.mortbay.log.Slf4jLog.info(Slf4jLog.java:67)] Started SelectChannelConnector@127.0.0.1:56017
2014-09-03 14:59:29,113 (Socket Reader #1 for port 50079) [INFO - org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:551)] Starting Socket Reader #1 for port 50079
2014-09-03 14:59:29,120 (main) [INFO - org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:466)] Opened IPC server at /127.0.0.1:50079
2014-09-03 14:59:29,128 (main) [INFO - org.apache.hadoop.hdfs.server.datanode.BlockPoolManager.refreshNamenodes(BlockPoolManager.java:148)] Refresh request received for nameservices: null
2014-09-03 14:59:29,129 (main) [WARN - org.apache.hadoop.hdfs.DFSUtil.getAddressesForNameserviceId(DFSUtil.java:591)] Namenode for null remains unresolved for ID null. Check your hdfs-site.xml file to ensure namenodes are configured properly.
2014-09-03 14:59:29,129 (main) [INFO - org.apache.hadoop.hdfs.server.datanode.BlockPoolManager.doRefreshNamenodes(BlockPoolManager.java:193)] Starting BPOfferServices for nameservices:
2014-09-03 14:59:29,130 (DataNode: [[[DISK]file:/home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/data/data1/, [DISK]file:/home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/data/data2/]] heartbeating to quickstart.cloudera:51099) [INFO - org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:768)] Block pool <registering> (Datanode Uuid unassigned) service to quickstart.cloudera:51099 starting to offer service
2014-09-03 14:59:29,131 (IPC Server Responder) [INFO - org.apache.hadoop.ipc.Server$Responder.run(Server.java:773)] IPC Server Responder: starting
2014-09-03 14:59:29,131 (IPC Server listener on 50079) [INFO - org.apache.hadoop.ipc.Server$Listener.run(Server.java:620)] IPC Server listener on 50079: starting
2014-09-03 14:59:29,147 (DataNode: [[[DISK]file:/home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/data/data1/, [DISK]file:/home/cloudera/kite-release-0.16.0/kite-data/kite-data-core/build/test/data/dfs/data/data2/]] heartbeating to quickstart.cloudera:51099) [WARN - org.apache.hadoop.hdfs.server.datanode.BPServiceActor.retrieveNamespaceInfo(BPServiceActor.java:174)] Problem connecting to server: quickstart.cloudera:51099
2014-09-03 14:59:29,149 (main) [WARN - org.apache.hadoop.hdfs.MiniDFSCluster.waitActive(MiniDFSCluster.java:1926)] Tried waitActive() 2 time(s) and failed, giving up.
java.net.UnknownHostException: Invalid host name: local host is: (unknown); destination host is: "quickstart.cloudera":51099; java.net.UnknownHostException; For more details see: http://wiki.apache.org/hadoop/UnknownHost
    at sun.reflect.GeneratedConstructorAccessor29.newInstance(Unknown Source)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
    at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:783)
    at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:742)
    at org.apache.hadoop.ipc.Client$Connection.<init>(Client.java:400)
    at org.apache.hadoop.ipc.Client.getConnection(Client.java:1448)
    at org.apache.hadoop.ipc.Client.call(Client.java:1377)
    at org.apache.hadoop.ipc.Client.call(Client.java:1359)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
    at com.sun.proxy.$Proxy24.getDatanodeReport(Unknown Source)
    at sun.reflect.GeneratedMethodAccessor17.invoke(Unknown Source)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:186)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
    at com.sun.proxy.$Proxy24.getDatanodeReport(Unknown Source)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getDatanodeReport(ClientNamenodeProtocolTranslatorPB.java:555)
    at org.apache.hadoop.hdfs.DFSClient.datanodeReport(DFSClient.java:2189)
    at org.apache.hadoop.hdfs.MiniDFSCluster.waitActive(MiniDFSCluster.java:1901)
    at org.apache.hadoop.hdfs.MiniDFSCluster.waitActive(MiniDFSCluster.java:1920)
    at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:1238)
    at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:684)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:603)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:474)
    at org.kitesdk.data.MiniDFSTest.setupFS(MiniDFSTest.java:57)
    at org.kitesdk.data.TestDatasetDescriptor.testSchemaFromHdfs(TestDatasetDescriptor.java:54)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:45)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:42)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:20)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:263)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:68)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:47)
    at org.junit.runners.ParentRunner$3.run(ParentRunner.java:231)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:60)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:229)
    at org.junit.runners.ParentRunner.access$000(ParentRunner.java:50)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:222)
    at org.junit.runners.ParentRunner.run(ParentRunner.java:300)
    at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
    at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
    at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.maven.surefire.util.ReflectionUtils.invokeMethodWithArray2(ReflectionUtils.java:208)
    at org.apache.maven.surefire.booter.ProviderFactory$ProviderProxy.invoke(ProviderFactory.java:159)
    at org.apache.maven.surefire.booter.ProviderFactory.invokeProvider(ProviderFactory.java:87)
    at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
    at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:95)
Caused by: java.net.UnknownHostException
    ... 51 more
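
The actual failure is the UnknownHostException above: the IPC client reports "local host is: (unknown)" and cannot reach "quickstart.cloudera":51099, so MiniDFSCluster.waitActive() gives up after two tries. Everything earlier in the log (NameNode format, image save and load, DataNode start) succeeded; only hostname resolution failed, which is exactly the situation described by the wiki page the exception links to (http://wiki.apache.org/hadoop/UnknownHost). A quick way to confirm the diagnosis from the same machine; the hostname and the /etc/hosts remedy in the comments are assumptions based on the Cloudera QuickStart VM setup, not something stated in this log:

    import java.net.InetAddress;

    // If this also throws UnknownHostException, the JVM cannot resolve
    // quickstart.cloudera, matching the waitActive() failure above. The usual
    // remedy on the QuickStart VM is an /etc/hosts entry such as:
    //     127.0.0.1   quickstart.cloudera
    public class HostCheck {
        public static void main(String[] args) throws Exception {
            InetAddress addr = InetAddress.getByName("quickstart.cloudera");
            System.out.println("resolved to: " + addr.getHostAddress());
        }
    }
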