aboutsummaryrefslogtreecommitdiff
path: root/aarch64/share/hadoop
diff options
context:
space:
mode:
Diffstat (limited to 'aarch64/share/hadoop')
-rw-r--r--aarch64/share/hadoop/common/hadoop-common-2.2.0-tests.jarbin0 -> 1352335 bytes
-rw-r--r--aarch64/share/hadoop/common/hadoop-common-2.2.0.jarbin0 -> 2677324 bytes
-rw-r--r--aarch64/share/hadoop/common/hadoop-nfs-2.2.0.jarbin0 -> 139540 bytes
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop-core_0.20.0.xml32308
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop-core_0.21.0.xml25944
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop-core_0.22.0.xml28377
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop_0.17.0.xml43272
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop_0.18.1.xml44778
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop_0.18.2.xml38788
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop_0.18.3.xml38826
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop_0.19.0.xml43972
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop_0.19.1.xml44195
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop_0.19.2.xml44204
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop_0.20.0.xml52140
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop_0.20.1.xml53832
-rw-r--r--aarch64/share/hadoop/common/jdiff/hadoop_0.20.2.xml53959
-rw-r--r--aarch64/share/hadoop/common/lib/activation-1.1.jarbin0 -> 62983 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/asm-3.2.jarbin0 -> 43398 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/avro-1.7.4.jarbin0 -> 303139 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-beanutils-1.7.0.jarbin0 -> 188671 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jarbin0 -> 206035 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-cli-1.2.jarbin0 -> 41123 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-codec-1.4.jarbin0 -> 58160 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-collections-3.2.1.jarbin0 -> 575389 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-compress-1.4.1.jarbin0 -> 241367 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-configuration-1.6.jarbin0 -> 298829 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-digester-1.8.jarbin0 -> 143602 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-el-1.0.jarbin0 -> 112341 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-httpclient-3.1.jarbin0 -> 305001 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-io-2.1.jarbin0 -> 163151 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-lang-2.5.jarbin0 -> 279193 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-logging-1.1.1.jarbin0 -> 60686 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-math-2.1.jarbin0 -> 832410 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/commons-net-3.1.jarbin0 -> 273370 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/guava-11.0.2.jarbin0 -> 1648200 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/hadoop-annotations-2.2.0.jarbin0 -> 16781 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/hadoop-auth-2.2.0.jarbin0 -> 49779 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jackson-core-asl-1.8.8.jarbin0 -> 227500 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jackson-jaxrs-1.8.8.jarbin0 -> 17884 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jackson-mapper-asl-1.8.8.jarbin0 -> 668564 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jackson-xc-1.8.8.jarbin0 -> 32353 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jasper-compiler-5.5.23.jarbin0 -> 408133 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jasper-runtime-5.5.23.jarbin0 -> 76844 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jaxb-api-2.2.2.jarbin0 -> 105134 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jarbin0 -> 890168 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jersey-core-1.9.jarbin0 -> 458739 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jersey-json-1.9.jarbin0 -> 147952 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jersey-server-1.9.jarbin0 -> 713089 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jets3t-0.6.1.jarbin0 -> 321806 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jettison-1.1.jarbin0 -> 67758 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jetty-6.1.26.jarbin0 -> 539912 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jetty-util-6.1.26.jarbin0 -> 177131 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jsch-0.1.42.jarbin0 -> 185746 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jsp-api-2.1.jarbin0 -> 100636 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/jsr305-1.3.9.jarbin0 -> 33015 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/junit-4.8.2.jarbin0 -> 237344 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/log4j-1.2.17.jarbin0 -> 489884 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/mockito-all-1.8.5.jarbin0 -> 1419869 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/netty-3.6.2.Final.jarbin0 -> 1199572 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/paranamer-2.3.jarbin0 -> 29555 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/protobuf-java-2.5.0.jarbin0 -> 533455 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/servlet-api-2.5.jarbin0 -> 105112 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/slf4j-api-1.7.5.jarbin0 -> 26084 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/slf4j-log4j12-1.7.5.jarbin0 -> 8869 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/snappy-java-1.0.4.1.jarbin0 -> 995968 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/stax-api-1.0.1.jarbin0 -> 26514 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/xmlenc-0.52.jarbin0 -> 15010 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/xz-1.0.jarbin0 -> 94672 bytes
-rw-r--r--aarch64/share/hadoop/common/lib/zookeeper-3.4.5.jarbin0 -> 779974 bytes
-rw-r--r--aarch64/share/hadoop/common/sources/hadoop-common-2.2.0-sources.jarbin0 -> 1681090 bytes
-rw-r--r--aarch64/share/hadoop/common/sources/hadoop-common-2.2.0-test-sources.jarbin0 -> 746234 bytes
-rw-r--r--aarch64/share/hadoop/common/templates/core-site.xml20
-rw-r--r--aarch64/share/hadoop/hdfs/hadoop-hdfs-2.2.0-tests.jarbin0 -> 1988555 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/hadoop-hdfs-2.2.0.jarbin0 -> 5242564 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/hadoop-hdfs-nfs-2.2.0.jarbin0 -> 71670 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.20.0.xml10389
-rw-r--r--aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.21.0.xml16220
-rw-r--r--aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.22.0.xml18589
-rw-r--r--aarch64/share/hadoop/hdfs/lib/asm-3.2.jarbin0 -> 43398 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/commons-cli-1.2.jarbin0 -> 41123 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/commons-codec-1.4.jarbin0 -> 58160 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/commons-daemon-1.0.13.jarbin0 -> 24239 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/commons-el-1.0.jarbin0 -> 112341 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/commons-io-2.1.jarbin0 -> 163151 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/commons-lang-2.5.jarbin0 -> 279193 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/commons-logging-1.1.1.jarbin0 -> 60686 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/guava-11.0.2.jarbin0 -> 1648200 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/jackson-core-asl-1.8.8.jarbin0 -> 227500 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/jackson-mapper-asl-1.8.8.jarbin0 -> 668564 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/jasper-runtime-5.5.23.jarbin0 -> 76844 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/jersey-core-1.9.jarbin0 -> 458739 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/jersey-server-1.9.jarbin0 -> 713089 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/jetty-6.1.26.jarbin0 -> 539912 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/jetty-util-6.1.26.jarbin0 -> 177131 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/jsp-api-2.1.jarbin0 -> 100636 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/jsr305-1.3.9.jarbin0 -> 33015 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/log4j-1.2.17.jarbin0 -> 489884 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/netty-3.6.2.Final.jarbin0 -> 1199572 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jarbin0 -> 533455 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/servlet-api-2.5.jarbin0 -> 105112 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/lib/xmlenc-0.52.jarbin0 -> 15010 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/sources/hadoop-hdfs-2.2.0-sources.jarbin0 -> 1979061 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/sources/hadoop-hdfs-2.2.0-test-sources.jarbin0 -> 1300644 bytes
-rw-r--r--aarch64/share/hadoop/hdfs/templates/hdfs-site.xml21
-rw-r--r--aarch64/share/hadoop/hdfs/webapps/datanode/WEB-INF/web.xml59
-rw-r--r--aarch64/share/hadoop/hdfs/webapps/datanode/robots.txt2
-rw-r--r--aarch64/share/hadoop/hdfs/webapps/hdfs/WEB-INF/web.xml109
-rw-r--r--aarch64/share/hadoop/hdfs/webapps/hdfs/decommission.xsl139
-rw-r--r--aarch64/share/hadoop/hdfs/webapps/hdfs/dfsclusterhealth.xsl170
-rw-r--r--aarch64/share/hadoop/hdfs/webapps/hdfs/dfsclusterhealth_utils.xsl88
-rw-r--r--aarch64/share/hadoop/hdfs/webapps/hdfs/index.html35
-rw-r--r--aarch64/share/hadoop/hdfs/webapps/journal/WEB-INF/web.xml39
-rw-r--r--aarch64/share/hadoop/hdfs/webapps/journal/index.html29
-rw-r--r--aarch64/share/hadoop/hdfs/webapps/secondary/WEB-INF/web.xml39
-rw-r--r--aarch64/share/hadoop/hdfs/webapps/secondary/index.html29
-rw-r--r--aarch64/share/hadoop/hdfs/webapps/static/hadoop.css190
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/LICENSE707
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/NOTICE16
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/RELEASE-NOTES234
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/RUNNING.txt454
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/bootstrap.jarbin0 -> 22706 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/catalina-tasks.xml58
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/catalina.bat286
-rwxr-xr-xaarch64/share/hadoop/httpfs/tomcat/bin/catalina.sh506
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/commons-daemon-native.tar.gzbin0 -> 202519 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/commons-daemon.jarbin0 -> 24242 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/cpappend.bat35
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/digest.bat56
-rwxr-xr-xaarch64/share/hadoop/httpfs/tomcat/bin/digest.sh48
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/setclasspath.bat82
-rwxr-xr-xaarch64/share/hadoop/httpfs/tomcat/bin/setclasspath.sh116
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/shutdown.bat59
-rwxr-xr-xaarch64/share/hadoop/httpfs/tomcat/bin/shutdown.sh48
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/startup.bat59
-rwxr-xr-xaarch64/share/hadoop/httpfs/tomcat/bin/startup.sh65
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/tomcat-juli.jarbin0 -> 32278 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/tomcat-native.tar.gzbin0 -> 258558 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/tool-wrapper.bat85
-rwxr-xr-xaarch64/share/hadoop/httpfs/tomcat/bin/tool-wrapper.sh99
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/bin/version.bat59
-rwxr-xr-xaarch64/share/hadoop/httpfs/tomcat/bin/version.sh48
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/conf/catalina.policy222
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/conf/catalina.properties81
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/conf/context.xml35
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/conf/logging.properties67
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/conf/server.xml150
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/conf/tomcat-users.xml36
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/conf/web.xml1249
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/annotations-api.jarbin0 -> 15240 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/catalina-ant.jarbin0 -> 54565 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/catalina-ha.jarbin0 -> 132132 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/catalina-tribes.jarbin0 -> 237521 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/catalina.jarbin0 -> 1243752 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/ecj-3.7.2.jarbin0 -> 1749257 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/el-api.jarbin0 -> 33314 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/jasper-el.jarbin0 -> 112554 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/jasper.jarbin0 -> 527671 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/jsp-api.jarbin0 -> 76691 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/servlet-api.jarbin0 -> 88499 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/tomcat-coyote.jarbin0 -> 771696 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/tomcat-dbcp.jarbin0 -> 253633 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/tomcat-i18n-es.jarbin0 -> 70018 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/tomcat-i18n-fr.jarbin0 -> 51901 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/lib/tomcat-i18n-ja.jarbin0 -> 54509 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/temp/safeToDelete.tmp0
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/ROOT/WEB-INF/web.xml16
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/ROOT/index.html21
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/default-log4j.properties20
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/httpfs-default.xml237
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/httpfs.properties21
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSFileSystem$1.classbin0 -> 1136 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSFileSystem$2.classbin0 -> 1399 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSFileSystem$3.classbin0 -> 1761 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSFileSystem$4.classbin0 -> 2037 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSFileSystem$5.classbin0 -> 1863 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSFileSystem$6.classbin0 -> 1009 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSFileSystem$FILE_TYPE.classbin0 -> 2141 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSFileSystem$HttpFSDataInputStream.classbin0 -> 1589 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSFileSystem$HttpFSDataOutputStream.classbin0 -> 1406 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSFileSystem$Operation.classbin0 -> 2723 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSFileSystem.classbin0 -> 24410 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator$DelegationTokenOperation.classbin0 -> 2203 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.classbin0 -> 7214 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSPseudoAuthenticator.classbin0 -> 1339 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/client/HttpFSUtils.classbin0 -> 5333 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.classbin0 -> 3107 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSAppend.classbin0 -> 2191 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSConcat.classbin0 -> 1918 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSContentSummary.classbin0 -> 1827 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSCreate.classbin0 -> 2908 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSDelete.classbin0 -> 1988 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSFileChecksum.classbin0 -> 1807 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSFileStatus.classbin0 -> 1791 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSHomeDir.classbin0 -> 1848 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSListStatus.classbin0 -> 2294 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSMkdirs.classbin0 -> 2110 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSOpen.classbin0 -> 1997 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSRename.classbin0 -> 1963 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSSetOwner.classbin0 -> 1809 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSSetPermission.classbin0 -> 1918 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSSetReplication.classbin0 -> 2037 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations$FSSetTimes.classbin0 -> 1746 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/FSOperations.classbin0 -> 6459 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.classbin0 -> 3633 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.classbin0 -> 2899 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandler$1.classbin0 -> 1290 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandler.classbin0 -> 7845 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$AccessTimeParam.classbin0 -> 969 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$BlockSizeParam.classbin0 -> 965 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$DataParam.classbin0 -> 946 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$DestinationParam.classbin0 -> 901 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$DoAsParam.classbin0 -> 1529 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$FilterParam.classbin0 -> 881 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$GroupParam.classbin0 -> 1007 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$LenParam.classbin0 -> 944 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$ModifiedTimeParam.classbin0 -> 981 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$OffsetParam.classbin0 -> 942 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$OperationParam.classbin0 -> 1385 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$OverwriteParam.classbin0 -> 966 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$OwnerParam.classbin0 -> 1007 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$PermissionParam.classbin0 -> 1006 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$RecursiveParam.classbin0 -> 966 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$ReplicationParam.classbin0 -> 966 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider$SourcesParam.classbin0 -> 885 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.classbin0 -> 4087 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSReleaseFilter.classbin0 -> 1028 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSServer$1.classbin0 -> 2000 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSServer.classbin0 -> 19404 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.classbin0 -> 3194 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/lang/RunnableCallable.classbin0 -> 2159 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/lang/XException$ERROR.classbin0 -> 261 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/lang/XException.classbin0 -> 2674 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/server/BaseService.classbin0 -> 3470 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/server/Server$Status.classbin0 -> 2043 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/server/Server.classbin0 -> 18236 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/server/ServerException$ERROR.classbin0 -> 3109 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/server/ServerException.classbin0 -> 1140 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/server/Service.classbin0 -> 915 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/server/ServiceException.classbin0 -> 910 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/DelegationTokenIdentifier.classbin0 -> 1206 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/DelegationTokenManager.classbin0 -> 1576 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/DelegationTokenManagerException$ERROR.classbin0 -> 2000 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/DelegationTokenManagerException.classbin0 -> 1106 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/FileSystemAccess$FileSystemExecutor.classbin0 -> 500 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/FileSystemAccess.classbin0 -> 1250 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/FileSystemAccessException$ERROR.classbin0 -> 2612 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/FileSystemAccessException.classbin0 -> 1070 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/Groups.classbin0 -> 575 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/Instrumentation$Cron.classbin0 -> 323 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/Instrumentation$Variable.classbin0 -> 364 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/Instrumentation.classbin0 -> 1417 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/ProxyUser.classbin0 -> 566 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/Scheduler.classbin0 -> 644 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService$1.classbin0 -> 1400 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService$2.classbin0 -> 1389 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService$3.classbin0 -> 3036 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService$4.classbin0 -> 1437 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService$CachedFileSystem.classbin0 -> 1557 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService$FileSystemCachePurger.classbin0 -> 2630 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.classbin0 -> 15405 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/instrumentation/InstrumentationService$1.classbin0 -> 1316 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/instrumentation/InstrumentationService$2.classbin0 -> 1315 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/instrumentation/InstrumentationService$3.classbin0 -> 1317 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/instrumentation/InstrumentationService$Cron.classbin0 -> 1567 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/instrumentation/InstrumentationService$Sampler.classbin0 -> 2942 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/instrumentation/InstrumentationService$SamplersRunnable.classbin0 -> 1734 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/instrumentation/InstrumentationService$Timer.classbin0 -> 3008 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/instrumentation/InstrumentationService$VariableHolder.classbin0 -> 2192 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.classbin0 -> 9958 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/scheduler/SchedulerService$1.classbin0 -> 3280 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/scheduler/SchedulerService.classbin0 -> 5109 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/security/DelegationTokenManagerService$DelegationTokenSecretManager.classbin0 -> 1345 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/security/DelegationTokenManagerService.classbin0 -> 7282 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/security/GroupsService.classbin0 -> 1812 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/security/ProxyUserService$ERROR.classbin0 -> 1983 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/service/security/ProxyUserService.classbin0 -> 7297 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.classbin0 -> 2345 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/servlet/HostnameFilter.classbin0 -> 2871 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/servlet/MDCFilter.classbin0 -> 2242 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/servlet/ServerWebApp.classbin0 -> 5411 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/util/Check.classbin0 -> 4014 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/util/ConfigurationUtils.classbin0 -> 5061 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/BooleanParam.classbin0 -> 1765 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/ByteParam.classbin0 -> 1354 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/EnumParam.classbin0 -> 2070 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/ExceptionProvider.classbin0 -> 3335 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/InputStreamEntity.classbin0 -> 1446 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/IntegerParam.classbin0 -> 1384 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/JSONMapProvider.classbin0 -> 3997 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/JSONProvider.classbin0 -> 4081 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/LongParam.classbin0 -> 1354 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/Param.classbin0 -> 2130 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/Parameters.classbin0 -> 1412 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/ParametersProvider.classbin0 -> 5689 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/ShortParam.classbin0 -> 1560 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/StringParam.classbin0 -> 2498 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/UserProvider$1.classbin0 -> 1015 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/UserProvider$UserParam.classbin0 -> 1337 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/classes/org/apache/hadoop/lib/wsrs/UserProvider.classbin0 -> 4065 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/activation-1.1.jarbin0 -> 62983 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/asm-3.2.jarbin0 -> 43398 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/avro-1.7.4.jarbin0 -> 303139 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-beanutils-1.7.0.jarbin0 -> 188671 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-beanutils-core-1.8.0.jarbin0 -> 206035 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-cli-1.2.jarbin0 -> 41123 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-codec-1.4.jarbin0 -> 58160 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-collections-3.2.1.jarbin0 -> 575389 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-compress-1.4.1.jarbin0 -> 241367 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-configuration-1.6.jarbin0 -> 298829 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-daemon-1.0.13.jarbin0 -> 24239 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-digester-1.8.jarbin0 -> 143602 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-io-2.1.jarbin0 -> 163151 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-lang-2.5.jarbin0 -> 279193 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-logging-1.1.1.jarbin0 -> 60686 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-math-2.1.jarbin0 -> 832410 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-net-3.1.jarbin0 -> 273370 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/guava-11.0.2.jarbin0 -> 1648200 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/hadoop-annotations-2.2.0.jarbin0 -> 16781 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/hadoop-auth-2.2.0.jarbin0 -> 49779 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/hadoop-common-2.2.0.jarbin0 -> 2677324 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/hadoop-hdfs-2.2.0.jarbin0 -> 5242564 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jackson-core-asl-1.8.8.jarbin0 -> 227500 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jackson-jaxrs-1.8.8.jarbin0 -> 17884 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jackson-mapper-asl-1.8.8.jarbin0 -> 668564 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jackson-xc-1.8.8.jarbin0 -> 32353 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jaxb-api-2.2.2.jarbin0 -> 105134 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jaxb-impl-2.2.3-1.jarbin0 -> 890168 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jersey-core-1.9.jarbin0 -> 458739 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jersey-json-1.9.jarbin0 -> 147952 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jersey-server-1.9.jarbin0 -> 713089 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jettison-1.1.jarbin0 -> 67758 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jsch-0.1.42.jarbin0 -> 185746 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/json-simple-1.1.jarbin0 -> 16046 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/jsr305-1.3.9.jarbin0 -> 33015 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/log4j-1.2.17.jarbin0 -> 489884 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/paranamer-2.3.jarbin0 -> 29555 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/protobuf-java-2.5.0.jarbin0 -> 533455 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/slf4j-api-1.7.5.jarbin0 -> 26084 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/slf4j-log4j12-1.7.5.jarbin0 -> 8869 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/snappy-java-1.0.4.1.jarbin0 -> 995968 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/stax-api-1.0.1.jarbin0 -> 26514 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/xmlenc-0.52.jarbin0 -> 15010 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/xz-1.0.jarbin0 -> 94672 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/zookeeper-3.4.5.jarbin0 -> 779974 bytes
-rw-r--r--aarch64/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/web.xml98
-rw-r--r--aarch64/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.2.0.jarbin0 -> 482132 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.2.0.jarbin0 -> 656310 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.2.0.jarbin0 -> 1455462 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.2.0.jarbin0 -> 117197 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.2.0.jarbin0 -> 4057 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.2.0-tests.jarbin0 -> 1434955 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.2.0.jarbin0 -> 35209 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.2.0.jarbin0 -> 21538 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.2.0.jarbin0 -> 270272 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib-examples/hsqldb-2.0.0.jarbin0 -> 1256297 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/aopalliance-1.0.jarbin0 -> 4467 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/asm-3.2.jarbin0 -> 43398 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/avro-1.7.4.jarbin0 -> 303139 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jarbin0 -> 241367 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/commons-io-2.1.jarbin0 -> 163151 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/guice-3.0.jarbin0 -> 710492 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/guice-servlet-3.0.jarbin0 -> 65012 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/hadoop-annotations-2.2.0.jarbin0 -> 16781 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/hamcrest-core-1.1.jarbin0 -> 76643 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/jackson-core-asl-1.8.8.jarbin0 -> 227500 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.8.8.jarbin0 -> 668564 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/javax.inject-1.jarbin0 -> 2497 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/jersey-core-1.9.jarbin0 -> 458739 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/jersey-guice-1.9.jarbin0 -> 14786 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/jersey-server-1.9.jarbin0 -> 713089 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/junit-4.10.jarbin0 -> 253160 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/log4j-1.2.17.jarbin0 -> 489884 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jarbin0 -> 1199572 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/paranamer-2.3.jarbin0 -> 29555 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jarbin0 -> 533455 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jarbin0 -> 995968 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/lib/xz-1.0.jarbin0 -> 94672 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-app-2.2.0-sources.jarbin0 -> 278860 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-app-2.2.0-test-sources.jarbin0 -> 144052 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-common-2.2.0-sources.jarbin0 -> 244744 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-common-2.2.0-test-sources.jarbin0 -> 24308 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-core-2.2.0-sources.jarbin0 -> 1008323 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-core-2.2.0-test-sources.jarbin0 -> 67089 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-hs-2.2.0-sources.jarbin0 -> 72681 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-hs-2.2.0-test-sources.jarbin0 -> 63255 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-hs-plugins-2.2.0-sources.jarbin0 -> 2394 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-hs-plugins-2.2.0-test-sources.jarbin0 -> 2352 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-jobclient-2.2.0-sources.jarbin0 -> 21193 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-jobclient-2.2.0-test-sources.jarbin0 -> 694739 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-shuffle-2.2.0-sources.jarbin0 -> 10600 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-client-shuffle-2.2.0-test-sources.jarbin0 -> 6453 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-examples-2.2.0-sources.jarbin0 -> 695908 bytes
-rw-r--r--aarch64/share/hadoop/mapreduce/sources/hadoop-mapreduce-examples-2.2.0-test-sources.jarbin0 -> 12964 bytes
-rw-r--r--aarch64/share/hadoop/tools/lib/hadoop-archives-2.2.0.jarbin0 -> 21487 bytes
-rw-r--r--aarch64/share/hadoop/tools/lib/hadoop-datajoin-2.2.0.jarbin0 -> 14547 bytes
-rw-r--r--aarch64/share/hadoop/tools/lib/hadoop-distcp-2.2.0.jarbin0 -> 80387 bytes
-rw-r--r--aarch64/share/hadoop/tools/lib/hadoop-extras-2.2.0.jarbin0 -> 62040 bytes
-rw-r--r--aarch64/share/hadoop/tools/lib/hadoop-gridmix-2.2.0.jarbin0 -> 215354 bytes
-rw-r--r--aarch64/share/hadoop/tools/lib/hadoop-rumen-2.2.0.jarbin0 -> 277586 bytes
-rw-r--r--aarch64/share/hadoop/tools/lib/hadoop-streaming-2.2.0.jarbin0 -> 102790 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-archives-2.2.0-sources.jarbin0 -> 9636 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-archives-2.2.0-test-sources.jarbin0 -> 3185 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-datajoin-2.2.0-sources.jarbin0 -> 12200 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-datajoin-2.2.0-test-sources.jarbin0 -> 7197 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-distcp-2.2.0-sources.jarbin0 -> 59176 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-distcp-2.2.0-test-sources.jarbin0 -> 38610 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-extras-2.2.0-sources.jarbin0 -> 30647 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-extras-2.2.0-test-sources.jarbin0 -> 13893 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-gridmix-2.2.0-sources.jarbin0 -> 121404 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-gridmix-2.2.0-test-sources.jarbin0 -> 71676 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-rumen-2.2.0-sources.jarbin0 -> 170000 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-rumen-2.2.0-test-sources.jarbin0 -> 9314 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-streaming-2.2.0-sources.jarbin0 -> 71829 bytes
-rw-r--r--aarch64/share/hadoop/tools/sources/hadoop-streaming-2.2.0-test-sources.jarbin0 -> 76355 bytes
-rw-r--r--aarch64/share/hadoop/yarn/hadoop-yarn-api-2.2.0.jarbin0 -> 1158740 bytes
-rw-r--r--aarch64/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.2.0.jarbin0 -> 32509 bytes
-rw-r--r--aarch64/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.2.0.jarbin0 -> 13299 bytes
-rw-r--r--aarch64/share/hadoop/yarn/hadoop-yarn-client-2.2.0.jarbin0 -> 94754 bytes
-rw-r--r--aarch64/share/hadoop/yarn/hadoop-yarn-common-2.2.0.jarbin0 -> 1301644 bytes
-rw-r--r--aarch64/share/hadoop/yarn/hadoop-yarn-server-common-2.2.0.jarbin0 -> 175522 bytes
-rw-r--r--aarch64/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.2.0.jarbin0 -> 467789 bytes
-rw-r--r--aarch64/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.2.0.jarbin0 -> 615701 bytes
-rw-r--r--aarch64/share/hadoop/yarn/hadoop-yarn-server-tests-2.2.0.jarbin0 -> 2137 bytes
-rw-r--r--aarch64/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.2.0.jarbin0 -> 25701 bytes
-rw-r--r--aarch64/share/hadoop/yarn/hadoop-yarn-site-2.2.0.jarbin0 -> 1930 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib-examples/hsqldb-2.0.0.jarbin0 -> 1256297 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/aopalliance-1.0.jarbin0 -> 4467 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/asm-3.2.jarbin0 -> 43398 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/avro-1.7.4.jarbin0 -> 303139 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/commons-compress-1.4.1.jarbin0 -> 241367 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/commons-io-2.1.jarbin0 -> 163151 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/guice-3.0.jarbin0 -> 710492 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/guice-servlet-3.0.jarbin0 -> 65012 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/hadoop-annotations-2.2.0.jarbin0 -> 16781 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/hamcrest-core-1.1.jarbin0 -> 76643 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/jackson-core-asl-1.8.8.jarbin0 -> 227500 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/jackson-mapper-asl-1.8.8.jarbin0 -> 668564 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/javax.inject-1.jarbin0 -> 2497 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/jersey-core-1.9.jarbin0 -> 458739 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/jersey-guice-1.9.jarbin0 -> 14786 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/jersey-server-1.9.jarbin0 -> 713089 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/junit-4.10.jarbin0 -> 253160 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/log4j-1.2.17.jarbin0 -> 489884 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/netty-3.6.2.Final.jarbin0 -> 1199572 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/paranamer-2.3.jarbin0 -> 29555 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/protobuf-java-2.5.0.jarbin0 -> 533455 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/snappy-java-1.0.4.1.jarbin0 -> 995968 bytes
-rw-r--r--aarch64/share/hadoop/yarn/lib/xz-1.0.jarbin0 -> 94672 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-api-2.2.0-sources.jarbin0 -> 360318 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-applications-distributedshell-2.2.0-sources.jarbin0 -> 19273 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-applications-distributedshell-2.2.0-test-sources.jarbin0 -> 6355 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-applications-unmanaged-am-launcher-2.2.0-sources.jarbin0 -> 6265 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-applications-unmanaged-am-launcher-2.2.0-test-sources.jarbin0 -> 4941 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-client-2.2.0-sources.jarbin0 -> 59384 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-client-2.2.0-test-sources.jarbin0 -> 35662 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-common-2.2.0-sources.jarbin0 -> 634756 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-common-2.2.0-test-sources.jarbin0 -> 79714 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-server-common-2.2.0-sources.jarbin0 -> 76814 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-server-common-2.2.0-test-sources.jarbin0 -> 7884 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-server-nodemanager-2.2.0-sources.jarbin0 -> 262437 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-server-nodemanager-2.2.0-test-sources.jarbin0 -> 158721 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-server-resourcemanager-2.2.0-sources.jarbin0 -> 387489 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-server-resourcemanager-2.2.0-test-sources.jarbin0 -> 246635 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-server-tests-2.2.0-test-sources.jarbin0 -> 18425 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-server-web-proxy-2.2.0-sources.jarbin0 -> 17741 bytes
-rw-r--r--aarch64/share/hadoop/yarn/sources/hadoop-yarn-server-web-proxy-2.2.0-test-sources.jarbin0 -> 5907 bytes
-rw-r--r--aarch64/share/hadoop/yarn/test/hadoop-yarn-server-tests-2.2.0-tests.jarbin0 -> 35375 bytes
467 files changed, 596135 insertions, 0 deletions
diff --git a/aarch64/share/hadoop/common/hadoop-common-2.2.0-tests.jar b/aarch64/share/hadoop/common/hadoop-common-2.2.0-tests.jar
new file mode 100644
index 0000000..efe3964
--- /dev/null
+++ b/aarch64/share/hadoop/common/hadoop-common-2.2.0-tests.jar
Binary files differ
diff --git a/aarch64/share/hadoop/common/hadoop-common-2.2.0.jar b/aarch64/share/hadoop/common/hadoop-common-2.2.0.jar
new file mode 100644
index 0000000..5fb45d8
--- /dev/null
+++ b/aarch64/share/hadoop/common/hadoop-common-2.2.0.jar
Binary files differ
diff --git a/aarch64/share/hadoop/common/hadoop-nfs-2.2.0.jar b/aarch64/share/hadoop/common/hadoop-nfs-2.2.0.jar
new file mode 100644
index 0000000..f68b5dd
--- /dev/null
+++ b/aarch64/share/hadoop/common/hadoop-nfs-2.2.0.jar
Binary files differ
diff --git a/aarch64/share/hadoop/common/jdiff/hadoop-core_0.20.0.xml b/aarch64/share/hadoop/common/jdiff/hadoop-core_0.20.0.xml
new file mode 100644
index 0000000..82bba33
--- /dev/null
+++ b/aarch64/share/hadoop/common/jdiff/hadoop-core_0.20.0.xml
@@ -0,0 +1,32308 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Sun May 31 20:29:52 PDT 2009 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="hadoop-core 0.20.0"
+ jdversion="1.0.9">
+
+<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/gkesavan/release-0.20.0/build/ivy/lib/Hadoop/jdiff/jdiff-1.0.9.jar:/home/gkesavan/release-0.20.0/build/ivy/lib/Hadoop/jdiff/xerces-1.4.4.jar -classpath /home/gkesavan/release-0.20.0/build/classes:/home/gkesavan/release-0.20.0/lib/commons-cli-2.0-SNAPSHOT.jar:/home/gkesavan/release-0.20.0/lib/hsqldb-1.8.0.10.jar:/home/gkesavan/release-0.20.0/lib/jsp-2.1/jsp-2.1.jar:/home/gkesavan/release-0.20.0/lib/jsp-2.1/jsp-api-2.1.jar:/home/gkesavan/release-0.20.0/lib/kfs-0.2.2.jar:/home/gkesavan/release-0.20.0/conf:/home/gkesavan/.ivy2/cache/commons-logging/commons-logging/jars/commons-logging-1.0.4.jar:/home/gkesavan/.ivy2/cache/log4j/log4j/jars/log4j-1.2.15.jar:/home/gkesavan/.ivy2/cache/commons-httpclient/commons-httpclient/jars/commons-httpclient-3.0.1.jar:/home/gkesavan/.ivy2/cache/commons-codec/commons-codec/jars/commons-codec-1.3.jar:/home/gkesavan/.ivy2/cache/xmlenc/xmlenc/jars/xmlenc-0.52.jar:/home/gkesavan/.ivy2/cache/net.java.dev.jets3t/jets3t/jars/jets3t-0.6.1.jar:/home/gkesavan/.ivy2/cache/commons-net/commons-net/jars/commons-net-1.4.1.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/servlet-api-2.5/jars/servlet-api-2.5-6.1.14.jar:/home/gkesavan/.ivy2/cache/oro/oro/jars/oro-2.0.8.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/jetty/jars/jetty-6.1.14.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/jetty-util/jars/jetty-util-6.1.14.jar:/home/gkesavan/.ivy2/cache/tomcat/jasper-runtime/jars/jasper-runtime-5.5.12.jar:/home/gkesavan/.ivy2/cache/tomcat/jasper-compiler/jars/jasper-compiler-5.5.12.jar:/home/gkesavan/.ivy2/cache/commons-el/commons-el/jars/commons-el-1.0.jar:/home/gkesavan/.ivy2/cache/junit/junit/jars/junit-3.8.1.jar:/home/gkesavan/.ivy2/cache/commons-logging/commons-logging-api/jars/commons-logging-api-1.0.4.jar:/home/gkesavan/.ivy2/cache/org.slf4j/slf4j-api/jars/slf4j-api-1.4.3.jar:/home/gkesavan/.ivy2/cache/org.eclipse.jdt/core/jars/core-3.1.1.jar:/home/gkesavan/.ivy2/cache/org.slf4j/sl
f4j-log4j12/jars/slf4j-log4j12-1.4.3.jar:/home/gkesavan/.ivy2/cache/jdiff/jdiff/jars/jdiff-1.0.9.jar:/home/gkesavan/.ivy2/cache/xerces/xerces/jars/xerces-1.4.4.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-launcher.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-resolver.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-starteam.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-netrexx.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-testutil.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jai.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-swing.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jmf.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-bcel.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jdepend.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jsch.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-bsf.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-antlr.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-weblogic.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-junit.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-log4j.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/xercesImpl.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-oro.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-trax.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-nodeps.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-commons-logging.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-regexp.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-stylebook.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-javamail.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-commons-net.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/xml-apis.jar:/home/gkesavan/tools/jdk1.6.0_07-32bit/lib/tools.jar -sourcepath /home/gkesavan/release-0.20.0/src/core -apidir /home/gkesavan/release-0.20.0/lib/jdiff -apiname Hadoop-core 0.20.1-dev -->
+<package name="org.apache.hadoop">
+ <!-- start interface org.apache.hadoop.HadoopVersionAnnotation -->
+ <interface name="HadoopVersionAnnotation" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.annotation.Annotation"/>
+ <method name="version" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Hadoop version
+ @return the version string "0.6.3-dev"]]>
+ </doc>
+ </method>
+ <method name="user" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the username that compiled Hadoop.]]>
+ </doc>
+ </method>
+ <method name="date" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the date when Hadoop was compiled.
+ @return the date in unix 'date' format]]>
+ </doc>
+ </method>
+ <method name="url" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the url for the subversion repository.]]>
+ </doc>
+ </method>
+ <method name="revision" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the subversion revision.
+ @return the revision number as a string (eg. "451451")]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A package attribute that captures the version of Hadoop that was compiled.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.HadoopVersionAnnotation -->
+</package>
+<package name="org.apache.hadoop.conf">
+ <!-- start interface org.apache.hadoop.conf.Configurable -->
+ <interface name="Configurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration to be used by this object.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the configuration used by this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Something that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.conf.Configurable -->
+ <!-- start class org.apache.hadoop.conf.Configuration -->
+ <class name="Configuration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable"/>
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configuration" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration where the behavior of reading from the default
+ resources can be turned off.
+
+ If the parameter {@code loadDefaults} is false, the new instance
+ will not load resources from the default files.
+ @param loadDefaults specifies whether to load from the default files]]>
+ </doc>
+ </constructor>
+ <constructor name="Configuration" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration with the same settings cloned from another.
+
+ @param other the configuration from which to clone settings.]]>
+ </doc>
+ </constructor>
+ <method name="addDefaultResource"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add a default resource. Resources are loaded in the order of the resources
+ added.
+ @param name file name. File should be present in the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param name resource to be added, the classpath is examined for a file
+ with that name.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="url" type="java.net.URL"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param url url of the resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param file file-path of resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param in InputStream to deserialize the object from.]]>
+ </doc>
+ </method>
+ <method name="reloadConfiguration"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reload configuration from previously added resources.
+
+ This method will clear all the configuration read from the added
+ resources, and final parameters. This will make the resources to
+ be read again before accessing the values. Values that are added
+ via set methods will overlay values read from the resources.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, <code>null</code> if
+ no such property exists.
+
+ Values are processed for <a href="#VariableExpansion">variable expansion</a>
+ before being returned.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="getRaw" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, without doing
+ <a href="#VariableExpansion">variable expansion</a>.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the <code>value</code> of the <code>name</code> property.
+
+ @param name property name.
+ @param value property value.]]>
+ </doc>
+ </method>
+ <method name="setIfUnset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets a property if it is currently unset.
+ @param name the property name
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property. If no such property
+ exists, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value, or <code>defaultValue</code> if the property
+ doesn't exist.]]>
+ </doc>
+ </method>
+ <method name="getInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="int"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as an <code>int</code>.
+
+ If no such property exists, or if the specified value is not a valid
+ <code>int</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as an <code>int</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to an <code>int</code>.
+
+ @param name property name.
+ @param value <code>int</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="long"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>long</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>long</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>long</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>long</code>.
+
+ @param name property name.
+ @param value <code>long</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="float"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>float</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>float</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>float</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="float"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>float</code>.
+
+ @param name property name.
+ @param value property value.]]>
+ </doc>
+ </method>
+ <method name="getBoolean" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="boolean"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>boolean</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>boolean</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>boolean</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setBoolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>boolean</code>.
+
+ @param name property name.
+ @param value <code>boolean</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="setBooleanIfUnset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the given property, if it is currently unset.
+ @param name property name
+ @param value new value]]>
+ </doc>
+ </method>
+ <method name="getRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Parse the given attribute as a set of integer ranges
+ @param name the attribute name
+ @param defaultValue the default value if it is not set
+ @return a new set of ranges from the configured value]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ a collection of <code>String</code>s.
+ If no such property is specified then empty collection is returned.
+ <p>
+ This is an optimized version of {@link #getStrings(String)}
+
+ @param name property name.
+ @return property value as a collection of <code>String</code>s.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then <code>null</code> is returned.
+
+ @param name property name.
+ @return property value as an array of <code>String</code>s,
+ or <code>null</code>.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then default value is returned.
+
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of <code>String</code>s,
+ or default value.]]>
+ </doc>
+ </method>
+ <method name="setStrings"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="values" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Set the array of string values for the <code>name</code> property as
+ as comma delimited values.
+
+ @param name property name.
+ @param values The values]]>
+ </doc>
+ </method>
+ <method name="getClassByName" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Load a class by name.
+
+ @param name the class name.
+ @return the class object.
+ @throws ClassNotFoundException if the class is not found.]]>
+ </doc>
+ </method>
+ <method name="getClasses" return="java.lang.Class[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class[]"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property
+ as an array of <code>Class</code>.
+ The value of the property specifies a list of comma separated class names.
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ @param name the property name.
+ @param defaultValue default value.
+ @return property value as a <code>Class[]</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>.
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class"/>
+ <param name="xface" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>
+ implementing the interface specified by <code>xface</code>.
+
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ An exception is thrown if the returned class does not implement the named
+ interface.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @param xface the interface implemented by the named class.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="theClass" type="java.lang.Class"/>
+ <param name="xface" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to the name of a
+ <code>theClass</code> implementing the given interface <code>xface</code>.
+
+ An exception is thrown if <code>theClass</code> does not implement the
+ interface <code>xface</code>.
+
+ @param name property name.
+ @param theClass property value.
+ @param xface the interface implemented by the named class.]]>
+ </doc>
+ </method>
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file under a directory named by <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
+ <method name="getFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file name under a directory named in <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
+ <method name="getResource" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the {@link URL} for the named resource.
+
+ @param name resource name.
+ @return the url for the named resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get an input stream attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return an input stream attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsReader" return="java.io.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a {@link Reader} attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return a reader attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of keys in the configuration.
+
+ @return number of keys in the configuration.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Clears all keys from the configuration.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get an {@link Iterator} to go through the list of <code>String</code>
+ key-value pairs in the configuration.
+
+ @return an iterator over the entries.]]>
+ </doc>
+ </method>
+ <method name="writeXml"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write out the non-default properties in this configuration to the given
+ {@link OutputStream}.
+
+ @param out the output stream to write to.]]>
+ </doc>
+ </method>
+ <method name="getClassLoader" return="java.lang.ClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link ClassLoader} for this job.
+
+ @return the correct class loader.]]>
+ </doc>
+ </method>
+ <method name="setClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="classLoader" type="java.lang.ClassLoader"/>
+ <doc>
+ <![CDATA[Set the class loader that will be used to load the various objects.
+
+ @param classLoader the new class loader.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setQuietMode"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="quietmode" type="boolean"/>
+ <doc>
+ <![CDATA[Set the quietness-mode.
+
+ In the quiet-mode, error and informational messages might not be logged.
+
+ @param quietmode <code>true</code> to set quiet-mode on, <code>false</code>
+ to turn it off.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[For debugging. List non-default properties to the terminal and exit.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Provides access to configuration parameters.
+
+ <h4 id="Resources">Resources</h4>
+
+ <p>Configurations are specified by resources. A resource contains a set of
+ name/value pairs as XML data. Each resource is named by either a
+ <code>String</code> or by a {@link Path}. If named by a <code>String</code>,
+ then the classpath is examined for a file with that name. If named by a
+ <code>Path</code>, then the local filesystem is examined directly, without
+ referring to the classpath.
+
+ <p>Unless explicitly turned off, Hadoop by default specifies two
+ resources, loaded in-order from the classpath: <ol>
+ <li><tt><a href="{@docRoot}/../core-default.html">core-default.xml</a>
+ </tt>: Read-only defaults for hadoop.</li>
+ <li><tt>core-site.xml</tt>: Site-specific configuration for a given hadoop
+ installation.</li>
+ </ol>
+ Applications may add additional resources, which are loaded
+ subsequent to these resources in the order they are added.
+
+ <h4 id="FinalParams">Final Parameters</h4>
+
+ <p>Configuration parameters may be declared <i>final</i>.
+ Once a resource declares a value final, no subsequently-loaded
+ resource can alter that value.
+ For example, one might define a final parameter with:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;dfs.client.buffer.dir&lt;/name&gt;
+ &lt;value&gt;/tmp/hadoop/dfs/client&lt;/value&gt;
+ <b>&lt;final&gt;true&lt;/final&gt;</b>
+ &lt;/property&gt;</pre></tt>
+
+ Administrators typically define parameters as final in
+ <tt>core-site.xml</tt> for values that user applications may not alter.
+
+ <h4 id="VariableExpansion">Variable Expansion</h4>
+
+ <p>Value strings are first processed for <i>variable expansion</i>. The
+ available properties are:<ol>
+ <li>Other properties defined in this Configuration; and, if a name is
+ undefined here,</li>
+ <li>Properties in {@link System#getProperties()}.</li>
+ </ol>
+
+ <p>For example, if a configuration resource contains the following property
+ definitions:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;basedir&lt;/name&gt;
+ &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
+ &lt;/property&gt;
+
+ &lt;property&gt;
+ &lt;name&gt;tempdir&lt;/name&gt;
+ &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt;
+ &lt;/property&gt;</pre></tt>
+
+ When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
+ will be resolved to another property in this Configuration, while
+ <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value
+ of the System property with that name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration -->
+ <!-- start class org.apache.hadoop.conf.Configuration.IntegerRanges -->
+ <class name="Configuration.IntegerRanges" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Configuration.IntegerRanges"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Configuration.IntegerRanges" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isIncluded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Is the given value in the set of ranges
+ @param value the value to check
+ @return is the value in the ranges?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A class that represents a set of positive integer ranges. It parses
+ strings of the form: "2-3,5,7-" where ranges are separated by comma and
+ the lower/upper bounds are separated by dash. Either the lower or upper
+ bound may be omitted meaning all values up to or over. So the string
+ above means 2, 3, 5, and 7, 8, 9, ...]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration.IntegerRanges -->
+ <!-- start class org.apache.hadoop.conf.Configured -->
+ <class name="Configured" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Configured"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configured" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Base class for things that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configured -->
+ <doc>
+ <![CDATA[Configuration of system parameters.]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.filecache">
+ <!-- start class org.apache.hadoop.filecache.DistributedCache -->
+ <class name="DistributedCache" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedCache"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copy it from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema
+ or hostname:port is provided the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration file which contains the filesystem
+ @param baseDir The base cache Dir where you want to localize the files/archives
+ @param fileStatus The file status on the dfs.
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred is
+ returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="honorSymLinkConf" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copy it from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema
+ or hostname:port is provided the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration file which contains the filesystem
+ @param baseDir The base cache Dir where you want to localize the files/archives
+ @param fileStatus The file status on the dfs.
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred is
+ returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @param honorSymLinkConf if this is false, then the symlinks are not
+ created even if conf says so (this is required for an optimization in task
+ launches)
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copy it from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema
+ or hostname:port is provided the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration file which contains the filesystem
+ @param baseDir The base cache Dir where you want to localize the files/archives
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred
+ is returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="releaseCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This is the opposite of getLocalCache. When you are done with
+ using the cache, you need to release the cache
+ @param cache The cache URI to be released
+ @param conf configuration which contains the filesystem the cache
+ is contained in.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeRelative" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTimestamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="cache" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns mtime of a given cache file on hdfs.
+ @param conf configuration
+ @param cache cache file
+ @return mtime of a given cache file on hdfs
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createAllSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="jobCacheDir" type="java.io.File"/>
+ <param name="workDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method creates symlinks for all files in a given dir in another directory
+ @param conf the configuration
+ @param jobCacheDir the target directory for creating symlinks
+ @param workDir the directory in which the symlinks are created
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setCacheArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archives" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of archives
+ @param archives The list of archives that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="setCacheFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of files
+ @param files The list of files that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="getCacheArchives" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache archives set in the Configuration
+ @param conf The configuration which contains the archives
+ @return A URI array of the caches set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCacheFiles" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache files set in the Configuration
+ @param conf The configuration which contains the files
+ @return A URI array of the files set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheArchives" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized caches
+ @param conf Configuration that contains the localized archives
+ @return A path array of localized caches
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized files
+ @param conf Configuration that contains the localized files
+ @return A path array of localized files
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getArchiveTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the archives
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFileTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the files
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setArchiveTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This is to check the timestamp of the archives to be localized
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of archives.
+ The order should be the same as the order in which the archives are added.]]>
+ </doc>
+ </method>
+ <method name="setFileTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This is to check the timestamp of the files to be localized
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of files.
+ The order should be the same as the order in which the files are added.]]>
+ </doc>
+ </method>
+ <method name="setLocalArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized archives
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+ </doc>
+ </method>
+ <method name="setLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized files
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+ </doc>
+ </method>
+ <method name="addCacheArchive"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add an archive to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addCacheFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add a file to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addFileToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a file path to the current set of classpath entries. It adds the file
+ to cache as well.
+
+ @param file Path of the file to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getFileClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the file entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="addArchiveToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archive" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an archive path to the current set of classpath entries. It adds the
+ archive to cache as well.
+
+ @param archive Path of the archive to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getArchiveClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the archive entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method allows you to create symlinks in the current working directory
+ of the task to all the cache files/archives
+ @param conf the jobconf]]>
+ </doc>
+ </method>
+ <method name="getSymlink" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method checks to see if symlinks are to be created for the
+ localized cache files in the current working directory
+ @param conf the jobconf
+ @return true if symlinks are to be created- else return false]]>
+ </doc>
+ </method>
+ <method name="checkURIs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uriFiles" type="java.net.URI[]"/>
+ <param name="uriArchives" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[This method checks if there is a conflict in the fragment names
+ of the uris. Also makes sure that each uri has a fragment. It
+ is only to be called if you want to create symlinks for
+ the various archives and files.
+ @param uriFiles The uri array of urifiles
+ @param uriArchives the uri array of uri archives]]>
+ </doc>
+ </method>
+ <method name="purgeCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clear the entire contents of the cache and delete the backing files. This
+ should only be used when the server is reinitializing, because the users
+ are going to lose their files.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Distribute application-specific large, read-only files efficiently.
+
+ <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
+ framework to cache files (text, archives, jars etc.) needed by applications.
+ </p>
+
+ <p>Applications specify the files, via urls (hdfs:// or http://) to be cached
+ via the {@link org.apache.hadoop.mapred.JobConf}.
+ The <code>DistributedCache</code> assumes that the
+ files specified via hdfs:// urls are already present on the
+ {@link FileSystem} at the path specified by the url.</p>
+
+ <p>The framework will copy the necessary files on to the slave node before
+ any tasks for the job are executed on that node. Its efficiency stems from
+ the fact that the files are only copied once per job and the ability to
+ cache archives which are un-archived on the slaves.</p>
+
+ <p><code>DistributedCache</code> can be used to distribute simple, read-only
+ data/text files and/or more complex types such as archives, jars etc.
+ Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes.
+ Jars may be optionally added to the classpath of the tasks, a rudimentary
+ software distribution mechanism. Files have execution permissions.
+ Optionally users can also direct it to symlink the distributed cache file(s)
+ into the working directory of the task.</p>
+
+ <p><code>DistributedCache</code> tracks modification timestamps of the cache
+ files. Clearly the cache files should not be modified by the application
+ or externally while the job is executing.</p>
+
+ <p>Here is an illustrative example on how to use the
+ <code>DistributedCache</code>:</p>
+ <p><blockquote><pre>
+ // Setting up the cache for the application
+
+ 1. Copy the requisite files to the <code>FileSystem</code>:
+
+ $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
+ $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
+ $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
+ $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
+ $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
+ $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
+
+ 2. Setup the application's <code>JobConf</code>:
+
+ JobConf job = new JobConf();
+ DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
+ job);
+ DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
+ DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar"), job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz"), job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz"), job);
+
+ 3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
+ or {@link org.apache.hadoop.mapred.Reducer}:
+
+ public static class MapClass extends MapReduceBase
+ implements Mapper&lt;K, V, K, V&gt; {
+
+ private Path[] localArchives;
+ private Path[] localFiles;
+
+ public void configure(JobConf job) {
+ // Get the cached archives/files
+ localArchives = DistributedCache.getLocalCacheArchives(job);
+ localFiles = DistributedCache.getLocalCacheFiles(job);
+ }
+
+ public void map(K key, V value,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Use data from the cached archives/files here
+ // ...
+ // ...
+ output.collect(k, v);
+ }
+ }
+
+ </pre></blockquote></p>
+
+ @see org.apache.hadoop.mapred.JobConf
+ @see org.apache.hadoop.mapred.JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.filecache.DistributedCache -->
+</package>
+<package name="org.apache.hadoop.fs">
+ <!-- start class org.apache.hadoop.fs.BlockLocation -->
+ <class name="BlockLocation" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlockLocation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with host, name, offset and length]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], java.lang.String[], long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with host, name, network topology, offset and length]]>
+ </doc>
+ </constructor>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hosts (hostname) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of names (hostname:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getTopologyPaths" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of network topology paths for each of the hosts.
+ The last component of the path is the host.]]>
+ </doc>
+ </method>
+ <method name="getOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the start offset of file associated with this block]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the length of the block]]>
+ </doc>
+ </method>
+ <method name="setOffset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <doc>
+ <![CDATA[Set the start offset of file associated with this block]]>
+ </doc>
+ </method>
+ <method name="setLength"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="length" type="long"/>
+ <doc>
+ <![CDATA[Set the length of block]]>
+ </doc>
+ </method>
+ <method name="setHosts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hosts" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the hosts hosting this block]]>
+ </doc>
+ </method>
+ <method name="setNames"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the names (host:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="setTopologyPaths"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="topologyPaths" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the network topology paths of the hosts]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement write of Writable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement readFields of Writable]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BlockLocation -->
+ <!-- start class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <class name="BufferedFSInputStream" extends="java.io.BufferedInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="BufferedFSInputStream" type="org.apache.hadoop.fs.FSInputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a <code>BufferedFSInputStream</code>
+ with the specified buffer size,
+ and saves its argument, the input stream
+ <code>in</code>, for later use. An internal
+ buffer array of length <code>size</code>
+ is created and stored in <code>buf</code>.
+
+ @param in the underlying input stream.
+ @param size the buffer size.
+ @exception IllegalArgumentException if size <= 0.]]>
+ </doc>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A class that optimizes reading from FSInputStream by buffering]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <!-- start class org.apache.hadoop.fs.ChecksumException -->
+ <class name="ChecksumException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumException" type="java.lang.String, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Thrown for checksum errors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumException -->
+ <!-- start class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <class name="ChecksumFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getApproxChkSumLength" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether to verify checksum.]]>
+ </doc>
+ </method>
+ <method name="getRawFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the raw file system]]>
+ </doc>
+ </method>
+ <method name="getChecksumFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return the name of the checksum file associated with a file.]]>
+ </doc>
+ </method>
+ <method name="isChecksumFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return true iff file is a checksum file name.]]>
+ </doc>
+ </method>
+ <method name="getChecksumFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileSize" type="long"/>
+ <doc>
+ <![CDATA[Return the length of the checksum file given the size of the
+ actual file.]]>
+ </doc>
+ </method>
+ <method name="getBytesPerSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the bytes Per Checksum]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getChecksumLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ <param name="bytesPerSum" type="int"/>
+ <doc>
+ <![CDATA[Calculated the length of the checksum file in bytes.
+ @param size the length of the data file in bytes
+ @param bytesPerSum the number of bytes in a checksum block
+ @return the number of bytes in the checksum file]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+ Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt>
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename files/dirs]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement the delete(Path, boolean) in checksum
+ file system.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="copyCrc" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ If src and dst are directories, the copyCrc parameter
+ determines whether to copy CRC files.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Report a checksum error to the file system.
+ @param f the file name containing the error
+ @param in the stream open on the file
+ @param inPos the position of the beginning of the bad data in the file
+ @param sums the stream open on the checksum file
+ @param sumsPos the position of the beginning of the bad data in the checksum file
+ @return true if retry is necessary]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract checksummed FileSystem.
+ It provides a basic implementation of a checksummed FileSystem,
+ which creates a checksum file for each raw file.
+ It generates & verifies checksums at the client side.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <!-- start class org.apache.hadoop.fs.ContentSummary -->
+ <class name="ContentSummary" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ContentSummary"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long, long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the length]]>
+ </doc>
+ </method>
+ <method name="getDirectoryCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the directory count]]>
+ </doc>
+ </method>
+ <method name="getFileCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the file count]]>
+ </doc>
+ </method>
+ <method name="getQuota" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the directory quota]]>
+ </doc>
+ </method>
+ <method name="getSpaceConsumed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns (disk) space consumed]]>
+ </doc>
+ </method>
+ <method name="getSpaceQuota" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns (disk) space quota]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHeader" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the header of the output.
+ if qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the header of the output]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the string representation of the object in the output format.
+ if qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the string representation of the object]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store the summary of a content (a directory or a file).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ContentSummary -->
+ <!-- start class org.apache.hadoop.fs.DF -->
+ <class name="DF" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DF" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="DF" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFilesystem" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAvailable" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPercentUsed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMount" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="DF_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'df' program.
+ Tested on Linux, FreeBSD, Cygwin.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DF -->
+ <!-- start class org.apache.hadoop.fs.DU -->
+ <class name="DU" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DU" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param interval refresh the disk usage at this interval
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <constructor name="DU" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param conf configuration object
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <method name="decDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Decrease how much disk space we use.
+ @param value decrease by this value]]>
+ </doc>
+ </method>
+ <method name="incDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Increase how much disk space we use.
+ @param value increase by this value]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return disk space used
+ @throws IOException if the shell command fails]]>
+ </doc>
+ </method>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the path of which we're keeping track of disk usage]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Start the disk usage checking thread.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down the refreshing thread.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'du' program]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DU -->
+ <!-- start class org.apache.hadoop.fs.FileChecksum -->
+ <class name="FileChecksum" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FileChecksum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getAlgorithmName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The checksum algorithm name]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The length of the checksum in bytes]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value of the checksum in bytes]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return true if both the algorithms and the values are the same.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An abstract class representing file checksums for files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileChecksum -->
+ <!-- start class org.apache.hadoop.fs.FileStatus -->
+ <class name="FileStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <constructor name="FileStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this a directory?
+ @return true if this is a directory]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the block size of the file.
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the replication factor of a file.
+ @return the replication factor of a file.]]>
+ </doc>
+ </method>
+ <method name="getModificationTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the modification time of the file.
+ @return the modification time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getAccessTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the access time of the file.
+ @return the access time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get FsPermission associated with the file.
+ @return permission. If a filesystem does not have a notion of permissions
+ or if permissions could not be determined, then default
+ permissions equivalent of "rwxrwxrwx" is returned.]]>
+ </doc>
+ </method>
+ <method name="getOwner" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the owner of the file.
+ @return owner of the file. The string could be empty if there is no
+ notion of owner of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getGroup" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the group associated with the file.
+ @return group for the file. The string could be empty if there is no
+ notion of group of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Sets permission.
+ @param permission if permission is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="owner" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets owner.
+ @param owner if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setGroup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets group.
+ @param group if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare this object to another object
+
+ @param o the object to be compared.
+ @return a negative integer, zero, or a positive integer as this object
+ is less than, equal to, or greater than the specified object.
+
+ @throws ClassCastException if the specified object is not of
+ type FileStatus]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare if this object is equal to another object
+ @param o the object to be compared.
+ @return true if two file statuses have the same path name; false if not.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for the object, which is defined as
+ the hash code of the path name.
+
+ @return a hash code value for the path name.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that represents the client side information for a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileStatus -->
+ <!-- start class org.apache.hadoop.fs.FileSystem -->
+ <class name="FileSystem" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="FileSystem"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the configured filesystem implementation.]]>
+ </doc>
+ </method>
+ <method name="getDefaultUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default filesystem URI from a configuration.
+ @param conf the configuration to access
+ @return the uri of the default filesystem]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.net.URI"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="getNamed" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="call #get(URI,Configuration) instead.">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated call #get(URI,Configuration) instead.]]>
+ </doc>
+ </method>
+ <method name="getLocal" return="org.apache.hadoop.fs.LocalFileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the local file system.
+ @param conf the configuration to configure the file system with
+ @return a LocalFileSystem]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the FileSystem for this URI's scheme and authority. The scheme
+ of the URI determines a configuration property name,
+ <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
+ The entire URI is passed to the FileSystem instance's initialize method.]]>
+ </doc>
+ </method>
+ <method name="closeAll"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all cached filesystems. Be sure those filesystems are not
+ used anymore.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[create a file with the provided permission
+ The permission of the file is set to be the provided permission as in
+ setPermission, not permission&~umask
+
+ It is implemented using two RPCs. It is understood that it is inefficient,
+ but the implementation is thread-safe. The other option is to change the
+ value of umask in configuration to be 0, but it is not thread-safe.
+
+ @param fs file system handle
+ @param file the name of the file to be created
+ @param permission the permission of the file
+ @return an output stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[create a directory with the provided permission
+ The permission of the directory is set to be the provided permission as in
+ setPermission, not permission&~umask
+
+ @see #create(FileSystem, Path, FsPermission)
+
+ @param fs file system handle
+ @param dir the name of the directory to be created
+ @param permission the permission of the directory
+ @return true if the directory creation succeeds; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing hostnames, offset and size of
+ portions of the given file. For a nonexistent
+ file or regions, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The FileSystem will simply return an elt containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file to open]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param permission
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize
+ @param progress
+ @throws IOException
+ @see #setPermission(Path, FsPermission)]]>
+ </doc>
+ </method>
+ <method name="createNewFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the given Path as a brand-new zero-length file. If
+ create fails, or if it already existed, return false.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, getConf().getInt("io.file.buffer.size", 4096), null)
+ @param f the existing file to be appended.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, bufferSize, null).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @param progress for reporting progress if it is not null.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get replication.
+
+ @deprecated Use getFileStatus() instead
+ @param src file name
+ @return file replication
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file.
+
+ @param f the path to delete.
+ @param recursive if path is a directory and set to
+ true, the directory is deleted else throws an exception. In
+ case of a file the recursive can be set to either true or false.
+ @return true if delete is successful else false.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="deleteOnExit" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a path to be deleted when FileSystem is closed.
+ When the JVM shuts down,
+ all FileSystem objects will be closed automatically.
+ Then,
+ the marked path will be deleted as a result of closing the FileSystem.
+
+ The path has to exist in the file system.
+
+ @param f the path to delete.
+ @return true if deleteOnExit is successful, otherwise false.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="processDeleteOnExit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Delete all files that were marked as delete-on-exit. This recursively
+ deletes all files in the specified paths.]]>
+ </doc>
+ </method>
+ <method name="exists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if exists.
+ @param f source file]]>
+ </doc>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[True iff the named path is a regular file.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the {@link ContentSummary} of a given {@link Path}.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given path using the user-supplied path
+ filter.
+
+ @param f
+ a path name
+ @param filter
+ the user-supplied path filter
+ @return an array of FileStatus objects for the files under the given path
+ after applying the filter
+ @throws IOException
+ if encounter any problem while fetching the status]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using default
+ path filter.
+
+ @param files
+ a list of paths
+ @return a list of statuses for the files under the given paths after
+ applying the filter default Path filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using user-supplied
+ path filter.
+
+ @param files
+ a list of paths
+ @param filter
+ the user-supplied path filter
+ @return a list of statuses for the files under the given paths after
+ applying the filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Return all the files that match filePattern and are not checksum
+ files. Results are sorted by their names.
+
+ <p>
+ A filename pattern is composed of <i>regular</i> characters and
+ <i>special pattern matching</i> characters, which are:
+
+ <dl>
+ <dd>
+ <dl>
+ <p>
+ <dt> <tt> ? </tt>
+ <dd> Matches any single character.
+
+ <p>
+ <dt> <tt> * </tt>
+ <dd> Matches zero or more characters.
+
+ <p>
+ <dt> <tt> [<i>abc</i>] </tt>
+ <dd> Matches a single character from character set
+ <tt>{<i>a,b,c</i>}</tt>.
+
+ <p>
+ <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
+ <dd> Matches a single character from the character range
+ <tt>{<i>a...b</i>}</tt>. Note that character <tt><i>a</i></tt> must be
+ lexicographically less than or equal to character <tt><i>b</i></tt>.
+
+ <p>
+ <dt> <tt> [^<i>a</i>] </tt>
+ <dd> Matches a single character that is not from character set or range
+ <tt>{<i>a</i>}</tt>. Note that the <tt>^</tt> character must occur
+ immediately to the right of the opening bracket.
+
+ <p>
+ <dt> <tt> \<i>c</i> </tt>
+ <dd> Removes (escapes) any special meaning of character <i>c</i>.
+
+ <p>
+ <dt> <tt> {ab,cd} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt>
+
+ <p>
+ <dt> <tt> {ab,c{de,fh}} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cde, cfh</i>}</tt>
+
+ </dl>
+ </dd>
+ </dl>
+
+ @param pathPattern a regular expression specifying a path pattern
+
+ @return an array of paths that match the path pattern
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array of FileStatus objects whose path names match pathPattern
+ and is accepted by the user-supplied path filter. Results are sorted by
+ their path names.
+ Return null if pathPattern has no glob and the path does not exist.
+ Return an empty array if pathPattern has a glob and no path matches it.
+
+ @param pathPattern
+ a regular expression specifying the path pattern
+ @param filter
+ a user-supplied path filter
+ @return an array of FileStatus objects
+ @throws IOException if any I/O error occurs when fetching file status]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the current user's home directory in this filesystem.
+ The default implementation returns "/user/$USER/".]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param new_dir]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #mkdirs(Path, FsPermission)} with default permission.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make the given file and all non-existent parents into
+ directories. Has the semantics of Unix 'mkdir -p'.
+ Existence of the directory hierarchy is not an error.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name and the source is kept intact afterwards]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files is on the local disk. Add it to FS at
+ the given dst name, removing the source afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name, removing the source afterwards.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files are on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="moveToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ Remove the source afterwards]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[No more filesystem operations are needed. Will
+ release any held locks.]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total size of all files in the filesystem.]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should be optimally
+ be split into to minimize i/o time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a file status object that represents the path.
+ @param f The path we want information from
+ @return a FileStatus object
+ @throws FileNotFoundException when the path does not exist;
+ IOException see specific implementation]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the checksum of a file.
+
+ @param f The file path
+ @return The file checksum. The default return value is null,
+ which indicates that no checksum algorithm is implemented
+ in the corresponding FileSystem.]]>
+ </doc>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ <doc>
+ <![CDATA[Set the verify checksum flag. This is only applicable if the
+ corresponding FileSystem supports checksum. By default doesn't do anything.
+ @param verifyChecksum]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permission of a path.
+ @param p
+ @param permission]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set owner of a path (i.e. a file or a directory).
+ The parameters username and groupname cannot both be null.
+ @param p The path
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set access time of a file
+ @param p The path
+ @param mtime Set the modification time of this file.
+ The number of milliseconds since Jan 1, 1970.
+ A value of -1 means that this call should not set modification time.
+ @param atime Set the access time of this file.
+ The number of milliseconds since Jan 1, 1970.
+ A value of -1 means that this call should not set access time.]]>
+ </doc>
+ </method>
+ <method name="getStatistics" return="java.util.Map"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="use {@link #getAllStatistics} instead">
+ <doc>
+ <![CDATA[Get the Map of Statistics object indexed by URI Scheme.
+ @return a Map having a key as URI scheme and value as Statistics object
+ @deprecated use {@link #getAllStatistics} instead]]>
+ </doc>
+ </method>
+ <method name="getAllStatistics" return="java.util.List"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the FileSystem classes that have Statistics]]>
+ </doc>
+ </method>
+ <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scheme" type="java.lang.String"/>
+ <param name="cls" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get the statistics for a particular file system
+ @param cls the class to lookup
+ @return a statistics object]]>
+ </doc>
+ </method>
+ <method name="clearStatistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="printStatistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="statistics" type="org.apache.hadoop.fs.FileSystem.Statistics"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The statistics for this file system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An abstract base class for a fairly generic filesystem. It
+ may be implemented as a distributed filesystem, or as a "local"
+ one that reflects the locally-connected disk. The local version
+ exists for small Hadoop instances and for testing.
+
+ <p>
+
+ All user code that may potentially use the Hadoop Distributed
+ File System should be written to use a FileSystem object. The
+ Hadoop DFS is a multi-machine system that appears as a single
+ disk. It's useful because of its fault tolerance and potentially
+ very large capacity.
+
+ <p>
+ The local implementation is {@link LocalFileSystem} and distributed
+ implementation is DistributedFileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem -->
+ <!-- start class org.apache.hadoop.fs.FileSystem.Statistics -->
+ <class name="FileSystem.Statistics" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileSystem.Statistics" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="incrementBytesRead"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes read in the statistics
+ @param newBytes the additional bytes read]]>
+ </doc>
+ </method>
+ <method name="incrementBytesWritten"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes written in the statistics
+ @param newBytes the additional bytes written]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes read
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes written
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the counts of bytes to 0.]]>
+ </doc>
+ </method>
+ <method name="getScheme" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the uri scheme associated with this statistics object.
+ @return the schema associated with this set of statistics]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem.Statistics -->
+ <!-- start class org.apache.hadoop.fs.FileUtil -->
+ <class name="FileUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <doc>
+ <![CDATA[convert an array of FileStatus to an array of Path
+
+ @param stats
+ an array of FileStatus objects
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[convert an array of FileStatus to an array of Path.
+ If stats if null, return path
+ @param stats
+ an array of FileStatus objects
+ @param path
+ default path to return in stats is null
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="fullyDelete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a directory and all its contents. If
+ we return false, the directory may be partially-deleted.]]>
+ </doc>
+ </method>
+ <method name="fullyDelete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link FileSystem#delete(Path, boolean)}">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recursively delete a directory.
+
+ @param fs {@link FileSystem} on which the path is present
+ @param dir directory to recursively delete
+ @throws IOException
+ @deprecated Use {@link FileSystem#delete(Path, boolean)}]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copyMerge" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dstFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="addString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy all files in a directory to one output file (merge).]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy local files to a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="java.io.File"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy FileSystem files to local files.]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert a os-native filename to a path that works for the shell.
+ @param filename The filename to convert
+ @return The unix pathname
+ @throws IOException on windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert a os-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @return The unix pathname
+ @throws IOException on windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <param name="makeCanonicalPath" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert a os-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @param makeCanonicalPath
+ Whether to make canonical path for the file passed
+ @return The unix pathname
+ @throws IOException on windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="getDU" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Takes an input dir and returns the du on that local directory. Very basic
+ implementation.
+
+ @param dir
+ The input dir to get the disk space of this local dir
+ @return The total disk space of the input local directory]]>
+ </doc>
+ </method>
+ <method name="unZip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="unzipDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a File input it will unzip the file in a the unzip directory
+ passed as the second parameter
+ @param inFile The zip file as input
+ @param unzipDir The unzip directory where to unzip the zip file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unTar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="untarDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a Tar File as input it will untar the file in a the untar directory
+ passed as the second parameter
+
+ This utility will untar ".tar" files and ".tar.gz","tgz" files.
+
+ @param inFile The tar file as input.
+ @param untarDir The untar directory where to untar the tar file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="symLink" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="linkname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a soft link between a src and destination
+ only on a local disk. HDFS does not support this
+ @param target the target for symlink
+ @param linkname the symlink
+ @return value returned by the command]]>
+ </doc>
+ </method>
+ <method name="chmod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="perm" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Change the permissions on a filename.
+ @param filename the name of the file to change
+ @param perm the permission string
+ @return the exit code from the command
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="createLocalTempFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="basefile" type="java.io.File"/>
+ <param name="prefix" type="java.lang.String"/>
+ <param name="isDeleteOnExit" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a tmp file for a base file.
+ @param basefile the base file of the tmp
+ @param prefix file name prefix of tmp
+ @param isDeleteOnExit if true, the tmp will be deleted when the VM exits
+ @return a newly created tmp file
+ @exception IOException If a tmp file cannot created
+ @see java.io.File#createTempFile(String, String, File)
+ @see java.io.File#deleteOnExit()]]>
+ </doc>
+ </method>
+ <method name="replaceFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="target" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move the src file to the name specified by target.
+ @param src the source file
+ @param target the target file
+ @exception IOException If this operation fails]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of file-processing util methods]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil -->
+ <!-- start class org.apache.hadoop.fs.FileUtil.HardLink -->
+ <class name="FileUtil.HardLink" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil.HardLink"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createHardLink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.io.File"/>
+ <param name="linkName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a hardlink]]>
+ </doc>
+ </method>
+ <method name="getLinkCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Retrieves the number of links to the specified file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Class for creating hardlinks.
+ Supports Unix, Cygwin, WindXP.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil.HardLink -->
+ <!-- start class org.apache.hadoop.fs.FilterFileSystem -->
+ <class name="FilterFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FilterFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FilterFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List files in a directory.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param newDir]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system
+
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should be optimally
+ be split into to minimize i/o time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get file status.]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A <code>FilterFileSystem</code> contains
+ some other file system, which it uses as
+ its basic file system, possibly transforming
+ the data along the way or providing additional
+ functionality. The class <code>FilterFileSystem</code>
+ itself simply overrides all methods of
+ <code>FileSystem</code> with versions that
+ pass all requests to the contained file
+ system. Subclasses of <code>FilterFileSystem</code>
+ may further override some of these methods
+ and may also provide additional methods
+ and fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FilterFileSystem -->
+ <!-- start class org.apache.hadoop.fs.FSDataInputStream -->
+ <class name="FSDataInputStream" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSDataInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="desired" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
+ and buffers input through a {@link BufferedInputStream}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataInputStream -->
+ <!-- start class org.apache.hadoop.fs.FSDataOutputStream -->
+ <class name="FSDataOutputStream" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Syncable"/>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWrappedStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps a {@link OutputStream} in a {@link DataOutputStream},
+ buffers output through a {@link BufferedOutputStream} and creates a checksum
+ file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataOutputStream -->
+ <!-- start class org.apache.hadoop.fs.FSError -->
+ <class name="FSError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Thrown for unexpected filesystem errors, presumed to reflect disk errors
+ in the native filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSError -->
+ <!-- start class org.apache.hadoop.fs.FSInputChecker -->
+ <class name="FSInputChecker" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs]]>
+ </doc>
+ </constructor>
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int, boolean, java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs
+ @param sum the type of Checksum engine
+ @param chunkSize maximun chunk size
+ @param checksumSize the number byte of each checksum]]>
+ </doc>
+ </constructor>
+ <method name="readChunk" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads in next checksum chunk data into <code>buf</code> at <code>offset</code>
+ and checksum into <code>checksum</code>.
+ The method is used for implementing read, therefore, it should be optimized
+ for sequential reading
+ @param pos chunkPos
+ @param buf desitination buffer
+ @param offset offset in buf at which to store data
+ @param len maximun number of bytes to read
+ @return number of bytes read]]>
+ </doc>
+ </method>
+ <method name="getChunkPosition" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <doc>
+ <![CDATA[Return position of beginning of chunk containing pos.
+
+ @param pos a postion in the file
+ @return the starting position of the chunk which contains the byte]]>
+ </doc>
+ </method>
+ <method name="needChecksum" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if there is a need for checksum verification]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read one checksum-verified byte
+
+ @return the next byte of data, or <code>-1</code> if the end of the
+ stream is reached.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read checksum verified bytes from this byte-input stream into
+ the specified byte array, starting at the given offset.
+
+ <p> This method implements the general contract of the corresponding
+ <code>{@link InputStream#read(byte[], int, int) read}</code> method of
+ the <code>{@link InputStream}</code> class. As an additional
+ convenience, it attempts to read as many bytes as possible by repeatedly
+ invoking the <code>read</code> method of the underlying stream. This
+ iterated <code>read</code> continues until one of the following
+ conditions becomes true: <ul>
+
+ <li> The specified number of bytes have been read,
+
+ <li> The <code>read</code> method of the underlying stream returns
+ <code>-1</code>, indicating end-of-file.
+
+ </ul> If the first <code>read</code> on the underlying stream returns
+ <code>-1</code> to indicate end-of-file then this method returns
+ <code>-1</code>. Otherwise this method returns the number of bytes
+ actually read.
+
+ @param b destination buffer.
+ @param off offset at which to start storing bytes.
+ @param len maximum number of bytes to read.
+ @return the number of bytes read, or <code>-1</code> if the end of
+ the stream has been reached.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if any checksum error occurs]]>
+ </doc>
+ </method>
+ <method name="checksum2long" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="checksum" type="byte[]"/>
+ <doc>
+ <![CDATA[Convert a checksum byte array to a long]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over and discards <code>n</code> bytes of data from the
+ input stream.
+
+ <p>This method may skip more bytes than are remaining in the backing
+ file. This produces no exception and the number of bytes skipped
+ may include some number of bytes that were beyond the EOF of the
+ backing file. Attempting to read from the stream after skipping past
+ the end will result in -1 indicating the end of the file.
+
+<p>If <code>n</code> is negative, no bytes are skipped.
+
+ @param n the number of bytes to be skipped.
+ @return the actual number of bytes skipped.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to skip to is corrupted]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given position in the stream.
+ The next read() will be from that position.
+
+ <p>This method may seek past the end of the file.
+ This produces no exception and an attempt to read from
+ the stream will result in -1 indicating the end of the file.
+
+ @param pos the postion to seek to.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to seek to is corrupted]]>
+ </doc>
+ </method>
+ <method name="readFully" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="stm" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A utility function that tries to read up to <code>len</code> bytes from
+ <code>stm</code>
+
+ @param stm an input stream
+ @param buf destiniation buffer
+ @param offset offset at which to store data
+ @param len number of bytes to read
+ @return actual number of bytes read
+ @throws IOException if there is any IO error]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ <param name="sum" type="java.util.zip.Checksum"/>
+ <param name="maxChunkSize" type="int"/>
+ <param name="checksumSize" type="int"/>
+ <doc>
+ <![CDATA[Set the checksum related parameters
+ @param verifyChecksum whether to verify checksum
+ @param sum which type of checksum to use
+ @param maxChunkSize maximun chunk size
+ @param checksumSize checksum size]]>
+ </doc>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="readlimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="file" type="org.apache.hadoop.fs.Path"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file name from which data is read from]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This is a generic input stream for verifying checksums for
+ data before it is read by a user.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputChecker -->
+ <!-- start class org.apache.hadoop.fs.FSInputStream -->
+ <class name="FSInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSInputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="seek"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks a different copy of the data. Returns true if
+ found a new source, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[FSInputStream is a generic old InputStream with a little bit
+ of RAF-style seek ability.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputStream -->
+ <!-- start class org.apache.hadoop.fs.FSOutputSummer -->
+ <class name="FSOutputSummer" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSOutputSummer" type="java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="writeChunk"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write one byte]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes <code>len</code> bytes from the specified byte array
+ starting at offset <code>off</code> and generate a checksum for
+ each data chunk.
+
+ <p> This method stores bytes from the given array into this
+ stream's buffer before it gets checksumed. The buffer gets checksumed
+ and flushed to the underlying output stream when all data
+ in a checksum chunk are in the buffer. If the buffer is empty and
+ requested length is at least as large as the size of next checksum chunk
+ size, this method will checksum and write the chunk directly
+ to the underlying output stream. Thus it avoids uneccessary data copy.
+
+ @param b the data.
+ @param off the start offset in the data.
+ @param len the number of bytes to write.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="convertToByteStream" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sum" type="java.util.zip.Checksum"/>
+ <param name="checksumSize" type="int"/>
+ <doc>
+ <![CDATA[Converts a checksum integer value to a byte stream]]>
+ </doc>
+ </method>
+ <method name="resetChecksumChunk"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Resets existing buffer with a new one of the specified size.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is a generic output stream for generating checksums for
+ data before it is written to the underlying stream]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSOutputSummer -->
+ <!-- start class org.apache.hadoop.fs.FsShell -->
+ <class name="FsShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="FsShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentTrashDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the Trash object associated with this shell.]]>
+ </doc>
+ </method>
+ <method name="byteDesc" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Consider using {@link org.apache.hadoop.util.StringUtils#byteDesc} instead.">
+ <param name="len" type="long"/>
+ <doc>
+ <![CDATA[Return an abbreviated English-language desc of the byte length
+ @deprecated Consider using {@link org.apache.hadoop.util.StringUtils#byteDesc} instead.]]>
+ </doc>
+ </method>
+ <method name="limitDecimalTo2" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="Consider using {@link org.apache.hadoop.util.StringUtils#limitDecimalTo2} instead.">
+ <param name="d" type="double"/>
+ <doc>
+ <![CDATA[@deprecated Consider using {@link org.apache.hadoop.util.StringUtils#limitDecimalTo2} instead.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[run]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dateForm" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="modifFmt" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provide command line access to a FileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsShell -->
+ <!-- start class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
+ <class name="FsUrlStreamHandlerFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.net.URLStreamHandlerFactory"/>
+ <constructor name="FsUrlStreamHandlerFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsUrlStreamHandlerFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createURLStreamHandler" return="java.net.URLStreamHandler"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Factory for URL stream handlers.
+
+ There is only one handler whose job is to create UrlConnections. A
+ FsUrlConnection relies on FileSystem to choose the appropriate FS
+ implementation.
+
+ Before returning our handler, we make sure that FileSystem knows an
+ implementation for the requested scheme/protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
+ <!-- start class org.apache.hadoop.fs.HarFileSystem -->
+ <class name="HarFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HarFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[public construction of harfilesystem]]>
+ </doc>
+ </constructor>
+ <constructor name="HarFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor to create a HarFileSystem with an
+ underlying filesystem.
+ @param fs]]>
+ </doc>
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize a Har filesystem per har archive. The
+ archive home directory is the top level directory
+ in the filesystem that contains the HAR archive.
+ Be careful with this method, you do not want to go
+ on creating new Filesystem instances per call to
+ path.getFileSystem().
+ the uri of Har is
+ har://underlyingfsscheme-host:port/archivepath.
+ or
+ har:///archivepath. This assumes the underlying filesystem
+ to be used in case not specified.]]>
+ </doc>
+ </method>
+ <method name="getHarVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the top level archive.]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the uri of this filesystem.
+ The uri is of the form
+ har://underlyingfsschema-host:port/pathintheunderlyingfs]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[get block locations from the underlying fs
+ @param file the input filestatus to get block locations
+ @param start the start in the file
+ @param len the length in the file
+ @return block locations for this segment of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getHarHash" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[the hash of the path p inside
+ the filesystem
+ @param p the path in the harfilesystem
+ @return the hash code of the path.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[return the filestatus of files in har archive.
+ The permission returned are that of the archive
+ index files. The permissions are not persisted
+ while creating a hadoop archive.
+ @param f the path in har filesystem
+ @return filestatus.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a har input stream which fakes end of
+ file. It reads the index files to get the part
+ file name and the size and start of the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[liststatus returns the children of a directory
+ after looking up the index files.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the top level archive path.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[copies the file in the har filesystem to a local file.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permisssion" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <field name="VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is an implementation of the Hadoop Archive
+ Filesystem. This archive Filesystem has index files
+ of the form _index* and has contents of the form
+ part-*. The index files store the indexes of the
+ real files. The index files are of the form _masterindex
+ and _index. The master index is a level of indirection
+ into the index file to make the lookups faster. The index
+ file is sorted with hash code of the paths that it contains
+ and the master index contains pointers to the positions in
+ index for ranges of hashcodes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.HarFileSystem -->
+ <!-- start class org.apache.hadoop.fs.InMemoryFileSystem -->
+ <class name="InMemoryFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InMemoryFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InMemoryFileSystem" type="java.net.URI, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reserveSpaceWithCheckSum" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="size" type="long"/>
+ <doc>
+ <![CDATA[Register a file with its size. This will also register a checksum for the
+ file that the user is trying to create. This is required since none of
+ the FileSystem APIs accept the size of the file as argument. But since it
+ is required for us to apriori know the size of the file we are going to
+ create, the user must call this method for each file he wants to create
+ and reserve memory for that file. We either succeed in reserving memory
+ for both the main file and the checksum file and return true, or return
+ false.]]>
+ </doc>
+ </method>
+ <method name="getFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getNumFiles" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getFSSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPercentUsed" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An implementation of the in-memory filesystem. This implementation assumes
+ that the file lengths are known ahead of time and the total lengths of all
+ the files is below a certain number (like 100 MB, configurable). Use the API
+ reserveSpaceWithCheckSum(Path f, int size) (see below for a description of
+ the API for reserving space in the FS. The uri of this filesystem starts with
+ ramfs:// .]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.InMemoryFileSystem -->
+ <!-- start class org.apache.hadoop.fs.LocalDirAllocator -->
+ <class name="LocalDirAllocator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalDirAllocator" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an allocator object
+ @param contextCfgItemName]]>
+ </doc>
+ </constructor>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. This method should be used if the size of
+ the file is not known apriori. We go round-robin over the set of disks
+ (via the configured dirs) and return the first complete path where
+ we could create the parent directory of the passed path.
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. Pass size as -1 if not known apriori. We
+ round-robin over the set of disks (via the configured dirs) and return
+ the first complete path which has enough space
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathToRead" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS for reading. We search through all the
+ configured dirs for the file's existence and return the complete
+ path to the file when we find one
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createTmpFileForWrite" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a temporary file in the local FS. Pass size as -1 if not known
+ apriori. We round-robin over the set of disks (via the configured dirs)
+ and select the first complete path which has enough space. A file is
+ created on this directory. The file is guaranteed to go away when the
+ JVM exits.
+ @param pathStr prefix for the temporary file
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return a unique temporary file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isContextValid" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextCfgItemName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Method to check whether a context is valid
+ @param contextCfgItemName
+ @return true/false]]>
+ </doc>
+ </method>
+ <method name="ifExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[We search through all the configured dirs for the file's existence
+ and return true when we find it
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return true if files exist. false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of a round-robin scheme for disk allocation for creating
+ files. The way it works is that it is kept track what disk was last
+ allocated for a file write. For the current request, the next disk from
+ the set of disks would be allocated if the free space on the disk is
+ sufficient to accommodate the file that is being considered for
+ creation. If the space requirements cannot be met, the next disk in order
+ would be tried and so on till a disk is found with sufficient capacity.
+ Once a disk with sufficient space is identified, a check is done to make
+ sure that the disk is writable. Also, there is an API provided that doesn't
+ take the space requirements into consideration but just checks whether the
+ disk under consideration is writable (this should be used for cases where
+ the file size is not known apriori). An API is provided to read a path that
+ was created earlier. That API works by doing a scan of all the disks for the
+ input pathname.
+ This implementation also provides the functionality of having multiple
+ allocators per JVM (one for each unique functionality or context, like
+ mapred, dfs-client, etc.). It ensures that there is only one instance of
+ an allocator per context per JVM.
+ Note:
+ 1. The contexts referred above are actually the configuration items defined
+ in the Configuration class like "mapred.local.dir" (for which we want to
+ control the dir allocations). The context-strings are exactly those
+ configuration items.
+ 2. This implementation does not take into consideration cases where
+ a disk becomes read-only or goes out of space while a file is being written
+ to (disks are shared between multiple processes, and so the latter situation
+ is probable).
+ 3. In the class implementation, "Disk" is referred to as "Dir", which
+ actually points to the configured directory on the Disk which will be the
+ parent for all file write/read allocations.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalDirAllocator -->
+ <!-- start class org.apache.hadoop.fs.LocalFileSystem -->
+ <class name="LocalFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocalFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRaw" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Moves files to a bad file directory on the same device, so that their
+ storage will not be reused.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the checksumed local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalFileSystem -->
+ <!-- start class org.apache.hadoop.fs.MD5MD5CRC32FileChecksum -->
+ <class name="MD5MD5CRC32FileChecksum" extends="org.apache.hadoop.fs.FileChecksum"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MD5MD5CRC32FileChecksum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Same as this(0, 0, null)]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5MD5CRC32FileChecksum" type="int, long, org.apache.hadoop.io.MD5Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a MD5FileChecksum]]>
+ </doc>
+ </constructor>
+ <method name="getAlgorithmName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="xml" type="org.znerd.xmlenc.XMLOutputter"/>
+ <param name="that" type="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write that object to xml output.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attrs" type="org.xml.sax.Attributes"/>
+ <exception name="SAXException" type="org.xml.sax.SAXException"/>
+ <doc>
+ <![CDATA[Return the object represented in the attributes.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LENGTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[MD5 of MD5 of CRC32.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.MD5MD5CRC32FileChecksum -->
+ <!-- start class org.apache.hadoop.fs.Path -->
+ <class name="Path" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="Path" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a path from a String. Path strings are URIs, but with
+ unescaped elements and some additional normalization.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Path from components.]]>
+ </doc>
+ </constructor>
+ <method name="toUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this to a URI.]]>
+ </doc>
+ </method>
+ <method name="getFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the FileSystem that owns this Path.]]>
+ </doc>
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True if the directory of this path is absolute.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the final component of this path.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the parent of a path or null if at root.]]>
+ </doc>
+ </method>
+ <method name="suffix" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a suffix to the final name in the path.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="depth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of elements in this path.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <doc>
+ <![CDATA[Returns a qualified path object.]]>
+ </doc>
+ </method>
+ <field name="SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The directory separator, a slash.]]>
+ </doc>
+ </field>
+ <field name="SEPARATOR_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CUR_DIR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Names a file or directory in a {@link FileSystem}.
+ Path strings use slash as the directory separator. A path string is
+ absolute if it begins with a slash.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Path -->
+ <!-- start interface org.apache.hadoop.fs.PathFilter -->
+ <interface name="PathFilter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Tests whether or not the specified abstract pathname should be
+ included in a pathname list.
+
+ @param path The abstract pathname to be tested
+ @return <code>true</code> if and only if <code>pathname</code>
+ should be included]]>
+ </doc>
+ </method>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PathFilter -->
+ <!-- start interface org.apache.hadoop.fs.PositionedReadable -->
+ <interface name="PositionedReadable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read upto the specified number of bytes, from a given
+ position within a file, and return the number of bytes read. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the specified number of bytes, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read number of bytes equalt to the length of the buffer, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits positional reading.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PositionedReadable -->
+ <!-- start class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <class name="RawLocalFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RawLocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the specified directory hierarchy. Does not
+ treat existence as an error.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsWorkingFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chown to set owner.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chmod to set permission.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the raw local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <!-- start interface org.apache.hadoop.fs.Seekable -->
+ <interface name="Seekable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks a different copy of the data. Returns true if
+ found a new source, false otherwise.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits seeking.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Seekable -->
+ <!-- start interface org.apache.hadoop.fs.Syncable -->
+ <interface name="Syncable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Synchronize all buffer with the underlying devices.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface declare the sync() operation.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Syncable -->
+ <!-- start class org.apache.hadoop.fs.Trash -->
+ <class name="Trash" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Trash" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a trash can accessor.
+ @param conf a Configuration]]>
+ </doc>
+ </constructor>
+ <constructor name="Trash" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a trash can accessor for the FileSystem provided.]]>
+ </doc>
+ </constructor>
+ <method name="moveToTrash" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move a file or directory to the current trash directory.
+ @return false if the item is already in the trash or trash is disabled]]>
+ </doc>
+ </method>
+ <method name="checkpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a trash checkpoint.]]>
+ </doc>
+ </method>
+ <method name="expunge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete old checkpoints.]]>
+ </doc>
+ </method>
+ <method name="getEmptier" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a {@link Runnable} that periodically empties the trash of all
+ users, intended to be run by the superuser. Only one checkpoint is kept
+ at a time.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Run an emptier.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides a <i>trash</i> feature. Files are moved to a user's trash
+ directory, a subdirectory of their home directory named ".Trash". Files are
+ initially moved to a <i>current</i> sub-directory of the trash directory.
+ Within that sub-directory their original path is preserved. Periodically
+ one may checkpoint the current trash and remove older checkpoints. (This
+ design permits trash management without enumeration of the full trash
+ content, without date support in the filesystem, and without clock
+ synchronization.)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Trash -->
+ <doc>
+ <![CDATA[An abstract file system API.]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.fs.ftp">
+ <!-- start class org.apache.hadoop.fs.ftp.FTPException -->
+ <class name="FTPException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.String, java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A class to wrap a {@link Throwable} into a Runtime Exception.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPException -->
+ <!-- start class org.apache.hadoop.fs.ftp.FTPFileSystem -->
+ <class name="FTPFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A stream obtained via this call must be closed before using other APIs of
+ this class or else the invocation will block.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BLOCK_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} backed by an FTP client provided by <a
+ href="http://commons.apache.org/net/">Apache Commons Net</a>.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPFileSystem -->
+ <!-- start class org.apache.hadoop.fs.ftp.FTPInputStream -->
+ <class name="FTPInputStream" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPInputStream" type="java.io.InputStream, org.apache.commons.net.ftp.FTPClient, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readLimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPInputStream -->
+</package>
+<package name="org.apache.hadoop.fs.kfs">
+ <!-- start class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+ <class name="KosmosFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="KosmosFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return null if the file doesn't exist; otherwise, get the
+ locations of the various chunks of the file from KFS.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A FileSystem backed by KFS.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+ <doc>
+ <![CDATA[<h1>A client for the Kosmos filesystem (KFS)</h1>
+
+<h3>Introduction</h3>
+
+This page describes how to use Kosmos Filesystem
+(<a href="http://kosmosfs.sourceforge.net"> KFS </a>) as a backing
+store with Hadoop. This page assumes that you have downloaded the
+KFS software and installed necessary binaries as outlined in the KFS
+documentation.
+
+<h3>Steps</h3>
+
+ <ul>
+ <li>In the Hadoop conf directory edit core-site.xml,
+ add the following:
+ <pre>
+&lt;property&gt;
+ &lt;name&gt;fs.kfs.impl&lt;/name&gt;
+ &lt;value&gt;org.apache.hadoop.fs.kfs.KosmosFileSystem&lt;/value&gt;
+ &lt;description&gt;The FileSystem for kfs: uris.&lt;/description&gt;
+&lt;/property&gt;
+ </pre>
+
+ <li>In the Hadoop conf directory edit core-site.xml,
+ adding the following (with appropriate values for
+ &lt;server&gt; and &lt;port&gt;):
+ <pre>
+&lt;property&gt;
+ &lt;name&gt;fs.default.name&lt;/name&gt;
+ &lt;value&gt;kfs://&lt;server:port&gt;&lt;/value&gt;
+&lt;/property&gt;
+
+&lt;property&gt;
+ &lt;name&gt;fs.kfs.metaServerHost&lt;/name&gt;
+ &lt;value&gt;&lt;server&gt;&lt;/value&gt;
+ &lt;description&gt;The location of the KFS meta server.&lt;/description&gt;
+&lt;/property&gt;
+
+&lt;property&gt;
+ &lt;name&gt;fs.kfs.metaServerPort&lt;/name&gt;
+ &lt;value&gt;&lt;port&gt;&lt;/value&gt;
+ &lt;description&gt;The location of the meta server's port.&lt;/description&gt;
+&lt;/property&gt;
+
+</pre>
+ </li>
+
+ <li>Copy KFS's <i> kfs-0.1.jar </i> to Hadoop's lib directory. This step
+ enables Hadoop to load the KFS-specific modules. Note
+ that kfs-0.1.jar was built when you compiled KFS source
+ code. This jar file contains code that calls KFS's client
+ library code via JNI; the native code is in KFS's <i>
+ libkfsClient.so </i> library.
+ </li>
+
+ <li> When the Hadoop map/reduce trackers start up, those
+processes (on local as well as remote nodes) will now need to load
+KFS's <i> libkfsClient.so </i> library. To simplify this process, it is advisable to
+store libkfsClient.so in an NFS accessible directory (similar to where
+Hadoop binaries/scripts are stored); then, modify Hadoop's
+conf/hadoop-env.sh adding the following line and providing suitable
+value for &lt;path&gt;:
+<pre>
+export LD_LIBRARY_PATH=&lt;path&gt;
+</pre>
+
+
+ <li>Start only the map/reduce trackers
+ <br />
+ example: execute Hadoop's bin/start-mapred.sh</li>
+ </ul>
+<br/>
+
+If the map/reduce job trackers start up, all file-I/O is done to KFS.]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.fs.permission">
+ <!-- start class org.apache.hadoop.fs.permission.AccessControlException -->
+ <class name="AccessControlException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.security.AccessControlException}
+ instead.">
+ <constructor name="AccessControlException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor is needed for unwrapping from
+ {@link org.apache.hadoop.ipc.RemoteException}.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an {@link AccessControlException}
+ with the specified detail message.
+ @param s the detail message.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new exception with the specified cause and a detail
+ message of <tt>(cause==null ? null : cause.toString())</tt> (which
+ typically contains the class and detail message of <tt>cause</tt>).
+ @param cause the cause (which is saved for later retrieval by the
+ {@link #getCause()} method). (A <tt>null</tt> value is
+ permitted, and indicates that the cause is nonexistent or
+ unknown.)]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[An exception class for access control related issues.
+ @deprecated Use {@link org.apache.hadoop.security.AccessControlException}
+ instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.AccessControlException -->
+ <!-- start class org.apache.hadoop.fs.permission.FsAction -->
+ <class name="FsAction" extends="java.lang.Enum"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.fs.permission.FsAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="implies" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[Return true if this action implies that action.
+ @param that]]>
+ </doc>
+ </method>
+ <method name="and" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[AND operation.]]>
+ </doc>
+ </method>
+ <method name="or" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[OR operation.]]>
+ </doc>
+ </method>
+ <method name="not" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[NOT operation.]]>
+ </doc>
+ </method>
+ <field name="NONE" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="EXECUTE" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE_EXECUTE" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ_EXECUTE" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ_WRITE" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ALL" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SYMBOL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Symbolic representation]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[File system actions, e.g. read, write, etc.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsAction -->
+ <!-- start class org.apache.hadoop.fs.permission.FsPermission -->
+ <class name="FsPermission" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct by the given {@link FsAction}.
+ @param u user action
+ @param g group action
+ @param o other action]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="short"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct by the given mode.
+ @param mode
+ @see #toShort()]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor
+
+ @param other other permission]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="permission" type="short"/>
+ <doc>
+ <![CDATA[Create an immutable {@link FsPermission} object.]]>
+ </doc>
+ </method>
+ <method name="getUserAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getGroupAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getOtherAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return other {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="fromShort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="short"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link FsPermission} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="toShort" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Encode the object to a short.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply a umask to this permission and return a new one]]>
+ </doc>
+ </method>
+ <method name="getUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="setUMask"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Set the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="getDefault" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default permission.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unixSymbolicPermission" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Create a FsPermission from a Unix symbolic permission string
+ @param unixSymbolicPermission e.g. "-rw-rw-rw-"]]>
+ </doc>
+ </method>
+ <field name="UMASK_LABEL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[umask property label]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_UMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A class for file/directory permissions.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsPermission -->
+ <!-- start class org.apache.hadoop.fs.permission.PermissionStatus -->
+ <class name="PermissionStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="PermissionStatus" type="java.lang.String, java.lang.String, org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <param name="group" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Create an immutable {@link PermissionStatus} object.]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user name]]>
+ </doc>
+ </method>
+ <method name="getGroupName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group name]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return permission]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply umask.
+ @see FsPermission#applyUMask(FsPermission)]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link PermissionStatus} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a {@link PermissionStatus} from its base components.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store permission related information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.PermissionStatus -->
+</package>
+<package name="org.apache.hadoop.fs.s3">
+ <!-- start class org.apache.hadoop.fs.s3.Block -->
+ <class name="Block" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Block" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Holds metadata about a block of data being stored in a {@link FileSystemStore}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.Block -->
+ <!-- start interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <interface name="FileSystemStore" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inode" type="org.apache.hadoop.fs.s3.INode"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="inodeExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveINode" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveBlock" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="byteRangeStart" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listSubPaths" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listDeepSubPaths" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="purge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete everything. Used for testing.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="dump"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Diagnostic method to dump all INodes to the console.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for storing and retrieving {@link INode}s and {@link Block}s.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <!-- start class org.apache.hadoop.fs.s3.INode -->
+ <class name="INode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="INode" type="org.apache.hadoop.fs.s3.INode.FileType, org.apache.hadoop.fs.s3.Block[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.fs.s3.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileType" return="org.apache.hadoop.fs.s3.INode.FileType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSerializedLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="serialize" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deserialize" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="FILE_TYPES" type="org.apache.hadoop.fs.s3.INode.FileType[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DIRECTORY_INODE" type="org.apache.hadoop.fs.s3.INode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Holds file metadata including type (regular file, or directory),
+ and the list of blocks that are pointers to the data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.INode -->
+ <!-- start class org.apache.hadoop.fs.s3.MigrationTool -->
+ <class name="MigrationTool" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="MigrationTool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ This class is a tool for migrating data from an older to a newer version
+ of an S3 filesystem.
+ </p>
+ <p>
+ All files in the filesystem are migrated by re-writing the block metadata
+ - no datafiles are touched.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.MigrationTool -->
+ <!-- start class org.apache.hadoop.fs.s3.S3Credentials -->
+ <class name="S3Credentials" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Credentials"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@throws IllegalArgumentException if credentials for S3 cannot be
+ determined.]]>
+ </doc>
+ </method>
+ <method name="getAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSecretAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Extracts AWS credentials from the filesystem URI or configuration.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Credentials -->
+ <!-- start class org.apache.hadoop.fs.s3.S3Exception -->
+ <class name="S3Exception" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Exception" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown if there is a problem communicating with Amazon S3.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Exception -->
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystem -->
+ <class name="S3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="S3FileSystem" type="org.apache.hadoop.fs.s3.FileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[FileStatus for S3 file systems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A block-based {@link FileSystem} backed by
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ </p>
+ @see NativeS3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystem -->
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <class name="S3FileSystemException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystemException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when there is a fatal exception while using {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <!-- start class org.apache.hadoop.fs.s3.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="org.apache.hadoop.fs.s3.S3FileSystemException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when Hadoop cannot read the version of the data stored
+ in {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.VersionMismatchException -->
+ <doc>
+ <![CDATA[<p>A distributed, block-based implementation of {@link
+org.apache.hadoop.fs.FileSystem} that uses <a href="http://aws.amazon.com/s3">Amazon S3</a>
+as a backing store.</p>
+
+<p>
+Files are stored in S3 as blocks (represented by
+{@link org.apache.hadoop.fs.s3.Block}), which have an ID and a length.
+Block metadata is stored in S3 as a small record (represented by
+{@link org.apache.hadoop.fs.s3.INode}) using the URL-encoded
+path string as a key. Inodes record the file type (regular file or directory) and the list of blocks.
+This design makes it easy to seek to any given position in a file by reading the inode data to compute
+which block to access, then using S3's support for
+<a href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.2">HTTP Range</a> headers
+to start streaming from the correct position.
+Renames are also efficient since only the inode is moved (by a DELETE followed by a PUT since
+S3 does not support renames).
+</p>
+<p>
+For a single file <i>/dir1/file1</i> which takes two blocks of storage, the file structure in S3
+would be something like this:
+</p>
+<pre>
+/
+/dir1
+/dir1/file1
+block-6415776850131549260
+block-3026438247347758425
+</pre>
+<p>
+Inodes start with a leading <code>/</code>, while blocks are prefixed with <code>block-</code>.
+</p>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.fs.s3native">
+ <!-- start class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
+ <class name="NativeS3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeS3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NativeS3FileSystem" type="org.apache.hadoop.fs.s3native.NativeFileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ If <code>f</code> is a file, this method will make a single call to S3.
+ If <code>f</code> is a directory, this method will make a maximum of
+ (<i>n</i> / 1000) + 2 calls to S3, where <i>n</i> is the total number of
+ files and directories contained directly in <code>f</code>.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} for reading and writing files stored on
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem} this implementation
+ stores files on S3 in their
+ native form so they can be read by other S3 tools.
+ </p>
+ @see org.apache.hadoop.fs.s3.S3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
+ <doc>
+ <![CDATA[<p>
+A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem} for reading and writing files on
+<a href="http://aws.amazon.com/s3">Amazon S3</a>.
+Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem}, which is block-based,
+this implementation stores
+files on S3 in their native form for interoperability with other S3 tools.
+</p>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.fs.shell">
+ <!-- start class org.apache.hadoop.fs.shell.Command -->
+ <class name="Command" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Command" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the command's name excluding the leading character -]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the command on the input path
+
+ @param path the input path
+ @throws IOException if any error occurs]]>
+ </doc>
+ </method>
+ <method name="runAll" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[For each source path, execute the command
+
+ @return 0 if it runs successfully; -1 if it fails]]>
+ </doc>
+ </method>
+ <field name="args" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract class for the execution of a file system command]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Command -->
+ <!-- start class org.apache.hadoop.fs.shell.CommandFormat -->
+ <class name="CommandFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CommandFormat" type="java.lang.String, int, int, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[constructor]]>
+ </doc>
+ </constructor>
+ <method name="parse" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="pos" type="int"/>
+ <doc>
+ <![CDATA[Parse parameters starting from the given position
+
+ @param args an array of input arguments
+ @param pos the position at which starts to parse
+ @return a list of parameters]]>
+ </doc>
+ </method>
+ <method name="getOpt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="option" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return if the option is set or not
+
+ @param option String representation of an option
+ @return true is the option is set; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Parse the args of a command and check the format of args.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.CommandFormat -->
+ <!-- start class org.apache.hadoop.fs.shell.Count -->
+ <class name="Count" extends="org.apache.hadoop.fs.shell.Command"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Count" type="java.lang.String[], int, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param cmd the count command
+ @param pos the starting index of the arguments]]>
+ </doc>
+ </constructor>
+ <method name="matches" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Check if a command is the count command
+
+ @param cmd A string representation of a command starting with "-"
+ @return true if this is a count command; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USAGE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DESCRIPTION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Count the number of directories, files, bytes, quota, and remaining quota.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Count -->
+</package>
+<package name="org.apache.hadoop.http">
+ <!-- start interface org.apache.hadoop.http.FilterContainer -->
+ <interface name="FilterContainer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="addFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map"/>
+ <doc>
+ <![CDATA[Add a filter to the container.
+ @param name Filter name
+ @param classname Filter class name
+ @param parameters a map from parameter names to initial values]]>
+ </doc>
+ </method>
+ <method name="addGlobalFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map"/>
+ <doc>
+ <![CDATA[Add a global filter to the container.
+ @param name filter name
+ @param classname filter class name
+ @param parameters a map from parameter names to initial values]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A container class for javax.servlet.Filter.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.http.FilterContainer -->
+ <!-- start class org.apache.hadoop.http.FilterInitializer -->
+ <class name="FilterInitializer" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FilterInitializer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Initialize a javax.servlet.Filter.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.http.FilterInitializer -->
+ <!-- start class org.apache.hadoop.http.HttpServer -->
+ <class name="HttpServer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.http.FilterContainer"/>
+ <constructor name="HttpServer" type="java.lang.String, java.lang.String, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as this(name, bindAddress, port, findPort, null);]]>
+ </doc>
+ </constructor>
+ <constructor name="HttpServer" type="java.lang.String, java.lang.String, int, boolean, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a status server on the given port.
+ The jsp scripts are taken from src/webapps/<name>.
+ @param name The name of the server
+ @param port The port to use on the server
+ @param findPort whether the server should start at the given port and
+ increment by 1 until it finds a free port.
+ @param conf Configuration]]>
+ </doc>
+ </constructor>
+ <method name="createBaseListener" return="org.mortbay.jetty.Connector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a required listener for the Jetty instance listening on the port
+ provided. This wrapper and all subclasses must create at least one
+ listener.]]>
+ </doc>
+ </method>
+ <method name="addDefaultApps"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="parent" type="org.mortbay.jetty.handler.ContextHandlerCollection"/>
+ <param name="appDir" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add default apps.
+ @param appDir The application directory
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="addDefaultServlets"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Add default servlets.]]>
+ </doc>
+ </method>
+ <method name="addContext"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ctxt" type="org.mortbay.jetty.servlet.Context"/>
+ <param name="isFiltered" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addContext"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="dir" type="java.lang.String"/>
+ <param name="isFiltered" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a context
+ @param pathSpec The path spec for the context
+ @param dir The directory containing the context
+ @param isFiltered if true, the servlet is added to the filter path mapping
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Set a value in the webapp context. These values are available to the jsp
+ pages as "application.getAttribute(name)".
+ @param name The name of the attribute
+ @param value The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="addServlet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add a servlet in the server.
+ @param name The name of the servlet (can be passed as null)
+ @param pathSpec The path spec for the servlet
+ @param clazz The servlet class]]>
+ </doc>
+ </method>
+ <method name="addInternalServlet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="this is a temporary method">
+ <param name="name" type="java.lang.String"/>
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add an internal servlet in the server.
+ @param name The name of the servlet (can be passed as null)
+ @param pathSpec The path spec for the servlet
+ @param clazz The servlet class
+ @deprecated this is a temporary method]]>
+ </doc>
+ </method>
+ <method name="addFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="addGlobalFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="defineFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="ctx" type="org.mortbay.jetty.servlet.Context"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map"/>
+ <param name="urls" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Define a filter for a context and set up default url mappings.]]>
+ </doc>
+ </method>
+ <method name="addFilterPathMapping"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="webAppCtx" type="org.mortbay.jetty.servlet.Context"/>
+ <doc>
+ <![CDATA[Add the path spec to the filter path mapping.
+ @param pathSpec The path spec
+ @param webAppCtx The WebApplicationContext to add to]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value in the webapp context.
+ @param name The name of the attribute
+ @return The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="getWebAppsPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the pathname to the webapps files.
+ @return the pathname as a URL
+ @throws IOException if 'webapps' directory cannot be found on CLASSPATH.]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the port that the server is on
+ @return the port]]>
+ </doc>
+ </method>
+ <method name="setThreads"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="min" type="int"/>
+ <param name="max" type="int"/>
+ <doc>
+ <![CDATA[Set the min, max number of worker threads (simultaneous connections).]]>
+ </doc>
+ </method>
+ <method name="addSslListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #addSslListener(InetSocketAddress, Configuration, boolean)}">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="keystore" type="java.lang.String"/>
+ <param name="storPass" type="java.lang.String"/>
+ <param name="keyPass" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Configure an ssl listener on the server.
+ @param addr address to listen on
+ @param keystore location of the keystore
+ @param storPass password for the keystore
+ @param keyPass password for the key
+ @deprecated Use {@link #addSslListener(InetSocketAddress, Configuration, boolean)}]]>
+ </doc>
+ </method>
+ <method name="addSslListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="sslConf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="needClientAuth" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Configure an ssl listener on the server.
+ @param addr address to listen on
+ @param sslConf conf to retrieve ssl options
+ @param needClientAuth whether client authentication is required]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start the server. Does not wait for the server to start.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[stop the server]]>
+ </doc>
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="webServer" type="org.mortbay.jetty.Server"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="listener" type="org.mortbay.jetty.Connector"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="webAppContext" type="org.mortbay.jetty.webapp.WebAppContext"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="findPort" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="defaultContexts" type="java.util.Map"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="filterNames" type="java.util.List"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Create a Jetty embedded server to answer http requests. The primary goal
+ is to serve up status information for the server.
+ There are three contexts:
+ "/logs/" -> points to the log directory
+ "/static/" -> points to common static files (src/webapps/static)
+ "/" -> the jsp server code from (src/webapps/<name>)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.http.HttpServer -->
+ <!-- start class org.apache.hadoop.http.HttpServer.StackServlet -->
+ <class name="HttpServer.StackServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HttpServer.StackServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A very simple servlet to serve up a text representation of the current
+ stack traces. It both returns the stacks to the caller and logs them.
+ Currently the stack traces are done sequentially rather than exactly the
+ same data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.http.HttpServer.StackServlet -->
+</package>
+<package name="org.apache.hadoop.io">
+ <!-- start class org.apache.hadoop.io.AbstractMapWritable -->
+ <class name="AbstractMapWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="AbstractMapWritable"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[constructor.]]>
+ </doc>
+ </constructor>
+ <method name="addToMap"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add a Class to the maps if it is not already present.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="byte"/>
+ <doc>
+ <![CDATA[@return the Class class for the specified id]]>
+ </doc>
+ </method>
+ <method name="getId" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[@return the id for the specified Class]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Used by child copy constructors.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the conf]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@param conf the conf to set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract base class for MapWritable and SortedMapWritable
+
+ Unlike org.apache.nutch.crawl.MapWritable, this class allows creation of
+ MapWritable&lt;Writable, MapWritable&gt; so the CLASS_TO_ID and ID_TO_CLASS
+ maps travel with the class instead of being static.
+
+ Class ids range from 1 to 127 so there can be at most 127 distinct classes
+ in any specific map instance.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.AbstractMapWritable -->
+ <!-- start class org.apache.hadoop.io.ArrayFile -->
+ <class name="ArrayFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A dense file-based mapping from integers to values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Reader -->
+ <class name="ArrayFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an array reader for the named file.]]>
+ </doc>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader before its <code>n</code>th value.]]>
+ </doc>
+ </method>
+ <method name="next" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read and return the next value in the file.]]>
+ </doc>
+ </method>
+ <method name="key" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the key associated with the most recent call to {@link
+ #seek(long)}, {@link #next(Writable)}, or {@link
+ #get(long,Writable)}.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the <code>n</code>th value in the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Reader -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Writer -->
+ <class name="ArrayFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a value to the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Writer -->
+ <!-- start class org.apache.hadoop.io.ArrayWritable -->
+ <class name="ArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for arrays containing instances of a class. The elements of this
+ writable must all be instances of the same class. If this writable will be
+ the input for a Reducer, you will need to create a subclass that sets the
+ value to be of the proper type.
+
+ For example:
+ <code>
+ public class IntArrayWritable extends ArrayWritable {
+ public IntArrayWritable() {
+ super(IntWritable.class);
+ }
+ }
+ </code>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayWritable -->
+ <!-- start class org.apache.hadoop.io.BinaryComparable -->
+ <class name="BinaryComparable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="BinaryComparable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLength" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return n st bytes 0..n-1 from {#getBytes()} are valid.]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return representative byte array for this instance.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.BinaryComparable"/>
+ <doc>
+ <![CDATA[Compare bytes from {#getBytes()}.
+ @see org.apache.hadoop.io.WritableComparator#compareBytes(byte[],int,int,byte[],int,int)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Compare bytes from {#getBytes()} to those provided.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return true if bytes from {#getBytes()} match.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a hash of the bytes returned from {#getBytes()}.
+ @see org.apache.hadoop.io.WritableComparator#hashBytes(byte[],int)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface supported by {@link org.apache.hadoop.io.WritableComparable}
+ types supporting ordering/permutation by a representative set of bytes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BinaryComparable -->
+ <!-- start class org.apache.hadoop.io.BloomMapFile -->
+ <class name="BloomMapFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BloomMapFile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="delete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="BLOOM_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HASH_COUNT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class extends {@link MapFile} and provides very much the same
+ functionality. However, it uses dynamic Bloom filters to provide
+ quick membership test for keys, and it offers a fast version of
+ {@link Reader#get(WritableComparable, Writable)} operation, especially in
+ case of sparsely populated MapFile-s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BloomMapFile -->
+ <!-- start class org.apache.hadoop.io.BloomMapFile.Reader -->
+ <class name="BloomMapFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BloomMapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="probablyHasKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Checks if this MapFile has the indicated key. The membership test is
+ performed using a Bloom filter, so the result always has a non-zero
+ probability of false positives.
+ @param key key to check
+ @return false iff key doesn't exist, true if key probably exists.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fast version of the
+ {@link MapFile.Reader#get(WritableComparable, Writable)} method. First
+ it checks the Bloom filter for the existence of the key, and only if
+ present it performs the real get operation. This yields significant
+ performance improvements for get operations on sparsely populated files.]]>
+ </doc>
+ </method>
+ <method name="getBloomFilter" return="org.apache.hadoop.util.bloom.Filter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the Bloom filter used by this instance of the Reader.
+ @return a Bloom filter (see {@link Filter})]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.BloomMapFile.Reader -->
+ <!-- start class org.apache.hadoop.io.BloomMapFile.Writer -->
+ <class name="BloomMapFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.BloomMapFile.Writer -->
+ <!-- start class org.apache.hadoop.io.BooleanWritable -->
+ <class name="BooleanWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BooleanWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BooleanWritable" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="get" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for booleans.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable -->
+ <!-- start class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <class name="BooleanWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BooleanWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BooleanWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.BytesWritable -->
+ <class name="BytesWritable" extends="org.apache.hadoop.io.BinaryComparable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-size sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="BytesWritable" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a BytesWritable using the byte array as the initial value.
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the BytesWritable.
+ @return The data is only valid between 0 and getLength() - 1.]]>
+ </doc>
+ </method>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #getBytes()} instead.">
+ <doc>
+ <![CDATA[Get the data from the BytesWritable.
+ @deprecated Use {@link #getBytes()} instead.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current size of the buffer.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #getLength()} instead.">
+ <doc>
+ <![CDATA[Get the current size of the buffer.
+ @deprecated Use {@link #getLength()} instead.]]>
+ </doc>
+ </method>
+ <method name="setSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Change the size of the buffer. The values in the old range are preserved
+ and any new values are undefined. The capacity is changed if it is
+ necessary.
+ @param size The new number of bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum size that could handled without
+ resizing the backing storage.
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_cap" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved.
+ @param new_cap The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="org.apache.hadoop.io.BytesWritable"/>
+ <doc>
+ <![CDATA[Set the BytesWritable to the contents of the given newData.
+ @param newData the value to set this BytesWritable to.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Set the value to a copy of the given byte range
+ @param newData the new values to copy in
+ @param offset the offset in newData to start at
+ @param length the number of bytes to copy]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="right_obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Are the two byte sequences equal?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Generate the stream of bytes as hex pairs separated by ' '.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is usable as a key or value.
+ It is resizable and distinguishes between the size of the sequence and
+ the current capacity. The hash function is the front of the md5 of the
+ buffer. The sort order is the same as memcmp.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable -->
+ <!-- start class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <class name="BytesWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BytesWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BytesWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.ByteWritable -->
+ <class name="ByteWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="ByteWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ByteWritable" type="byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Set the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a ByteWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two ByteWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for a single byte.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable -->
+ <!-- start class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <class name="ByteWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ByteWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for ByteWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <!-- start interface org.apache.hadoop.io.Closeable -->
+ <interface name="Closeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="use java.io.Closeable">
+ <implements name="java.io.Closeable"/>
+ <doc>
+ <![CDATA[@deprecated use java.io.Closeable]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Closeable -->
+ <!-- start class org.apache.hadoop.io.CompressedWritable -->
+ <class name="CompressedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="CompressedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ensureInflated"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Must be called by all methods which access fields to ensure that the data
+ has been uncompressed.]]>
+ </doc>
+ </method>
+ <method name="readFieldsCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #readFields(DataInput)}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #write(DataOutput)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base-class for Writables which store themselves compressed and lazily
+ inflate on field access. This is useful for large objects whose fields are
+ not to be altered during a map or reduce operation: leaving the field data
+ compressed makes copying the instance from one file to another much
+ faster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.CompressedWritable -->
+ <!-- start class org.apache.hadoop.io.DataInputBuffer -->
+ <class name="DataInputBuffer" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataInputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataInput} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataInputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataInputBuffer buffer = new DataInputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using DataInput methods ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataInputBuffer -->
+ <!-- start class org.apache.hadoop.io.DataOutputBuffer -->
+ <class name="DataOutputBuffer" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataOutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <constructor name="DataOutputBuffer" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from a DataInput directly into the buffer.]]>
+ </doc>
+ </method>
+ <method name="writeTo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write to a file stream]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataOutput} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataOutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataOutputBuffer buffer = new DataOutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using DataOutput methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataOutputBuffer -->
+ <!-- start class org.apache.hadoop.io.DefaultStringifier -->
+ <class name="DefaultStringifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Stringifier"/>
+ <constructor name="DefaultStringifier" type="org.apache.hadoop.conf.Configuration, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="fromString" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="store"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="item" type="java.lang.Object"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the item in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to store
+ @param item the object to be stored
+ @param keyName the name of the key to use
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="load" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return restored object
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="storeArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="items" type="java.lang.Object[]"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the array of items in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param items the objects to be stored
+ @param keyName the name of the key to use
+ @throws IndexOutOfBoundsException if the items array is empty
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="loadArray" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the array of objects from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return restored object
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[DefaultStringifier is the default implementation of the {@link Stringifier}
+ interface which stringifies the objects using base64 encoding of the
+ serialized version of the objects. The {@link Serializer} and
+ {@link Deserializer} are obtained from the {@link SerializationFactory}.
+ <br>
+ DefaultStringifier offers convenience methods to store/load objects to/from
+ the configuration.
+
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DefaultStringifier -->
+ <!-- start class org.apache.hadoop.io.DoubleWritable -->
+ <class name="DoubleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="DoubleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DoubleWritable" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="double"/>
+ </method>
+ <method name="get" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a DoubleWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Writable for Double values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable -->
+ <!-- start class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <class name="DoubleWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DoubleWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for DoubleWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.FloatWritable -->
+ <class name="FloatWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="FloatWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FloatWritable" type="float"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="float"/>
+ <doc>
+ <![CDATA[Set the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a FloatWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two FloatWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for floats.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable -->
+ <!-- start class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <class name="FloatWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FloatWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for FloatWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.GenericWritable -->
+ <class name="GenericWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="GenericWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Set the instance that is wrapped.
+
+ @param obj]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the wrapped instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTypes" return="java.lang.Class[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return all classes that may be wrapped. Subclasses should implement this
+ to return a constant array of classes.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper for Writable instances.
+ <p>
+ When two sequence files, which have same Key type but different Value
+ types, are mapped out to reduce, multiple Value types is not allowed.
+ In this case, this class can help you wrap instances with different types.
+ </p>
+
+ <p>
+ Compared with <code>ObjectWritable</code>, this class is much more effective,
+ because <code>ObjectWritable</code> will append the class declaration as a String
+ into the output file in every Key-Value pair.
+ </p>
+
+ <p>
+ Generic Writable implements {@link Configurable} interface, so that it will be
+ configured by the framework. The configuration is passed to the wrapped objects
+ implementing {@link Configurable} interface <i>before deserialization</i>.
+ </p>
+
+ how to use it: <br>
+ 1. Write your own class, such as GenericObject, which extends GenericWritable.<br>
+ 2. Implements the abstract method <code>getTypes()</code>, defines
+ the classes which will be wrapped in GenericObject in application.
+ Attention: this classes defined in <code>getTypes()</code> method, must
+ implement <code>Writable</code> interface.
+ <br><br>
+
+ The code looks like this:
+ <blockquote><pre>
+ public class GenericObject extends GenericWritable {
+
+ private static Class[] CLASSES = {
+ ClassType1.class,
+ ClassType2.class,
+ ClassType3.class,
+ };
+
+ protected Class[] getTypes() {
+ return CLASSES;
+ }
+
+ }
+ </pre></blockquote>
+
+ @since Nov 8, 2006]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.GenericWritable -->
+ <!-- start class org.apache.hadoop.io.InputBuffer -->
+ <class name="InputBuffer" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link InputStream} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new InputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ InputBuffer buffer = new InputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using InputStream methods ...
+ }
+ </pre>
+ @see DataInputBuffer
+ @see DataOutput]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.InputBuffer -->
+ <!-- start class org.apache.hadoop.io.IntWritable -->
+ <class name="IntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="IntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="IntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a IntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two IntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for ints.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable -->
+ <!-- start class org.apache.hadoop.io.IntWritable.Comparator -->
+ <class name="IntWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IntWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for IntWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.IOUtils -->
+ <class name="IOUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="buffSize" type="int"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStrem to read from
+ @param out OutputStream to write to
+ @param buffSize the size of the buffer
+ @param close whether or not close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another. <strong>closes the input and output streams
+ at the end</strong>.
+ @param in InputStrem to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStrem to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object
+ @param close whether or not close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads len bytes in a loop.
+ @param in The InputStream to read from
+ @param buf The buffer to fill
+ @param off offset from the buffer
+ @param len the length of bytes to read
+ @throws IOException if it could not read requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Similar to readFully(). Skips bytes in a loop.
+ @param in The InputStream to skip bytes from
+ @param len number of bytes to skip.
+ @throws IOException if it could not skip requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="closeables" type="java.io.Closeable[]"/>
+ <doc>
+ <![CDATA[Close the Closeable objects and <b>ignore</b> any {@link IOException} or
+ null pointers. Must only be used for cleanup in exception handlers.
+ @param log the log to record problems to at debug level. Can be null.
+ @param closeables the objects to close]]>
+ </doc>
+ </method>
+ <method name="closeStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Closeable"/>
+ <doc>
+ <![CDATA[Closes the stream ignoring {@link IOException}.
+ Must only be called in cleaning up from exception handlers.
+ @param stream the Stream to close]]>
+ </doc>
+ </method>
+ <method name="closeSocket"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <doc>
+ <![CDATA[Closes the socket ignoring {@link IOException}
+ @param sock the Socket to close]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An utility class for I/O related functionality.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils -->
+ <!-- start class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <class name="IOUtils.NullOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils.NullOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[/dev/null of OutputStreams.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <!-- start class org.apache.hadoop.io.LongWritable -->
+ <class name="LongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="LongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a LongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two LongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable -->
+ <!-- start class org.apache.hadoop.io.LongWritable.Comparator -->
+ <class name="LongWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <class name="LongWritable.DecreasingComparator" extends="org.apache.hadoop.io.LongWritable.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.DecreasingComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A decreasing Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <!-- start class org.apache.hadoop.io.MapFile -->
+ <class name="MapFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="oldName" type="java.lang.String"/>
+ <param name="newName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames an existing map directory.]]>
+ </doc>
+ </method>
+ <method name="delete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deletes the named map file.]]>
+ </doc>
+ </method>
+ <method name="fix" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valueClass" type="java.lang.Class"/>
+ <param name="dryrun" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[This method attempts to fix a corrupt MapFile by re-creating its index.
+ @param fs filesystem
+ @param dir directory containing the MapFile data and index
+ @param keyClass key class (has to be a subclass of Writable)
+ @param valueClass value class (has to be a subclass of Writable)
+ @param dryrun do not perform any changes, just report what needs to be done
+ @return number of valid entries in this MapFile, or -1 if no fixing was needed
+ @throws Exception]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="INDEX_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the index file.]]>
+ </doc>
+ </field>
+ <field name="DATA_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the data file.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A file-based map from keys to values.
+
+ <p>A map is a directory containing two files, the <code>data</code> file,
+ containing all keys and values in the map, and a smaller <code>index</code>
+ file, containing a fraction of the keys. The fraction is determined by
+ {@link Writer#getIndexInterval()}.
+
+ <p>The index file is read entirely into memory. Thus key implementations
+ should try to keep themselves small.
+
+ <p>Map files are created by adding entries in-order. To maintain a large
+ database, perform updates by copying the previous version of a database and
+ merging in a sorted change list, to create a new version of the database in
+ a new file. Sorting large change lists can be done with {@link
+ SequenceFile.Sorter}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile -->
+ <!-- start class org.apache.hadoop.io.MapFile.Reader -->
+ <class name="MapFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map using the named comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Hook to allow subclasses to defer opening streams until further
+ initialization is complete.
+ @see #createDataFileReader(FileSystem, Path, Configuration)]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="open"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dirName" type="java.lang.String"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDataFileReader" return="org.apache.hadoop.io.SequenceFile.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dataFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link SequenceFile.Reader} returned.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Re-positions the reader before its first key.]]>
+ </doc>
+ </method>
+ <method name="midKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the key at approximately the middle of the file.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalKey"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the final key from the file.
+
+ @param key key to read into]]>
+ </doc>
+ </method>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader at the named key, or if none such exists, at the
+ first entry after the named key. Returns true iff the named key exists
+ in this map.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the map into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ the end of the map]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the value for the named key, or null if none exists.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+ Returns <code>key</code> or if it does not exist, at the first entry
+ after the named key.
+
+ @param key - key that we're trying to find
+ @param val - data value if key is found
+ @return - the key that was the closest match or null if eof.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <param name="before" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+
+ @param key - key that we're trying to find
+ @param val - data value if key is found
+ @param before - IF true, and <code>key</code> does not exist, return
+ the first entry that falls just before the <code>key</code>. Otherwise,
+ return the record that sorts just after.
+ @return - the key that was the closest match or null if eof.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Reader -->
+ <!-- start class org.apache.hadoop.io.MapFile.Writer -->
+ <class name="MapFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <method name="getIndexInterval" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of entries that are added before an index entry is added.]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval and stores it in conf
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair to the map. The key must be greater or equal
+ to the previous key added to the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writes a new map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Writer -->
+ <!-- start class org.apache.hadoop.io.MapWritable -->
+ <class name="MapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Map"/>
+ <constructor name="MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapWritable" type="org.apache.hadoop.io.MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable Map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapWritable -->
+ <!-- start class org.apache.hadoop.io.MD5Hash -->
+ <class name="MD5Hash" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="MD5Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash from a hex string.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash with a specified value.]]>
+ </doc>
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs, reads and returns an instance.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+ <doc>
+ <![CDATA[Copy the contents of another instance into this instance.]]>
+ </doc>
+ </method>
+ <method name="getDigest" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the digest bytes.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a hash value for the content from the InputStream.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="halfDigest" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a half-sized version of this MD5. Fits in a long]]>
+ </doc>
+ </method>
+ <method name="quarterDigest" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a 32-bit digest of the MD5.
+ @return the first 4 bytes of the md5]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an MD5Hash whose digest contains the
+ same values.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for this object.
+ Only uses the first 4 bytes, since md5s are evenly distributed.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+ <doc>
+ <![CDATA[Compares this object with the specified object for order.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="setDigest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the digest value from a hex string.]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Writable for MD5 hash values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash -->
+ <!-- start class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <class name="MD5Hash.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MD5Hash.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for MD5Hash keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <!-- start class org.apache.hadoop.io.MultipleIOException -->
+ <class name="MultipleIOException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getExceptions" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the underlying exceptions]]>
+ </doc>
+ </method>
+ <method name="createIOException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="exceptions" type="java.util.List"/>
+ <doc>
+ <![CDATA[A convenient method to create an {@link IOException}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Encapsulate a list of {@link IOException} into an {@link IOException}]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MultipleIOException -->
+ <!-- start class org.apache.hadoop.io.NullWritable -->
+ <class name="NullWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <method name="get" return="org.apache.hadoop.io.NullWritable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the single instance of this class.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Singleton Writable with no data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable -->
+ <!-- start class org.apache.hadoop.io.NullWritable.Comparator -->
+ <class name="NullWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator &quot;optimized&quot; for NullWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.ObjectWritable -->
+ <class name="ObjectWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="ObjectWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Class, java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the instance, or null if none.]]>
+ </doc>
+ </method>
+ <method name="getDeclaredClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the class this is meant to be.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Reset the instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeObject"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="instance" type="java.lang.Object"/>
+ <param name="declaredClass" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="objectWritable" type="org.apache.hadoop.io.ObjectWritable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A polymorphic Writable that writes an instance with it's class name.
+ Handles arrays, strings and primitive types without a Writable wrapper.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ObjectWritable -->
+ <!-- start class org.apache.hadoop.io.OutputBuffer -->
+ <class name="OutputBuffer" extends="java.io.FilterOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.OutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from a InputStream directly into the buffer.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link OutputStream} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new OutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ OutputBuffer buffer = new OutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using OutputStream methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>
+ @see DataOutputBuffer
+ @see InputBuffer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.OutputBuffer -->
+ <!-- start interface org.apache.hadoop.io.RawComparator -->
+ <interface name="RawComparator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Comparator"/>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link Comparator} that operates directly on byte representations of
+ objects.
+ </p>
+ @param <T>
+ @see DeserializerComparator]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.RawComparator -->
+ <!-- start class org.apache.hadoop.io.SequenceFile -->
+ <class name="SequenceFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the compression type for the reduce outputs
+ @param job the job config to look in
+ @return the kind of compression to use
+ @deprecated Use
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use the one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile} or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.
+ or">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the compression type for sequence files.
+ @param job the configuration to modify
+ @param val the new compression type (none, block, record)
+ @deprecated Use the one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile} or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.
+ or]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param bufferSize buffer size for the underlaying outputstream.
+ @param replication replication factor for the file.
+ @param blockSize block size for the file.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="SYNC_INTERVAL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes between sync points.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<code>SequenceFile</code>s are flat files consisting of binary key/value
+ pairs.
+
+ <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and
+ {@link Sorter} classes for writing, reading and sorting respectively.</p>
+
+ There are three <code>SequenceFile</code> <code>Writer</code>s based on the
+ {@link CompressionType} used to compress key/value pairs:
+ <ol>
+ <li>
+ <code>Writer</code> : Uncompressed records.
+ </li>
+ <li>
+ <code>RecordCompressWriter</code> : Record-compressed files, only compress
+ values.
+ </li>
+ <li>
+ <code>BlockCompressWriter</code> : Block-compressed files, both keys &
+ values are collected in 'blocks'
+ separately and compressed. The size of
+ the 'block' is configurable.
+ </ol>
+
+ <p>The actual compression algorithm used to compress key and/or values can be
+ specified by using the appropriate {@link CompressionCodec}.</p>
+
+ <p>The recommended way is to use the static <tt>createWriter</tt> methods
+ provided by the <code>SequenceFile</code> to chose the preferred format.</p>
+
+ <p>The {@link Reader} acts as the bridge and can read any of the above
+ <code>SequenceFile</code> formats.</p>
+
+ <h4 id="Formats">SequenceFile Formats</h4>
+
+ <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
+ depending on the <code>CompressionType</code> specified. All of them share a
+ <a href="#Header">common header</a> described below.
+
+ <h5 id="Header">SequenceFile Header</h5>
+ <ul>
+ <li>
+ version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual
+ version number (e.g. SEQ4 or SEQ6)
+ </li>
+ <li>
+ keyClassName -key class
+ </li>
+ <li>
+ valueClassName - value class
+ </li>
+ <li>
+ compression - A boolean which specifies if compression is turned on for
+ keys/values in this file.
+ </li>
+ <li>
+ blockCompression - A boolean which specifies if block-compression is
+ turned on for keys/values in this file.
+ </li>
+ <li>
+ compression codec - <code>CompressionCodec</code> class which is used for
+ compression of keys and/or values (if compression is
+ enabled).
+ </li>
+ <li>
+ metadata - {@link Metadata} for this file.
+ </li>
+ <li>
+ sync - A sync marker to denote end of the header.
+ </li>
+ </ul>
+
+ <h5 id="#UncompressedFormat">Uncompressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li>Value</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few <code>100</code> bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li><i>Compressed</i> Value</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few <code>100</code> bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record <i>Block</i>
+ <ul>
+ <li>Compressed key-lengths block-size</li>
+ <li>Compressed key-lengths block</li>
+ <li>Compressed keys block-size</li>
+ <li>Compressed keys block</li>
+ <li>Compressed value-lengths block-size</li>
+ <li>Compressed value-lengths block</li>
+ <li>Compressed values block-size</li>
+ <li>Compressed values block</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few <code>100</code> bytes or so.
+ </li>
+ </ul>
+
+ <p>The compressed blocks of key lengths and value lengths consist of the
+ actual lengths of individual keys/values encoded in ZeroCompressedInteger
+ format.</p>
+
+ @see CompressionCodec]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <class name="SequenceFile.CompressionType" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.SequenceFile.CompressionType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NONE" type="org.apache.hadoop.io.SequenceFile.CompressionType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do not compress records.]]>
+ </doc>
+ </field>
+ <field name="RECORD" type="org.apache.hadoop.io.SequenceFile.CompressionType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compress values only, each separately.]]>
+ </doc>
+ </field>
+ <field name="BLOCK" type="org.apache.hadoop.io.SequenceFile.CompressionType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compress sequences of records together in blocks.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The compression type used to compress key/value pairs in the
+ {@link SequenceFile}.
+
+ @see SequenceFile.Writer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.Metadata -->
+ <class name="SequenceFile.Metadata" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.ap