aboutsummaryrefslogtreecommitdiff
path: root/aarch64/share/hadoop/hdfs/jdiff
diff options
context:
space:
mode:
Diffstat (limited to 'aarch64/share/hadoop/hdfs/jdiff')
-rw-r--r--aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.20.0.xml10389
-rw-r--r--aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.21.0.xml16220
-rw-r--r--aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.22.0.xml18589
3 files changed, 45198 insertions, 0 deletions
diff --git a/aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.20.0.xml b/aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.20.0.xml
new file mode 100644
index 0000000..823c3d8
--- /dev/null
+++ b/aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.20.0.xml
@@ -0,0 +1,10389 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Sun May 31 20:46:08 PDT 2009 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="hadoop-hdfs 0.20.0"
+ jdversion="1.0.9">
+
+<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/gkesavan/release-0.20.0/build/ivy/lib/Hadoop/jdiff/jdiff-1.0.9.jar:/home/gkesavan/release-0.20.0/build/ivy/lib/Hadoop/jdiff/xerces-1.4.4.jar -classpath /home/gkesavan/release-0.20.0/build/classes:/home/gkesavan/release-0.20.0/lib/commons-cli-2.0-SNAPSHOT.jar:/home/gkesavan/release-0.20.0/lib/hsqldb-1.8.0.10.jar:/home/gkesavan/release-0.20.0/lib/jsp-2.1/jsp-2.1.jar:/home/gkesavan/release-0.20.0/lib/jsp-2.1/jsp-api-2.1.jar:/home/gkesavan/release-0.20.0/lib/kfs-0.2.2.jar:/home/gkesavan/release-0.20.0/conf:/home/gkesavan/.ivy2/cache/commons-logging/commons-logging/jars/commons-logging-1.0.4.jar:/home/gkesavan/.ivy2/cache/log4j/log4j/jars/log4j-1.2.15.jar:/home/gkesavan/.ivy2/cache/commons-httpclient/commons-httpclient/jars/commons-httpclient-3.0.1.jar:/home/gkesavan/.ivy2/cache/commons-codec/commons-codec/jars/commons-codec-1.3.jar:/home/gkesavan/.ivy2/cache/xmlenc/xmlenc/jars/xmlenc-0.52.jar:/home/gkesavan/.ivy2/cache/net.java.dev.jets3t/jets3t/jars/jets3t-0.6.1.jar:/home/gkesavan/.ivy2/cache/commons-net/commons-net/jars/commons-net-1.4.1.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/servlet-api-2.5/jars/servlet-api-2.5-6.1.14.jar:/home/gkesavan/.ivy2/cache/oro/oro/jars/oro-2.0.8.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/jetty/jars/jetty-6.1.14.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/jetty-util/jars/jetty-util-6.1.14.jar:/home/gkesavan/.ivy2/cache/tomcat/jasper-runtime/jars/jasper-runtime-5.5.12.jar:/home/gkesavan/.ivy2/cache/tomcat/jasper-compiler/jars/jasper-compiler-5.5.12.jar:/home/gkesavan/.ivy2/cache/commons-el/commons-el/jars/commons-el-1.0.jar:/home/gkesavan/.ivy2/cache/junit/junit/jars/junit-3.8.1.jar:/home/gkesavan/.ivy2/cache/commons-logging/commons-logging-api/jars/commons-logging-api-1.0.4.jar:/home/gkesavan/.ivy2/cache/org.slf4j/slf4j-api/jars/slf4j-api-1.4.3.jar:/home/gkesavan/.ivy2/cache/org.eclipse.jdt/core/jars/core-3.1.1.jar:/home/gkesavan/.ivy2/cache/org.slf4j/sl
f4j-log4j12/jars/slf4j-log4j12-1.4.3.jar:/home/gkesavan/.ivy2/cache/jdiff/jdiff/jars/jdiff-1.0.9.jar:/home/gkesavan/.ivy2/cache/xerces/xerces/jars/xerces-1.4.4.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-launcher.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-resolver.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-starteam.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-netrexx.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-testutil.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jai.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-swing.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jmf.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-bcel.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jdepend.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jsch.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-bsf.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-antlr.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-weblogic.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-junit.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-log4j.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/xercesImpl.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-oro.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-trax.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-nodeps.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-commons-logging.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-regexp.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-stylebook.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-javamail.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-commons-net.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/xml-apis.jar:/home/gkesavan/tools/jdk1.6.0_07-32bit/lib/tools.jar -sourcepath /home/gkesavan/release-0.20.0/src/hdfs -apidir /home/gkesavan/release-0.20.0/lib/jdiff -apiname hadoop 0.20.1-dev -->
+<package name="org.apache.hadoop.hdfs">
+ <!-- start class org.apache.hadoop.hdfs.ChecksumDistributedFileSystem -->
+ <class name="ChecksumDistributedFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumDistributedFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ChecksumDistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </constructor>
+ <method name="getRawCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw capacity of the filesystem, disregarding
+ replication .]]>
+ </doc>
+ </method>
+ <method name="getRawUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw used space in the filesystem, disregarding
+ replication .]]>
+ </doc>
+ </method>
+ <method name="getDataNodeStats" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return statistics for each datanode.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalize previously upgraded files system state.]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[We need to find the blocks that didn't match. Likely only one
+ is corrupt but we will report both to the namenode. In the future,
+ we can consider figuring out exactly which block is corrupt.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the stat information about the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of ChecksumFileSystem over DistributedFileSystem.
+ Note that as of now (May 07), DistributedFileSystem natively checksums
+ all of its data. Using this class is not be necessary in most cases.
+ Currently provided mainly for backward compatibility and testing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.ChecksumDistributedFileSystem -->
+ <!-- start class org.apache.hadoop.hdfs.DFSClient -->
+ <class name="DFSClient" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="java.io.Closeable"/>
+ <constructor name="DFSClient" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new DFSClient connected to the default namenode.]]>
+ </doc>
+ </constructor>
+ <constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new DFSClient connected to the given namenode server.]]>
+ </doc>
+ </constructor>
+ <constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nameNodeAddr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file system, abandoning all of the leases and files being
+ created and close connections to the namenode.]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default block size for this cluster
+ @return the default block size in bytes]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report corrupt blocks that were discovered by the client.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getHints" return="java.lang.String[][]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getBlockLocations instead
+
+ Get hints about the location of the indicated block(s).
+
+ getHints() returns a list of hostnames that store data for
+ a specific file region. It returns a set of hostnames for
+ every block within the indicated region.
+
+ This function is very useful when writing code that considers
+ data-placement when performing operations. For example, the
+ MapReduce system tries to schedule tasks on the same machines
+ as the data-block the task processes.">
+ <param name="src" type="java.lang.String"/>
+ <param name="start" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getBlockLocations instead
+
+ Get hints about the location of the indicated block(s).
+
+ getHints() returns a list of hostnames that store data for
+ a specific file region. It returns a set of hostnames for
+ every block within the indicated region.
+
+ This function is very useful when writing code that considers
+ data-placement when performing operations. For example, the
+ MapReduce system tries to schedule tasks on the same machines
+ as the data-block the task processes.]]>
+ </doc>
+ </method>
+ <method name="getBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="start" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get block location info about file
+
+ getBlockLocations() returns a list of hostnames that store
+ data for a specific file region. It returns a set of hostnames
+ for every block within the indicated region.
+
+ This function is very useful when writing code that considers
+ data-placement when performing operations. For example, the
+ MapReduce system tries to schedule tasks on the same machines
+ as the data-block the task processes.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.hdfs.DFSClient.DFSInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new dfs file and return an output stream for writing into it.
+
+ @param src stream name
+ @param overwrite do not check for file existence if true
+ @return output stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new dfs file and return an output stream for writing into it
+ with write-progress reporting.
+
+ @param src stream name
+ @param overwrite do not check for file existence if true
+ @return output stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new dfs file with the specified block replication
+ and return an output stream for writing into the file.
+
+ @param src stream name
+ @param overwrite do not check for file existence if true
+ @param replication block replication
+ @return output stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new dfs file with the specified block replication
+ with write-progress reporting and return an output stream for writing
+ into the file.
+
+ @param src stream name
+ @param overwrite do not check for file existence if true
+ @param replication block replication
+ @return output stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call
+ {@link #create(String,FsPermission,boolean,short,long,Progressable,int)}
+ with default permission.
+ @see FsPermission#getDefault()]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new dfs file with the specified block replication
+ with write-progress reporting and return an output stream for writing
+ into the file.
+
+ @param src stream name
+ @param permission The permission of the directory being created.
+ If permission == null, use {@link FsPermission#getDefault()}.
+ @param overwrite do not check for file existence if true
+ @param replication block replication
+ @return output stream
+ @throws IOException
+ @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @see ClientProtocol#setReplication(String, short)
+ @param replication
+ @throws IOException
+ @return true is successful or false if file does not exist]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename file or directory.
+ See {@link ClientProtocol#rename(String, String)}.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete file or directory.
+ See {@link ClientProtocol#delete(String)}.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[delete file or directory.
+ delete contents of the directory if non empty and recursive
+ set to true]]>
+ </doc>
+ </method>
+ <method name="exists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implemented using getFileInfo(src)]]>
+ </doc>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="listPaths" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileInfo" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="namenode" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
+ <param name="socketFactory" type="javax.net.SocketFactory"/>
+ <param name="socketTimeout" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the checksum of a file.
+ @param src The file path
+ @return The checksum]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permissions to a file or directory.
+ @param src path name.
+ @param permission
+ @throws <code>FileNotFoundException</code> is file does not exist.]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set file or directory owner.
+ @param src path name.
+ @param username user id.
+ @param groupname user group.
+ @throws <code>FileNotFoundException</code> is file does not exist.]]>
+ </doc>
+ </method>
+ <method name="getDiskStatus" return="org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="totalRawCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="totalRawUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMissingBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with no good replicas left. Normally should be
+ zero.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUnderReplicatedBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with one of more replica missing.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCorruptBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with at least one replica marked corrupt.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+ See {@link ClientProtocol#setSafeMode(FSConstants.SafeModeAction)}
+ for more details.
+
+ @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refresh the hosts and exclude files. (Rereads them.)
+ See {@link ClientProtocol#refreshNodes()}
+ for more details.
+
+ @see ClientProtocol#refreshNodes()]]>
+ </doc>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps DFS data structures into specified file.
+ See {@link ClientProtocol#metaSave(String)}
+ for more details.
+
+ @see ClientProtocol#metaSave(String)]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@see ClientProtocol#finalizeUpgrade()]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction)]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a directory (or hierarchy of directories) with the given
+ name and permission.
+
+ @param src The path of the directory being created
+ @param permission The permission of the directory being created.
+ If permission == null, use {@link FsPermission#getDefault()}.
+ @return True if the operation success.
+ @see ClientProtocol#mkdirs(String, FsPermission)]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[set the modification and access time of a file
+ @throws FileNotFoundException if the path is not a file]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_BLOCK_ACQUIRE_FAILURES" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="namenode" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DFSClient can connect to a Hadoop Filesystem and
+ perform basic file tasks. It uses the ClientProtocol
+ to communicate with a NameNode daemon, and connects
+ directly to DataNodes to read/write block data.
+
+ Hadoop DFS users should obtain an instance of
+ DistributedFileSystem, which uses DFSClient to handle
+ filesystem tasks.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSClient -->
+ <!-- start class org.apache.hadoop.hdfs.DFSClient.BlockReader -->
+ <class name="DFSClient.BlockReader" extends="org.apache.hadoop.fs.FSInputChecker"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChunkPosition" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ </method>
+ <method name="readChunk" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksumBuf" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="newBlockReader" return="org.apache.hadoop.hdfs.DFSClient.BlockReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <param name="file" type="java.lang.String"/>
+ <param name="blockId" type="long"/>
+ <param name="genStamp" type="long"/>
+ <param name="startOffset" type="long"/>
+ <param name="len" type="long"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="newBlockReader" return="org.apache.hadoop.hdfs.DFSClient.BlockReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <param name="file" type="java.lang.String"/>
+ <param name="blockId" type="long"/>
+ <param name="genStamp" type="long"/>
+ <param name="startOffset" type="long"/>
+ <param name="len" type="long"/>
+ <param name="bufferSize" type="int"/>
+ <param name="verifyChecksum" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Java Doc required]]>
+ </doc>
+ </method>
+ <method name="newBlockReader" return="org.apache.hadoop.hdfs.DFSClient.BlockReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <param name="file" type="java.lang.String"/>
+ <param name="blockId" type="long"/>
+ <param name="genStamp" type="long"/>
+ <param name="startOffset" type="long"/>
+ <param name="len" type="long"/>
+ <param name="bufferSize" type="int"/>
+ <param name="verifyChecksum" type="boolean"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readAll" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[kind of like readFully(). Only reads as much as possible.
+ And allows use of protected readFully().]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is a wrapper around connection to datadone
+ and understands checksum, offset etc]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSClient.BlockReader -->
+ <!-- start class org.apache.hadoop.hdfs.DFSUtil -->
+ <class name="DFSUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isValidName" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Whether the pathname is valid. Currently prohibits relative paths,
+ and names which contain a ":" or "/"]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSUtil -->
+ <!-- start class org.apache.hadoop.hdfs.DistributedFileSystem -->
+ <class name="DistributedFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Permit paths which explicitly specify the default port.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Normalize paths that explicitly specify the default port.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename files/dirs]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get rid of Path f, whether a true file or dir.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[requires a boolean check to delete a non
+ empty directory recursively.]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setQuota"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="namespaceQuota" type="long"/>
+ <param name="diskspaceQuota" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set a directory's quotas
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getClient" return="org.apache.hadoop.hdfs.DFSClient"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDiskStatus" return="org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the disk usage of the filesystem, including total capacity,
+ used space, and remaining space]]>
+ </doc>
+ </method>
+ <method name="getRawCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw capacity of the filesystem, disregarding
+ replication .]]>
+ </doc>
+ </method>
+ <method name="getRawUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw used space in the filesystem, disregarding
+ replication .]]>
+ </doc>
+ </method>
+ <method name="getMissingBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with no good replicas left. Normally should be
+ zero.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUnderReplicatedBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with one of more replica missing.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCorruptBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with at least one replica marked corrupt.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDataNodeStats" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return statistics for each datanode.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
+ FSConstants.SafeModeAction)]]>
+ </doc>
+ </method>
+ <method name="saveNamespace"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Save namespace image.
+
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refreshes the list of hosts and excluded hosts from the configured
+ files.]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalize previously upgraded files system state.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[We need to find the blocks that didn't match. Likely only one
+ is corrupt but we will report both to the namenode. In the future,
+ we can consider figuring out exactly which block is corrupt.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the stat information about the file.
+ @throws FileNotFoundException if the file does not exist.]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc }]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc }]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implementation of the abstract FileSystem for the DFS system.
+ This object is the way end-user code interacts with a Hadoop
+ DistributedFileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DistributedFileSystem -->
+ <!-- start class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
+ <class name="DistributedFileSystem.DiskStatus" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedFileSystem.DiskStatus" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
+ <!-- start class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
+ <class name="HDFSPolicyProvider" extends="org.apache.hadoop.security.authorize.PolicyProvider"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HDFSPolicyProvider"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getServices" return="org.apache.hadoop.security.authorize.Service[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[{@link PolicyProvider} for HDFS protocols.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
+ <!-- start class org.apache.hadoop.hdfs.HftpFileSystem -->
+ <class name="HftpFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HftpFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="pickOneAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="hostname" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[randomly pick one from all available IP addresses of a given hostname]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="openConnection" return="java.net.HttpURLConnection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="query" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open an HTTP connection to the namenode to read file data and metadata.
+ @param path The path component of the URL
+ @param query The query component of the URL]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="nnAddr" type="java.net.InetSocketAddress"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ugi" type="org.apache.hadoop.security.UserGroupInformation"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ran" type="java.util.Random"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="df" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of a protocol for accessing filesystems over HTTP.
+ The following implementation provides a limited, read-only interface
+ to a filesystem over HTTP.
+ @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
+ @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HftpFileSystem -->
+ <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem -->
+ <class name="HsftpFileSystem" extends="org.apache.hadoop.hdfs.HftpFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HsftpFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="openConnection" return="java.net.HttpURLConnection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="query" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An implementation of a protocol for accessing filesystems over HTTPS.
+ The following implementation provides a limited, read-only interface
+ to a filesystem over HTTPS.
+ @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
+ @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem -->
+ <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
+ <class name="HsftpFileSystem.DummyHostnameVerifier" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="javax.net.ssl.HostnameVerifier"/>
+ <constructor name="HsftpFileSystem.DummyHostnameVerifier"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="verify" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hostname" type="java.lang.String"/>
+ <param name="session" type="javax.net.ssl.SSLSession"/>
+ </method>
+ <doc>
+ <![CDATA[Dummy hostname verifier that is used to bypass hostname checking]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
+ <doc>
+ <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
+Google's <a href="http://labs.google.com/papers/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files
+have strictly one writer at any one time. Bytes are always appended
+to the end of the writer's stream. There is no notion of "record appends"
+or "mutations" that are then checked or reordered. Writers simply emit
+a byte stream. That byte stream is guaranteed to be stored in the
+order written.</p>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+ <!-- start class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
+ <class name="AlreadyBeingCreatedException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AlreadyBeingCreatedException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The exception that happens when you ask to create a file that already
+ is being created, but is not closed yet.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.Block -->
+ <class name="Block" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <constructor name="Block"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Block" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Block" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Block" type="org.apache.hadoop.hdfs.protocol.Block"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Block" type="java.io.File, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Find the blockid from the given filename]]>
+ </doc>
+ </constructor>
+ <method name="isBlockFilename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="java.io.File"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blkid" type="long"/>
+ <param name="len" type="long"/>
+ <param name="genStamp" type="long"/>
+ </method>
+ <method name="getBlockId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setBlockId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bid" type="long"/>
+ </method>
+ <method name="getBlockName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumBytes" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setNumBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="long"/>
+ </method>
+ <method name="getGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setGenerationStamp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stamp" type="long"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="GRANDFATHER_GENERATION_STAMP" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Block is a Hadoop FS primitive, identified by a
+ long.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.Block -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
+ <class name="BlockListAsLongs" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockListAsLongs" type="long[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+ @param iBlockList - BlockListALongs create from this long[] parameter]]>
+ </doc>
+ </constructor>
+ <method name="convertToArrayLongs" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockArray" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <doc>
+ <![CDATA[Converting a block[] to a long[]
+ @param blockArray - the input array block[]
+ @return the output array of long[]]]>
+ </doc>
+ </method>
+ <method name="getNumberOfBlocks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of blocks
+ @return - the number of blocks]]>
+ </doc>
+ </method>
+ <method name="getBlockId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[The block-id of the indexTh block
+ @param index - the block whose block-id is desired
+ @return the block-id]]>
+ </doc>
+ </method>
+ <method name="getBlockLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[The block-len of the indexTh block
+ @param index - the block whose block-len is desired
+ @return - the block-len]]>
+ </doc>
+ </method>
+ <method name="getBlockGenStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[The generation stamp of the indexTh block
+ @param index - the block whose block-len is desired
+ @return - the generation stamp]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class provides an interface for accessing list of blocks that
+ has been implemented as long[].
+ This class is useful for block report. Rather than send block reports
+ as a Block[] we can send it as a long[].]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
+ <!-- start interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
+ <interface name="ClientDatanodeProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="recoverBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="keepLength" type="boolean"/>
+ <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start generation-stamp recovery for specified block
+ @param block the specified block
+ @param keepLength keep the block length
+ @param targets the list of possible locations of specified block
+ @return the new blockid if recovery successful and the generation stamp
+ got updated as part of the recovery, else returns null if the block does
+ not have any data and the block was deleted.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[3: add keepLength parameter.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A client-datanode protocol for block recovery]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
+ <!-- start interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
+ <interface name="ClientProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="offset" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get locations of the blocks of the specified file within the specified range.
+ DataNode locations for each block are sorted by
+ the proximity to the client.
+ <p>
+ Return {@link LocatedBlocks} which contains
+ file length, blocks and their locations.
+ DataNode locations for each block are sorted by
+ the distance to the client's address.
+ <p>
+ The client will then have to contact
+ one of the indicated DataNodes to obtain the actual data.
+
+ @param src file name
+ @param offset range start offset
+ @param length range length
+ @return file length and array of blocks with their locations
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="create"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new file entry in the namespace.
+ <p>
+ This will create an empty file specified by the source path.
+ The path should reflect a full path originated at the root.
+ The name-node does not have a notion of "current" directory for a client.
+ <p>
+ Once created, the file is visible and available for read to other clients.
+ Although, other clients cannot {@link #delete(String)}, re-create or
+ {@link #rename(String, String)} it until the file is completed
+ or explicitly as a result of lease expiration.
+ <p>
+ Blocks have a maximum size. Clients that intend to
+ create multi-block files must also use {@link #addBlock(String, String)}.
+
+ @param src path of the file being created.
+ @param masked masked permission.
+ @param clientName name of the current client.
+ @param overwrite indicates whether the file should be
+ overwritten if it already exists.
+ @param replication block replication factor.
+ @param blockSize maximum block size.
+
+ @throws AccessControlException if permission to create file is
+ denied by the system. As usually on the client side the exception will
+ be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
+ @throws QuotaExceededException if the file creation violates
+ any quota restriction
+ @throws IOException if other errors occur.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to the end of the file.
+ @param src path of the file being created.
+ @param clientName name of the current client.
+ @return information about the last partial block if any.
+ @throws AccessControlException if permission to append file is
+ denied by the system. As usually on the client side the exception will
+ be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
+ Allows appending to an existing file if the server is
+ configured with the parameter dfs.support.append set to true, otherwise
+ throws an IOException.
+ @throws IOException if other errors occur.]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+ <p>
+ The NameNode sets replication to the new value and returns.
+ The actual block replication is not expected to be performed during
+ this method call. The blocks will be populated or removed in the
+ background as the result of the routine block maintenance procedures.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permissions for an existing file/directory.]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set owner of a path (i.e. a file or a directory).
+ The parameters username and groupname cannot both be null.
+ @param src
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.]]>
+ </doc>
+ </method>
+ <method name="abandonBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client can give up on a block by calling abandonBlock().
+ The client can then
+ either obtain a new block, or complete or abandon the file.
+ Any partial writes to the block will be discarded.]]>
+ </doc>
+ </method>
+ <method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A client that wants to write an additional block to the
+ indicated filename (which must currently be open for writing)
+ should call addBlock().
+
+ addBlock() allocates a new block and datanodes the block data
+ should be replicated to.
+
+ @return LocatedBlock allocated block information.]]>
+ </doc>
+ </method>
+ <method name="complete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client is done writing data to the given filename, and would
+ like to complete it.
+
+ The function returns whether the file has been closed successfully.
+ If the function returns false, the caller should try again.
+
+ A call to complete() will not return true until all the file's
+ blocks have been replicated the minimum number of times. Thus,
+ DataNode failures may cause a client to call complete() several
+ times before succeeding.]]>
+ </doc>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client wants to report corrupted blocks (blocks with specified
+ locations on datanodes).
+ @param blocks Array of located blocks to report]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename an item in the file system namespace.
+
+ @param src existing file or directory name.
+ @param dst new name.
+ @return true if successful, or false if the old name does not exist
+ or if the new name already belongs to the namespace.
+ @throws IOException if the new name is invalid.
+ @throws QuotaExceededException if the rename would violate
+ any quota restriction]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete the given file or directory from the file system.
+ <p>
+ Any blocks belonging to the deleted files will be garbage-collected.
+
+ @param src existing name.
+ @return true only if the existing file or directory was actually removed
+ from the file system.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete the given file or directory from the file system.
+ <p>
+ same as delete but provides a way to avoid accidentally
+ deleting non empty directories programmatically.
+ @param src existing name
+ @param recursive if true deletes a non empty directory recursively,
+ else throws an exception.
+ @return true only if the existing file or directory was actually removed
+ from the file system.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a directory (or hierarchy of directories) with the given
+ name and permission.
+
+ @param src The path of the directory being created
+ @param masked The masked permission of the directory being created
+ @return True if the operation success.
+ @throws {@link AccessControlException} if permission to create file is
+ denied by the system. As usually on the client side the exception will
+ be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
+ @throws QuotaExceededException if the operation would violate
+ any quota restriction.]]>
+ </doc>
+ </method>
+ <method name="getListing" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a listing of the indicated directory]]>
+ </doc>
+ </method>
+ <method name="renewLease"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Client programs can cause stateful changes in the NameNode
+ that affect other clients. A client may obtain a file and
+ neither abandon nor complete it. A client might hold a series
+ of locks that prevent other clients from proceeding.
+ Clearly, it would be bad if a client held a bunch of locks
+ that it never gave up. This can happen easily if the client
+ dies unexpectedly.
+ <p>
+ So, the NameNode will revoke the locks and live file-creates
+ for clients that it thinks have died. A client tells the
+ NameNode that it is still alive by periodically calling
+ renewLease(). If a certain amount of time passes since
+ the last call to renewLease(), the NameNode assumes the
+ client has died.]]>
+ </doc>
+ </method>
+ <method name="getStats" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a set of statistics about the filesystem.
+ Right now, only three values are returned.
+ <ul>
+ <li> [0] contains the total storage capacity of the system, in bytes.</li>
+ <li> [1] contains the total used space of the system, in bytes.</li>
+ <li> [2] contains the available storage of the system, in bytes.</li>
+ <li> [3] contains number of under replicated blocks in the system.</li>
+ <li> [4] contains number of blocks with a corrupt replica. </li>
+ <li> [5] contains number of blocks without any good replicas left. </li>
+ </ul>
+ Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
+ actual numbers to index into the array.]]>
+ </doc>
+ </method>
+ <method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a report on the system's current datanodes.
+ One DatanodeInfo object is returned for each DataNode.
+ Return live datanodes if type is LIVE; dead datanodes if type is DEAD;
+ otherwise all datanodes if type is ALL.]]>
+ </doc>
+ </method>
+ <method name="getPreferredBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the block size for the given file.
+ @param filename The name of the file
+ @return The number of bytes in each block
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+ <p>
+ Safe mode is a name node state when it
+ <ol><li>does not accept changes to name space (read-only), and</li>
+ <li>does not replicate or delete blocks.</li></ol>
+
+ <p>
+ Safe mode is entered automatically at name node startup.
+ Safe mode can also be entered manually using
+ {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}.
+ <p>
+ At startup the name node accepts data node reports collecting
+ information about block locations.
+ In order to leave safe mode it needs to collect a configurable
+ percentage called threshold of blocks, which satisfy the minimal
+ replication condition.
+ The minimal replication condition is that each block must have at least
+ <tt>dfs.replication.min</tt> replicas.
+ When the threshold is reached the name node extends safe mode
+ for a configurable amount of time
+ to let the remaining data nodes check in before it
+ will start replicating missing blocks.
+ Then the name node leaves safe mode.
+ <p>
+ If safe mode is turned on manually using
+ {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
+ then the name node stays in safe mode until it is manually turned off
+ using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
+ Current state of the name node can be verified using
+ {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
+ <h4>Configuration parameters:</h4>
+ <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
+ <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
+ <tt>dfs.replication.min</tt> is the minimal replication parameter.
+
+ <h4>Special cases:</h4>
+ The name node does not enter safe mode at startup if the threshold is
+ set to 0 or if the name space is empty.<br>
+ If the threshold is set to 1 then all blocks need to have at least
+ minimal replication.<br>
+ If the threshold value is greater than 1 then the name node will not be
+ able to turn off safe mode automatically.<br>
+ Safe mode can always be turned off manually.
+
+ @param action <ul> <li>0 leave safe mode;</li>
+ <li>1 enter safe mode;</li>
+ <li>2 get safe mode state.</li></ul>
+ @return <ul><li>0 if the safe mode is OFF or</li>
+ <li>1 if the safe mode is ON.</li></ul>
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="saveNamespace"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Save namespace image.
+ <p>
+ Saves the current namespace into storage directories and resets the edits log.
+ Requires superuser privilege and safe mode.
+
+ @throws AccessControlException if the superuser privilege is violated.
+ @throws IOException if image creation failed.]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Tells the namenode to reread the hosts and exclude files.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalize previous upgrade.
+ Remove file system state saved during the upgrade.
+ The upgrade will become irreversible.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report distributed upgrade progress or force current upgrade to proceed.
+
+ @param action {@link FSConstants.UpgradeAction} to perform
+ @return upgrade status information or null if no upgrades are in progress
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps namenode data structures into specified file. If file
+ already exists, then append.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFileInfo" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file or directory.
+ @param src The string representation of the path to the file
+ @throws IOException if permission to access file is denied by the system
+ @return object containing information regarding the file
+ or null if file not found]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get {@link ContentSummary} rooted at the specified directory.
+ @param path The string representation of the path]]>
+ </doc>
+ </method>
+ <method name="setQuota"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="namespaceQuota" type="long"/>
+ <param name="diskspaceQuota" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the quota for a directory.
+ @param path The string representation of the path to the directory
+ @param namespaceQuota Limit on the number of names in the tree rooted
+ at the directory
+ @param diskspaceQuota Limit on disk space occupied by all the files under
+ this directory.
+ <br><br>
+
+ The quota can have three types of values : (1) 0 or more will set
+ the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies
+ the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET}
+ implies the quota will be reset. Any other value is a runtime error.
+
+ @throws FileNotFoundException if the path is a file or
+ does not exist
+ @throws QuotaExceededException if the directory size
+ is greater than the given quota]]>
+ </doc>
+ </method>
+ <method name="fsync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="client" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write all metadata for this file into persistent storage.
+ The file must be currently open for writing.
+ @param src The string representation of the path
+ @param client The string representation of the client]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets the modification and access time of the file to the specified time.
+ @param src The string representation of the path
+ @param mtime The number of milliseconds since Jan 1, 1970.
+ Setting mtime to -1 means that modification time should not be set
+ by this call.
+ @param atime The number of milliseconds since Jan 1, 1970.
+ Setting atime to -1 means that access time should not be set
+ by this call.]]>
+ </doc>
+ </method>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compared to the previous version the following changes have been introduced:
+ (Only the latest change is reflected.
+ The log of historical changes can be retrieved from the svn).
+ 41: saveNamespace introduced.]]>
+ </doc>
+ </field>
+ <field name="GET_STATS_CAPACITY_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_USED_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_REMAINING_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_UNDER_REPLICATED_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_CORRUPT_BLOCKS_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_MISSING_BLOCKS_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[ClientProtocol is used by user code via
+ {@link org.apache.hadoop.hdfs.DistributedFileSystem} class to communicate
+ with the NameNode. User code can manipulate the directory namespace,
+ as well as open/close file streams, etc.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeID -->
+ <class name="DatanodeID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Equivalent to DatanodeID("").]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Equivalent to DatanodeID(nodeName, "", -1, -1).]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeID copy constructor
+
+ @param from]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="java.lang.String, java.lang.String, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create DatanodeID
+ @param nodeName (hostname:portNumber)
+ @param storageID data storage ID
+ @param infoPort info server port
+ @param ipcPort ipc server port]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return hostname:portNumber.]]>
+ </doc>
+ </method>
+ <method name="getStorageID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return data storage ID.]]>
+ </doc>
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return infoPort (the port to which the HTTP server is bound)]]>
+ </doc>
+ </method>
+ <method name="getIpcPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return ipcPort (the port to which the IPC server is bound)]]>
+ </doc>
+ </method>
+ <method name="setStorageID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="storageID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[sets the data storage ID.]]>
+ </doc>
+ </method>
+ <method name="getHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return hostname only, without the :portNumber suffix.]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="to" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="updateRegInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <doc>
+ <![CDATA[Update fields when a new registration request comes in.
+ Note that this does not update storageID.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <doc>
+ <![CDATA[Comparable.
+ Basis of compare is the String name (host:portNumber) only.
+ @param that
+ @return as specified by Comparable.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="EMPTY_ARRAY" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="name" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="storageID" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="infoPort" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ipcPort" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeID is composed of the data node
+ name (hostname:portNumber) and the data storage ID,
+ which it currently represents.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeID -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
+ <class name="DatanodeInfo" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.Node"/>
+ <constructor name="DatanodeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw capacity.]]>
+ </doc>
+ </method>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The used space by the data node.]]>
+ </doc>
+ </method>
+ <method name="getNonDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The used space by the data node.]]>
+ </doc>
+ </method>
+ <method name="getDfsUsedPercent" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The used space by the data node as percentage of present capacity]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw free space.]]>
+ </doc>
+ </method>
+ <method name="getRemainingPercent" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The remaining space as percentage of configured capacity.]]>
+ </doc>
+ </method>
+ <method name="getLastUpdate" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The time when this information was accurate.]]>
+ </doc>
+ </method>
+ <method name="getXceiverCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[number of active connections]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="capacity" type="long"/>
+ <doc>
+ <![CDATA[Sets raw capacity.]]>
+ </doc>
+ </method>
+ <method name="setRemaining"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="remaining" type="long"/>
+ <doc>
+ <![CDATA[Sets raw free space.]]>
+ </doc>
+ </method>
+ <method name="setLastUpdate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lastUpdate" type="long"/>
+ <doc>
+ <![CDATA[Sets time when this information was accurate.]]>
+ </doc>
+ </method>
+ <method name="setXceiverCount"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="xceiverCount" type="int"/>
+ <doc>
+ <![CDATA[Sets number of active connections]]>
+ </doc>
+ </method>
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[rack name]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the rack name]]>
+ </doc>
+ </method>
+ <method name="getHostName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setHostName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ </method>
+ <method name="getDatanodeReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A formatted string for reporting the status of the DataNode.]]>
+ </doc>
+ </method>
+ <method name="dumpDatanode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A formatted string for printing the status of the DataNode.]]>
+ </doc>
+ </method>
+ <method name="startDecommission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Start decommissioning a node by marking it as
+ decommission-in-progress.]]>
+ </doc>
+ </method>
+ <method name="stopDecommission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop decommissioning a node by restoring it to the
+ normal admin state.]]>
+ </doc>
+ </method>
+ <method name="isDecommissionInProgress" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the node is in the process of being decommissioned]]>
+ </doc>
+ </method>
+ <method name="isDecommissioned" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the node has been decommissioned.]]>
+ </doc>
+ </method>
+ <method name="setDecommissioned"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sets the admin state to indicate that decommission is complete.]]>
+ </doc>
+ </method>
+ <method name="setAdminState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="newState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"/>
+ <doc>
+ <![CDATA[Sets the admin state of this node.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="level" type="int"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="capacity" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dfsUsed" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="remaining" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="lastUpdate" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="xceiverCount" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="location" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="hostName" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[HostName as supplied by the datanode during registration as its
+ name. Namenode uses datanode IP address as the name.]]>
+ </doc>
+ </field>
+ <field name="adminState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeInfo represents the status of a DataNode.
+ This object is used for communication in the
+ Datanode Protocol and the Client Protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
+ <class name="DatanodeInfo.AdminStates" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NORMAL" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DECOMMISSION_INPROGRESS" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DECOMMISSIONED" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
+ <!-- start interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
+ <interface name="DataTransferProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="DATA_TRANSFER_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Version for data transfers between clients and datanodes
+ This should change when the serialization of DatanodeInfo changes,
+ not just when the protocol changes. It is not very obvious.]]>
+ </doc>
+ </field>
+ <field name="OP_WRITE_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_READ_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_READ_METADATA" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_REPLACE_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_COPY_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_BLOCK_CHECKSUM" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_ERROR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_ERROR_CHECKSUM" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_ERROR_INVALID" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_ERROR_EXISTS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_CHECKSUM_OK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The Client transfers data to/from datanode using a streaming protocol.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
+ <!-- start interface org.apache.hadoop.hdfs.protocol.FSConstants -->
+ <interface name="FSConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="MIN_BLOCKS_FOR_WRITE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCK_INVALIDATE_CHUNK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="QUOTA_DONT_SET" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="QUOTA_RESET" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HEARTBEAT_INTERVAL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCKREPORT_INTERVAL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCKREPORT_INITIAL_DELAY" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_SOFTLIMIT_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_HARDLIMIT_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_RECOVER_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_PATH_LENGTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_PATH_DEPTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SMALL_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BLOCK_SIZE" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_DATA_SOCKET_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SIZE_OF_INTEGER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Some handy constants]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.protocol.FSConstants -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
+ <class name="FSConstants.DatanodeReportType" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="ALL" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LIVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEAD" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
+ <class name="FSConstants.SafeModeAction" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="SAFEMODE_LEAVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SAFEMODE_ENTER" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SAFEMODE_GET" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
+ <class name="FSConstants.UpgradeAction" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="GET_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DETAILED_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FORCE_PROCEED" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Distributed upgrade actions:
+
+ 1. Get upgrade status.
+ 2. Get detailed upgrade status.
+ 3. Proceed with the upgrade if it is stuck, no matter what the status is.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
+ <class name="LocatedBlock" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="LocatedBlock"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLocations" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStartOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isCorrupt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A LocatedBlock is a pair of Block, DatanodeInfo[]
+ objects. It tells where to find a Block.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
+ <class name="LocatedBlocks" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="LocatedBlocks" type="long, java.util.List, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLocatedBlocks" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get located blocks.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[Get located block.]]>
+ </doc>
+ </method>
+ <method name="locatedBlockCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get number of located blocks.]]>
+ </doc>
+ </method>
+ <method name="getFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isUnderConstruction" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return ture if file was under construction when
+ this LocatedBlocks was constructed, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="findBlock" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <doc>
+ <![CDATA[Find block containing specified offset.
+
+ @return block if found, or null otherwise.]]>
+ </doc>
+ </method>
+ <method name="insertRange"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockIdx" type="int"/>
+ <param name="newBlocks" type="java.util.List"/>
+ </method>
+ <method name="getInsertIndex" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="binSearchResult" type="int"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Collection of blocks with their locations and the file length.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
+ <class name="QuotaExceededException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="QuotaExceededException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="QuotaExceededException" type="long, long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setPathName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This exception is thrown when modification to HDFS results in violation
+ of a directory quota. A directory quota might be namespace quota (limit
+ on number of files and directories) or a diskspace quota (limit on space
+ taken by all the file under the directory tree). <br> <br>
+
+ The message for the exception specifies the directory where the quota
+ was violated and actual quotas.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.UnregisteredDatanodeException -->
+ <class name="UnregisteredDatanodeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnregisteredDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UnregisteredDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID, org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when a datanode that has not previously
+ registered is trying to access the name node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.UnregisteredDatanodeException -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+ <!-- start class org.apache.hadoop.hdfs.server.balancer.Balancer -->
+ <class name="Balancer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Run a balancer
+ @param args]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main method of Balancer
+ @param args arguments to a Balancer
+ @exception any exception occurs during datanode balancing]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return this balancer's configuration]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[set this balancer's configuration]]>
+ </doc>
+ </method>
+ <field name="MAX_NUM_CONCURRENT_MOVES" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The maximum number of concurrent blocks moves for
+ balancing purpose at a datanode]]>
+ </doc>
+ </field>
+ <field name="SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ALREADY_RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NO_MOVE_BLOCK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NO_MOVE_PROGRESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IO_EXCEPTION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ILLEGAL_ARGS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>The balancer is a tool that balances disk space usage on an HDFS cluster
+ when some datanodes become full or when new empty nodes join the cluster.
+ The tool is deployed as an application program that can be run by the
+ cluster administrator on a live HDFS cluster while applications
+ adding and deleting files.
+
+ <p>SYNOPSIS
+ <pre>
+ To start:
+ bin/start-balancer.sh [-threshold <threshold>]
+ Example: bin/ start-balancer.sh
+ start the balancer with a default threshold of 10%
+ bin/ start-balancer.sh -threshold 5
+ start the balancer with a threshold of 5%
+ To stop:
+ bin/ stop-balancer.sh
+ </pre>
+
+ <p>DESCRIPTION
+ <p>The threshold parameter is a fraction in the range of (0%, 100%) with a
+ default value of 10%. The threshold sets a target for whether the cluster
+ is balanced. A cluster is balanced if for each datanode, the utilization
+ of the node (ratio of used space at the node to total capacity of the node)
+ differs from the utilization of the (ratio of used space in the cluster
+ to total capacity of the cluster) by no more than the threshold value.
+ The smaller the threshold, the more balanced a cluster will become.
+ It takes more time to run the balancer for small threshold values.
+ Also for a very small threshold the cluster may not be able to reach the
+ balanced state when applications write and delete files concurrently.
+
+ <p>The tool moves blocks from highly utilized datanodes to poorly
+ utilized datanodes iteratively. In each iteration a datanode moves or
+ receives no more than the lesser of 10G bytes or the threshold fraction
+ of its capacity. Each iteration runs no more than 20 minutes.
+ At the end of each iteration, the balancer obtains updated datanodes
+ information from the namenode.
+
+ <p>A system property that limits the balancer's use of bandwidth is
+ defined in the default configuration file:
+ <pre>
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>1048576</value>
+ <description> Specifies the maximum bandwidth that each datanode
+ can utilize for the balancing purpose in term of the number of bytes
+ per second. </description>
+ </property>
+ </pre>
+
+ <p>This property determines the maximum speed at which a block will be
+ moved from one datanode to another. The default value is 1MB/s. The higher
+ the bandwidth, the faster a cluster can reach the balanced state,
+ but with greater competition with application processes. If an
+ administrator changes the value of this property in the configuration
+ file, the change is observed when HDFS is next restarted.
+
+ <p>MONITERING BALANCER PROGRESS
+ <p>After the balancer is started, an output file name where the balancer
+ progress will be recorded is printed on the screen. The administrator
+ can monitor the running of the balancer by reading the output file.
+ The output shows the balancer's status iteration by iteration. In each
+ iteration it prints the starting time, the iteration number, the total
+ number of bytes that have been moved in the previous iterations,
+ the total number of bytes that are left to move in order for the cluster
+ to be balanced, and the number of bytes that are being moved in this
+ iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left
+ To Move" is decreasing.
+
+ <p>Running multiple instances of the balancer in an HDFS cluster is
+ prohibited by the tool.
+
+ <p>The balancer automatically exits when any of the following five
+ conditions is satisfied:
+ <ol>
+ <li>The cluster is balanced;
+ <li>No block can be moved;
+ <li>No block has been moved for five consecutive iterations;
+ <li>An IOException occurs while communicating with the namenode;
+ <li>Another balancer is running.
+ </ol>
+
+ <p>Upon exit, a balancer returns an exit code and prints one of the
+ following messages to the output file in corresponding to the above exit
+ reasons:
+ <ol>
+ <li>The cluster is balanced. Exiting
+ <li>No block can be moved. Exiting...
+ <li>No block has been moved for 3 iterations. Exiting...
+ <li>Received an IO exception: failure reason. Exiting...
+ <li>Another balancer is running. Exiting...
+ </ol>
+
+ <p>The administrator can interrupt the execution of the balancer at any
+ time by running the command "stop-balancer.sh" on the machine where the
+ balancer is running.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.balancer.Balancer -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+ <!-- start class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
+ <class name="GenerationStamp" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="GenerationStamp"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new instance, initialized to FIRST_VALID_STAMP.]]>
+ </doc>
+ </constructor>
+ <method name="getStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current generation stamp]]>
+ </doc>
+ </method>
+ <method name="setStamp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stamp" type="long"/>
+ <doc>
+ <![CDATA[Sets the current generation stamp]]>
+ </doc>
+ </method>
+ <method name="nextStamp" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[First increments the counter and then returns the stamp]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="x" type="long"/>
+ <param name="y" type="long"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.hdfs.server.common.GenerationStamp"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="equalsWithWildcard" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="x" type="long"/>
+ <param name="y" type="long"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="WILDCARD_STAMP" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FIRST_VALID_STAMP" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A GenerationStamp is a Hadoop FS primitive, identified by a long.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
+ <!-- start interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
+ <interface name="HdfsConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="READ_TIMEOUT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE_TIMEOUT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE_TIMEOUT_EXTENSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Some handy internal HDFS constants]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
+ <class name="HdfsConstants.NodeType" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NAME_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DATA_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Type of the node]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
+ <class name="HdfsConstants.StartupOption" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="FORMAT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="REGULAR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UPGRADE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ROLLBACK" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FINALIZE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IMPORT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
+ <class name="InconsistentFSStateException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String, java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The exception is thrown when file system state is inconsistent
+ and is not recoverable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
+ <class name="IncorrectVersionException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IncorrectVersionException" type="int, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="IncorrectVersionException" type="int, java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The exception is thrown when external version does not match
+ current version of the appication.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.Storage -->
+ <class name="Storage" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create empty storage info of the specified type]]>
+ </doc>
+ </constructor>
+ <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, int, long"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, org.apache.hadoop.hdfs.server.common.StorageInfo"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="dirIterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return default iterator
+ This iterator returns all entires of storageDirs]]>
+ </doc>
+ </method>
+ <method name="dirIterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirType" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
+ <doc>
+ <![CDATA[Return iterator based on Storage Directory Type
+ This iterator selects entires of storageDirs of type dirType and returns
+ them via the Iterator]]>
+ </doc>
+ </method>
+ <method name="getNumStorageDirs" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStorageDir" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="idx" type="int"/>
+ </method>
+ <method name="addStorageDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ </method>
+ <method name="isConversionNeeded" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkVersionUpgradable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="oldVersion" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Checks if the upgrade from the given old version is supported. If
+ no upgrade is supported, it throws IncorrectVersionException.
+
+ @param oldVersion]]>
+ </doc>
+ </method>
+ <method name="getFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get common storage fields.
+ Should be overloaded if additional fields need to be get.
+
+ @param props
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set common storage fields.
+ Should be overloaded if additional fields need to be set.
+
+ @param props
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="from" type="java.io.File"/>
+ <param name="to" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteDir"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write all data storage files.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unlockAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unlock all storage directories.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isLockSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check whether underlying file system supports file locking.
+
+ @return <code>true</code> if exclusive locks are supported or
+ <code>false</code> otherwise.
+ @throws IOException
+ @see StorageDirectory#lock()]]>
+ </doc>
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="storage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
+ </method>
+ <method name="corruptPreUpgradeStorage"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="rootDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCorruptedData"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.RandomAccessFile"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAST_PRE_UPGRADE_LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAST_UPGRADABLE_LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAST_UPGRADABLE_HADOOP_VERSION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="PRE_GENERATIONSTAMP_LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STORAGE_FILE_VERSION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="STORAGE_DIR_CURRENT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="storageDirs" type="java.util.List"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Storage information file.
+ <p>
+ Local storage information is stored in a separate file VERSION.
+ It contains type of the node,
+ the storage layout version, the namespace id, and
+ the fs state creation time.
+ <p>
+ Local storage can reside in multiple directories.
+ Each directory should contain the same VERSION file as the others.
+ During startup Hadoop servers (name-node and data-nodes) read their local
+ storage information from them.
+ <p>
+ The servers hold a lock for each storage directory while they run so that
+ other nodes were not able to startup sharing the same storage.
+ The locks are released when the servers stop (normally or abnormally).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.Storage -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
+ <class name="Storage.StorageDirectory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Storage.StorageDirectory" type="java.io.File"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Storage.StorageDirectory" type="java.io.File, org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRoot" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get root directory of this storage]]>
+ </doc>
+ </method>
+ <method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get storage directory type]]>
+ </doc>
+ </method>
+ <method name="read"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read version file.
+
+ @throws IOException if file cannot be read or contains inconsistent data]]>
+ </doc>
+ </method>
+ <method name="read"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="from" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write version file.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="to" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clearDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clear and re-create storage directory.
+ <p>
+ Removes contents of the current directory and creates an empty directory.
+
+ This does not fully format storage directory.
+ It cannot write the version file since it should be written last after
+ all other storage type dependent files are written.
+ Derived storage is responsible for setting specific storage values and
+ writing the version file to disk.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCurrentDir" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getVersionFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPreviousVersionFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPreviousDir" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPreviousTmp" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRemovedTmp" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFinalizedTmp" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLastCheckpointTmp" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPreviousCheckpoint" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="analyzeStorage" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="startOpt" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check consistency of the storage directory
+
+ @param startOpt a startup option.
+
+ @return state {@link StorageState} of the storage directory
+ @throws {@link InconsistentFSStateException} if directory state is not
+ consistent and cannot be recovered]]>
+ </doc>
+ </method>
+ <method name="doRecover"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="curState" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete or recover storage state from previously failed transition.
+
+ @param curState specifies what/how the state should be recovered
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Lock storage to provide exclusive access.
+
+ <p> Locking is not supported by all file systems.
+ E.g., NFS does not consistently support exclusive locks.
+
+ <p> If locking is supported we guarantee exculsive access to the
+ storage directory. Otherwise, no guarantee is given.
+
+ @throws IOException if locking fails]]>
+ </doc>
+ </method>
+ <method name="unlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unlock storage.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[One of the storage directories.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
+ <!-- start interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
+ <interface name="Storage.StorageDirType" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isOfType" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
+ </method>
+ <doc>
+ <![CDATA[An interface to denote storage directory type
+ Implementations can define a type for storage directory by implementing
+ this interface.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
+ <class name="Storage.StorageState" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NON_EXISTENT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NOT_FORMATTED" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECOVER_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_FINALIZE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECOVER_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECOVER_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NORMAL" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.StorageInfo -->
+ <class name="StorageInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StorageInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="StorageInfo" type="int, int, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="StorageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLayoutVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNamespaceID" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setStorageInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="from" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
+ </method>
+ <field name="layoutVersion" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="namespaceID" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="cTime" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Common class for storage information.
+
+ TODO namespaceID should be long and computed as hash(address + port)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.StorageInfo -->
+ <!-- start interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
+ <interface name="Upgradeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the layout version of the upgrade object.
+ @return layout version]]>
+ </doc>
+ </method>
+ <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of the software component, which this object is upgrading.
+ @return type]]>
+ </doc>
+ </method>
+ <method name="getDescription" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Description of the upgrade object for displaying.
+ @return description]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Upgrade status determines a percentage of the work done out of the total
+ amount required by the upgrade.
+
+ 100% means that the upgrade is completed.
+ Any value < 100 means it is not complete.
+
+ The return value should provide at least 2 values, e.g. 0 and 100.
+ @return integer value in the range [0, 100].]]>
+ </doc>
+ </method>
+ <method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Prepare for the upgrade.
+ E.g. initialize upgrade data structures and set status to 0.
+
+ Returns an upgrade command that is used for broadcasting to other cluster
+ components.
+ E.g. name-node informs data-nodes that they must perform a distributed upgrade.
+
+ @return an UpgradeCommand for broadcasting.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete upgrade.
+ E.g. cleanup upgrade data structures or write metadata to disk.
+
+ Returns an upgrade command that is used for broadcasting to other cluster
+ components.
+ E.g. data-nodes inform the name-node that they completed the upgrade
+ while other data-nodes are still upgrading.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status report for the upgrade.
+
+ @param details true if upgradeStatus details need to be included,
+ false otherwise
+ @return {@link UpgradeStatusReport}
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Common interface for distributed upgrade objects.
+
+ Each upgrade object corresponds to a layout version,
+ which is the latest version that should be upgraded using this object.
+ That is all components whose layout version is greater or equal to the
+ one returned by {@link #getVersion()} must be upgraded with this object.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
+ <class name="UpgradeManager" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UpgradeManager"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBroadcastCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUpgradeState" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUpgradeVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setUpgradeState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uState" type="boolean"/>
+ <param name="uVersion" type="int"/>
+ </method>
+ <method name="getDistributedUpgrades" return="java.util.SortedSet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initializeUpgrade" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isUpgradeCompleted" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="startUpgrade" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeUpgrade"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="currentUpgrades" type="java.util.SortedSet"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="upgradeState" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="upgradeVersion" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="broadcastCommand" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Generic upgrade manager.
+
+ {@link #broadcastCommand} is the command that should be]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
+ <class name="UpgradeObject" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
+ <constructor name="UpgradeObject"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDescription" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="status" type="short"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Abstract upgrade object.
+
+ Contains default implementation of common methods of {@link Upgradeable}
+ interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
+ <class name="UpgradeObjectCollection" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UpgradeObjectCollection"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getDistributedUpgrades" return="java.util.SortedSet"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="versionFrom" type="int"/>
+ <param name="type" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Collection of upgrade objects.
+
+ Upgrade objects should be registered here before they can be used.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
+ <class name="UpgradeStatusReport" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="UpgradeStatusReport"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UpgradeStatusReport" type="int, short, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the layout version of the currently running upgrade.
+ @return layout version]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get upgrade upgradeStatus as a percentage of the total upgrade done.
+
+ @see Upgradeable#getUpgradeStatus()]]>
+ </doc>
+ </method>
+ <method name="isFinalized" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is current upgrade finalized.
+ @return true if finalized or false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getStatusText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <doc>
+ <![CDATA[Get upgradeStatus data as a text for reporting.
+ Should be overloaded for a particular upgrade specific upgradeStatus data.
+
+ @param details true if upgradeStatus details need to be included,
+ false otherwise
+ @return text]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Print basic upgradeStatus details.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="version" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="upgradeStatus" type="short"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="finalized" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Base upgrade upgradeStatus class.
+ Overload this class if specific status fields need to be reported.
+
+ Describes status of current upgrade.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.Util -->
+ <class name="Util" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Util"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="now" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Current system time.
+ @return current time in msec.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.Util -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.DataNode -->
+ <class name="DataNode" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="java.lang.Runnable"/>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use {@link NetUtils#createSocketAddr(String)} instead.]]>
+ </doc>
+ </method>
+ <method name="newSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates either NIO or regular depending on socketWriteTimeout.]]>
+ </doc>
+ </method>
+ <method name="getDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the DataNode object]]>
+ </doc>
+ </method>
+ <method name="createInterDataNodeProtocolProxy" return="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanodeid" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNameNodeAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSelfAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNamenode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the namenode's identifier]]>
+ </doc>
+ </method>
+ <method name="setNewStorageID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dnReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down this instance of the datanode.
+ Returns only after shutdown is complete.
+ This method can only be called by the offerService thread.
+ Otherwise, deadlock might occur.]]>
+ </doc>
+ </method>
+ <method name="checkDiskError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="e" type="java.io.IOException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkDiskError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="offerService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Main loop for the DataNode. Runs until shutdown,
+ forever calling remote NameNode functions.]]>
+ </doc>
+ </method>
+ <method name="notifyNamenodeReceivedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="delHint" type="java.lang.String"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[No matter what kind of exception we get, keep retrying to offerService().
+ That's the loop that connects to the NameNode and provides basic DataNode
+ functionality.
+
+ Only stop when "shouldRun" is turned off (which can only happen at shutdown).]]>
+ </doc>
+ </method>
+ <method name="runDatanodeDaemon"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dn" type="org.apache.hadoop.hdfs.server.datanode.DataNode"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start a single datanode daemon and wait for it to finish.
+ If this thread is specifically interrupted, it will stop waiting.]]>
+ </doc>
+ </method>
+ <method name="instantiateDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instantiate a single datanode object. This must be run by invoking
+ {@link DataNode#runDatanodeDaemon(DataNode)} subsequently.]]>
+ </doc>
+ </method>
+ <method name="createDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instantiate & Start a single datanode daemon and wait for it to finish.
+ If this thread is specifically interrupted, it will stop waiting.]]>
+ </doc>
+ </method>
+ <method name="makeInstance" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dataDirs" type="java.lang.String[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make an instance of DataNode after ensuring that at least one of the
+ given data directories (and their parent directories, if necessary)
+ can be created.
+ @param dataDirs List of directories, where the new DataNode instance should
+ keep its files.
+ @param conf Configuration instance to use.
+ @return DataNode instance for given list of data dirs and conf, or null if
+ no directory from this directory list can be created.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="scheduleBlockReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delay" type="long"/>
+ <doc>
+ <![CDATA[This methods arranges for the data node to send the block report at the next heartbeat.]]>
+ </doc>
+ </method>
+ <method name="getFSDataset" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method is used for testing.
+ Examples are adding and deleting blocks directly.
+ The most common usage will be when the data node's storage is similated.
+
+ @return the fsdataset that stores the blocks]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="getBlockMetaDataInfo" return="org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="recoverBlocks" return="org.apache.hadoop.util.Daemon"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[][]"/>
+ </method>
+ <method name="updateBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="finalize" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="recoverBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="keepLength" type="boolean"/>
+ <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DN_CLIENTTRACE_FORMAT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="namenode" type="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="data" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="dnRegistration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="EMPTY_DEL_HINT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockScanner" type="org.apache.hadoop.hdfs.server.datanode.DataBlockScanner"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockScannerThread" type="org.apache.hadoop.util.Daemon"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ipcServer" type="org.apache.hadoop.ipc.Server"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PKT_HEADER_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Header size for a packet]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[DataNode is a class (and program) that stores a set of
+ blocks for a DFS deployment. A single deployment can
+ have one or many DataNodes. Each DataNode communicates
+ regularly with a single NameNode. It also communicates
+ with client code and other DataNodes from time to time.
+
+ DataNodes store a series of named blocks. The DataNode
+ allows client code to read these blocks, or to write new
+ block data. The DataNode may also, in response to instructions
+ from its NameNode, delete blocks or copy blocks to/from other
+ DataNodes.
+
+ The DataNode maintains just one critical table:
+ block-> stream of bytes (of BLOCK_SIZE or less)
+
+ This info is stored on a local disk. The DataNode
+ reports the table's contents to the NameNode upon startup
+ and every so often afterwards.
+
+ DataNodes spend their lives in an endless loop of asking
+ the NameNode for something to do. A NameNode cannot connect
+ to a DataNode directly; a NameNode simply returns values from
+ functions invoked by a DataNode.
+
+ DataNodes maintain an open server socket so that client code
+ or other DataNodes can read/write data. The host/port for
+ this server is reported to the NameNode, which then sends that
+ information to clients or other DataNodes that might be interested.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.DataNode -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
+ <class name="DataStorage" extends="org.apache.hadoop.hdfs.server.common.Storage"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataStorage" type="org.apache.hadoop.hdfs.server.common.StorageInfo, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getStorageID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isConversionNeeded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="corruptPreUpgradeStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="rootDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Data storage information file.
+ <p>
+ @see Storage]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
+ <class name="FSDataset" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"/>
+ <constructor name="FSDataset" type="org.apache.hadoop.hdfs.server.datanode.DataStorage, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[An FSDataset has a directory where it loads its data files.]]>
+ </doc>
+ </constructor>
+ <method name="getMetaFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="findBlockFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <doc>
+ <![CDATA[Return the block file for the given ID]]>
+ </doc>
+ </method>
+ <method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blkid" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="metaFileExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMetaDataLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total space used by dfs datanode]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return total capacity, used and unused]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return how many bytes can still be stored in the FSDataset]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Find the block's on-disk length]]>
+ </doc>
+ </method>
+ <method name="getBlockFile" return="java.io.File"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get File name for a given block.]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="seekOffset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blkOffset" type="long"/>
+ <param name="ckoff" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns handles to the block file and its metadata file]]>
+ </doc>
+ </method>
+ <method name="detachBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="numLinks" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a copy of the block if this block is linked to an existing
+ snapshot. This ensures that modifying this block does not modify
+ data in any existing snapshots.
+ @param block Block
+ @param numLinks Detach if the number of links exceed this value
+ @throws IOException
+ @return - true if the specified block was detached]]>
+ </doc>
+ </method>
+ <method name="updateBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="writeToBlock" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="isRecovery" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Start writing to a block file
+ If isRecovery is true and the block pre-exists, then we kill all
+ other threads that might be writing to this block, and then reopen the file.]]>
+ </doc>
+ </method>
+ <method name="getChannelPosition" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="streams" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Retrieves the offset in the block to which the
+ next write will write data to.]]>
+ </doc>
+ </method>
+ <method name="setChannelPosition"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="streams" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
+ <param name="dataOffset" type="long"/>
+ <param name="ckOffset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets the offset in the block to which the
+ next write will write data to.]]>
+ </doc>
+ </method>
+ <method name="finalizeBlock"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete the block write!]]>
+ </doc>
+ </method>
+ <method name="unfinalizeBlock"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Remove the temporary block file (if any)]]>
+ </doc>
+ </method>
+ <method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a table of block data]]>
+ </doc>
+ </method>
+ <method name="isValidBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <doc>
+ <![CDATA[Check whether the given block is a valid one.]]>
+ </doc>
+ </method>
+ <method name="validateBlockMetadata"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="invalidate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[We're informed that a block is no longer valid. We
+ could lazily garbage-collect the block, but why bother?
+ just get rid of it.]]>
+ </doc>
+ </method>
+ <method name="getFile" return="java.io.File"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <doc>
+ <![CDATA[Turn the block identifier into a filename.]]>
+ </doc>
+ </method>
+ <method name="checkDataDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ <doc>
+ <![CDATA[check if a data directory is healthy
+ @throws DiskErrorException]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStorageInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="METADATA_EXTENSION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="METADATA_VERSION" type="short"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[FSDataset manages a set of data blocks. Each block
+ has a unique name and an extent on disk.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
+ <!-- start interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
+ <interface name="FSDatasetInterface" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean"/>
+ <method name="getMetaDataLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the length of the metadata file of the specified block
+ @param b - the block for which the metadata length is desired
+ @return the length of the metadata file for the specified block.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns metaData of block b as an input stream (and its length)
+ @param b - the block
+ @return the metadata input stream;
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="metaFileExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Does the meta file exist for this block?
+ @param b - the block
+ @return true if the metafile for the specified block exists
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the specified block's on-disk length (excluding metadata)
+ @param b
+ @return the specified block's on-disk length (excluding metadata)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blkid" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return the generation stamp stored with the block.]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream to read the contents of the specified block
+ @param b
+ @return an input stream to read the contents of the specified block
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="seekOffset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream at specified offset of the specified block
+ @param b
+ @param seekOffset
+ @return an input stream to read the contents of the specified block,
+ starting at the offset
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blkoff" type="long"/>
+ <param name="ckoff" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream at specified offset of the specified block
+ The block is still in the tmp directory and is not finalized
+ @param b
+ @param blkoff
+ @param ckoff
+ @return an input stream to read the contents of the specified block,
+ starting at the offset
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeToBlock" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="isRecovery" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the block and returns output streams to write data and CRC
+ @param b
+ @param isRecovery True if this is part of error recovery, otherwise false
+ @return a BlockWriteStreams object to allow writing the block data
+ and CRC
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="updateBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update the block to the new generation stamp and length.]]>
+ </doc>
+ </method>
+ <method name="finalizeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalizes the block previously opened for writing using writeToBlock.
+ The block size is what is in the parameter b and it must match the amount
+ of data written
+ @param b
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unfinalizeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unfinalizes the block previously opened for writing using writeToBlock.
+ The temporary file associated with this block is deleted.
+ @param b
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the block report - the full list of blocks stored
+ @return - the block report - the full list of blocks stored]]>
+ </doc>
+ </method>
+ <method name="isValidBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <doc>
+ <![CDATA[Is the block valid?
+ @param b
+ @return - true if the specified block is valid]]>
+ </doc>
+ </method>
+ <method name="invalidate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Invalidates the specified blocks
+ @param invalidBlks - the blocks to be invalidated
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkDataDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ <doc>
+ <![CDATA[Check if all the data directories are healthy
+ @throws DiskErrorException]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stringifies the name of the storage]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shutdown the FSDataset]]>
+ </doc>
+ </method>
+ <method name="getChannelPosition" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="stream" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current offset in the data stream.
+ @param b
+ @param stream The stream to the data file and checksum file
+ @return the position of the file pointer in the data stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setChannelPosition"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="stream" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
+ <param name="dataOffset" type="long"/>
+ <param name="ckOffset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets the file pointer of the data stream and checksum stream to
+ the specified values.
+ @param b
+ @param stream The stream for the data file and checksum file
+ @param dataOffset The position to which the file pointer for the data stream
+ should be set
+ @param ckOffset The position to which the file pointer for the checksum stream
+ should be set
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="validateBlockMetadata"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Validate that the contents in the Block matches
+ the file on disk. Returns true if everything is fine.
+ @param b The block to be verified.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is an interface for the underlying storage that stores blocks for
+ a data node.
+ Examples are the FSDataset (which stores blocks on dirs) and
+ SimulatedFSDataset (which simulates data).]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
+ <class name="FSDatasetInterface.BlockInputStreams" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class contains the input streams for the data and checksum
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
+ <class name="FSDatasetInterface.BlockWriteStreams" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This class contains the output streams for the data and checksum
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
+ <class name="FSDatasetInterface.MetaDataInputStream" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides the input stream and length of the metadata
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
+ <class name="UpgradeObjectDatanode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="UpgradeObjectDatanode"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDatanode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpgrade"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Specifies how the upgrade is performed.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete upgrade and return a status complete command for broadcasting.
+
+ Data-nodes finish upgrade at different times.
+ The data-node needs to re-confirm with the name-node that the upgrade
+ is complete while other nodes are still upgrading.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for data-node upgrade objects.
+ Data-node upgrades are run in separate threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
+ <class name="DataNodeActivityMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataNodeActivityMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is the JMX MBean for reporting the DataNode Activity.
+ The MBean is register using the name
+ "hadoop:service=DataNode,name=DataNodeActivity-<storageid>"
+
+ Many of the activity metrics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the metrics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most metrics contexts do.
+ The default Null metrics context however does NOT. So if you aren't
+ using any other metrics context then you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ dfs.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically
+
+
+
+ Impl details: We use a dynamic mbean that gets the list of the metrics
+ from the metrics registry passed as an argument to the constructor]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
+ <class name="DataNodeMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="DataNodeMetrics" type="org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bytesWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bytesRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksReplicated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksRemoved" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksVerified" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockVerificationFailures" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readsFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readsFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writesFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writesFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writeBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readMetadataOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockChecksumOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="copyBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="replaceBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="heartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockReports" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various DataNode statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #blocksRead}.inc()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
+ <!-- start interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
+ <interface name="FSDatasetMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the total space (in bytes) used by dfs datanode
+ @return the total space used by dfs datanode
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns total capacity (in bytes) of storage (used and unused)
+ @return total capacity of storage (used and unused)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the amount of free storage space (in bytes)
+ @return The amount of free storage space
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getStorageInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the storage id of the underlying storage]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This Interface defines the methods to get the status of the FSDataset of
+ a data node.
+ It is also used for publishing via JMX (hence we follow the JMX naming
+ convention.)
+ * Note we have not used the MetricsDynamicMBeanBase to implement this
+ because the interface for the FSDatasetMBean is stable and should
+ be published as an interface.
+
+ <p>
+ Data Node runtime statistic info is reported in another MBean
+ @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeStatisticsMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
+ <class name="CheckpointSignature" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A unique signature intended to identify checkpoint transactions.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
+ <class name="CorruptReplicasMap" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CorruptReplicasMap"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addToCorruptReplicasMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="dn" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <doc>
+ <![CDATA[Mark the block belonging to datanode as corrupt.
+
+ @param blk Block to be added to CorruptReplicasMap
+ @param dn DatanodeDescriptor which holds the corrupt replica]]>
+ </doc>
+ </method>
+ <method name="numCorruptReplicas" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Stores information about all corrupt blocks in the File System.
+ A Block is considered corrupt only if all of its replicas are
+ corrupt. While reporting replicas of a Block, we hide any corrupt
+ copies. These copies are removed once Block is found to have
+ expected number of good replicas.
+ Mapping: Block -> TreeSet<DatanodeDescriptor>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
+ <class name="DatanodeDescriptor" extends="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DatanodeDescriptor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+ @param nodeID id of the data node]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network
+ @param hostName it could be different from host specified for DatanodeID]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, long, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param capacity capacity of the data node
+ @param dfsUsed space used by the data node
+ @param remaining remaining capacity of the data node
+ @param xceiverCount # of data transfers at the data node]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String, long, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network
+ @param capacity capacity of the data node, including space used by non-dfs
+ @param dfsUsed the used space by dfs datanode
+ @param remaining remaining capacity of the data node
+ @param xceiverCount # of data transfers at the data node]]>
+ </doc>
+ </constructor>
+ <method name="numBlocks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlocksScheduled" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Approximate number of blocks currently scheduled to be written
+ to this datanode.]]>
+ </doc>
+ </method>
+ <field name="isAlive" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeDescriptor tracks stats on a given DataNode,
+ such as available storage capacity, last update time, etc.,
+ and maintains a set of blocks stored on the datanode.
+
+ This data structure is internal
+ to the namenode. It is *not* sent over-the-wire to the Client
+ or the Datanodes. Neither is it stored persistently in the
+ fsImage.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
+ <class name="DatanodeDescriptor.BlockTargetPair" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="block" type="org.apache.hadoop.hdfs.protocol.Block"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="targets" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Block and targets pair]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
+ <class name="FileChecksumServlets" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileChecksumServlets"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Servlets for file checksum]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
+ <class name="FileChecksumServlets.GetServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileChecksumServlets.GetServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Get FileChecksum]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
+ <class name="FileChecksumServlets.RedirectServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileChecksumServlets.RedirectServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Redirect file checksum queries to an appropriate datanode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
+ <class name="FileDataServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileDataServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="ugi" type="org.apache.hadoop.security.UnixUserGroupInformation"/>
+ <param name="nnproxy" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="URISyntaxException" type="java.net.URISyntaxException"/>
+ <doc>
+ <![CDATA[Create a redirection URI]]>
+ </doc>
+ </method>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Service a GET request as described below.
+ Request:
+ {@code
+ GET http://<nn>:<port>/data[/<path>] HTTP/1.1
+ }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Redirect queries about the hosted filesystem to an appropriate datanode.
+ @see org.apache.hadoop.hdfs.HftpFileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
+ <class name="FsckServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FsckServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in Namesystem's jetty to do fsck on namenode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
+ <class name="FSEditLog" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create empty edit log files.
+ Initialize the output stream for logging.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createEditLogFile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Shutdown the file store.]]>
+ </doc>
+ </method>
+ <method name="logSync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="logOpenFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add open lease record to edit log.
+ Records the block locations of the last block.]]>
+ </doc>
+ </method>
+ <method name="logCloseFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFile"/>
+ <doc>
+ <![CDATA[Add close lease record to edit log.]]>
+ </doc>
+ </method>
+ <method name="logMkDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
+ <doc>
+ <![CDATA[Add create directory record to edit log]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[FSEditLog maintains a log of the namespace modifications.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FSImage -->
+ <class name="FSImage" extends="org.apache.hadoop.hdfs.server.common.Storage"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSImage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FSImage" type="java.io.File"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Represents an Image (image and edit file).]]>
+ </doc>
+ </constructor>
+ <method name="getFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write last checkpoint time and version file into the storage directory.
+
+ The version file should always be written last.
+ Missing or corrupted version file indicates that
+ the checkpoint is not valid.
+
+ @param sd storage directory
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getEditLog" return="org.apache.hadoop.hdfs.server.namenode.FSEditLog"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isConversionNeeded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="saveFSImage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Save the contents of the FS image
+ and create empty edits.]]>
+ </doc>
+ </method>
+ <method name="format"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFsEditName" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="corruptPreUpgradeStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="rootDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="checkpointTime" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="removedStorageDirs" type="java.util.List"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[list of failed (and thus removed) storages]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[FSImage handles checkpointing and logging of the namespace edits.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FSImage -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
+ <class name="FSNamesystem" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean"/>
+ <method name="getNamespaceDirs" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getNamespaceEditsDirs" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getUpgradePermission" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the default path permission when upgrading from releases with no
+ permissions (<=0.15) to releases with permissions (>=0.16)]]>
+ </doc>
+ </method>
+ <method name="getFSNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the FSNamesystem object]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Close down this file system manager.
+ Causes heartbeat and lease daemons to stop; waits briefly for
+ them to finish, but a short timeout returns control back to caller.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permissions for an existing file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="group" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set owner for an existing file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="offset" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get block locations within the specified range.
+ @see ClientProtocol#getBlockLocations(String, long, long)]]>
+ </doc>
+ </method>
+ <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="offset" type="long"/>
+ <param name="length" type="long"/>
+ <param name="doAccessTime" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get block locations within the specified range.
+ @see ClientProtocol#getBlockLocations(String, long, long)]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[stores the modification and access time for this inode.
+ The access time is precise up to an hour. The transaction, if needed, is
+ written to the edits log but is not flushed.]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ The NameNode sets new replication and schedules either replication of
+ under-replicated data blocks or removal of the excessive block copies
+ if the blocks are over-replicated.
+
+ @see ClientProtocol#setReplication(String, short)
+ @param src file name
+ @param replication new replication
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="getAdditionalBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client would like to obtain an additional block for the indicated
+ filename (which is being written-to). Return an array that consists
+ of the block, plus a set of machines. The first on this list should
+ be where the client writes data. Subsequent items in the list must
+ be provided in the connection to the first datanode.
+
+ Make sure the previous blocks have been reported by datanodes and
+ are replicated. Will return an empty 2-elt array if we want the
+ client to "try again later".]]>
+ </doc>
+ </method>
+ <method name="abandonBlock" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client would like to let go of the given block]]>
+ </doc>
+ </method>
+ <method name="completeFile" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem.CompleteFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="markBlockAsCorrupt"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="dn" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the block belonging to datanode as corrupt
+ @param blk Block to be marked as corrupt
+ @param dn Datanode which holds the corrupt replica]]>
+ </doc>
+ </method>
+ <method name="invalidateBlock"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="dn" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Invalidates the given block on the given datanode.]]>
+ </doc>
+ </method>
+ <method name="renameTo" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Change the indicated filename.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Remove the indicated filename from namespace. If the filename
+ is a directory (non empty) and recursive is set to false then throw exception.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permissions" type="org.apache.hadoop.fs.permission.PermissionStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create all the necessary directories]]>
+ </doc>
+ </method>
+ <method name="getListing" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a listing of all files at 'src'. The Object[] array
+ exists so we can return file attributes (soon to be implemented)]]>
+ </doc>
+ </method>
+ <method name="registerDatanode"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Register Datanode.
+ <p>
+ The purpose of registration is to identify whether the new datanode
+ serves a new data storage, and will report new data block copies,
+ which the namenode was not aware of; or the datanode is a replacement
+ node for the data storage that was previously served by a different
+ or the same (in terms of host:port) datanode.
+ The data storages are distinguished by their storageIDs. When a new
+ data storage is reported the namenode issues a new unique storageID.
+ <p>
+ Finally, the namenode returns its namespaceID as the registrationID
+ for the datanodes.
+ namespaceID is a persistent attribute of the name space.
+ The registrationID is checked every time the datanode is communicating
+ with the namenode.
+ Datanodes with inappropriate registrationID are rejected.
+ If the namenode stops, and then restarts it can restore its
+ namespaceID and will continue serving the datanodes that have previously
+ registered with the namenode without restarting the whole cluster.
+
+ @see org.apache.hadoop.hdfs.server.datanode.DataNode#register()]]>
+ </doc>
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get registrationID for datanodes based on the namespaceID.
+
+ @see #registerDatanode(DatanodeRegistration)
+ @see FSImage#newNamespaceID()
+ @return registration ID]]>
+ </doc>
+ </method>
+ <method name="computeDatanodeWork" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Compute block replication and block invalidation work
+ that can be scheduled on data-nodes.
+ The datanode will be informed of this work at the next heartbeat.
+
+ @return number of blocks scheduled for replication or removal.]]>
+ </doc>
+ </method>
+ <method name="setNodeReplicationLimit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="limit" type="int"/>
+ </method>
+ <method name="removeDatanode"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[remove a datanode descriptor
+ @param nodeID datanode ID]]>
+ </doc>
+ </method>
+ <method name="processReport"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <param name="newReport" type="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The given node is reporting all its blocks. Use this info to
+ update the (machine-->blocklist) and (block-->machinelist) tables.]]>
+ </doc>
+ </method>
+ <method name="blockReceived"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="delHint" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The given node is reporting that it received a certain block.]]>
+ </doc>
+ </method>
+ <method name="getMissingBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCapacityTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total raw bytes including non-dfs used space.]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total used space by data nodes]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsedPercent" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total used space by data nodes as percentage of total capacity]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsedNonDFS" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total used space by data nodes for non DFS purposes such
+ as storing temporary files on the local file system]]>
+ </doc>
+ </method>
+ <method name="getCapacityRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total non-used raw bytes.]]>
+ </doc>
+ </method>
+ <method name="getCapacityRemainingPercent" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total remaining space by data nodes as percentage of total capacity]]>
+ </doc>
+ </method>
+ <method name="getTotalLoad" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total number of connections.]]>
+ </doc>
+ </method>
+ <method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ </method>
+ <method name="DFSNodesStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="live" type="java.util.ArrayList"/>
+ <param name="dead" type="java.util.ArrayList"/>
+ </method>
+ <method name="stopDecommission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stop decommissioning the specified datanodes.]]>
+ </doc>
+ </method>
+ <method name="getDataNodeInfo" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getDFSNameNodeAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link NameNode#getNameNodeAddress()} instead.">
+ <doc>
+ <![CDATA[@deprecated use {@link NameNode#getNameNodeAddress()} instead.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="java.util.Date"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rereads the config to get hosts and exclude list file names.
+ Rereads the files to update the hosts and exclude lists. It
+ checks if any of the hosts have changed states:
+ 1. Added to hosts --> no further work needed here.
+ 2. Removed from hosts --> mark AdminState as decommissioned.
+ 3. Added to exclude --> start decommission.
+ 4. Removed from exclude --> stop decommission.]]>
+ </doc>
+ </method>
+ <method name="getDatanode" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get data node by storage ID.
+
+ @param nodeID
+ @return DatanodeDescriptor or null if the node is not found.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="randomDataNode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRandomDatanode" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlocksTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of blocks in the system.]]>
+ </doc>
+ </method>
+ <method name="getFilesTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPendingReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUnderReplicatedBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCorruptReplicaBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns number of blocks with corrupt replicas]]>
+ </doc>
+ </method>
+ <method name="getScheduledReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFSState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFSNamesystemMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get FSNamesystemMetrics]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[shutdown FSNamesystem]]>
+ </doc>
+ </method>
+ <method name="numLiveDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of live data nodes
+ @return Number of live data nodes]]>
+ </doc>
+ </method>
+ <method name="numDeadDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of dead data nodes
+ @return Number of dead data nodes]]>
+ </doc>
+ </method>
+ <method name="setGenerationStamp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stamp" type="long"/>
+ <doc>
+ <![CDATA[Sets the generation stamp for this filesystem]]>
+ </doc>
+ </method>
+ <method name="getGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the generation stamp for this filesystem]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="AUDIT_FORMAT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="auditLog" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="dir" type="org.apache.hadoop.hdfs.server.namenode.FSDirectory"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="corruptReplicas" type="org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="leaseManager" type="org.apache.hadoop.hdfs.server.namenode.LeaseManager"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="lmthread" type="org.apache.hadoop.util.Daemon"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="replthread" type="org.apache.hadoop.util.Daemon"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="fsNamesystemObject" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ transient="false" volatile="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[FSNamesystem does the actual bookkeeping work for the
+ DataNode.
+
+ It tracks several important tables.
+
+ 1) valid fsname --> blocklist (kept on disk, logged)
+ 2) Set of all valid blocks (inverted #1)
+ 3) block --> machinelist (kept in memory, rebuilt dynamically from reports)
+ 4) machine --> blocklist (inverted #2)
+ 5) LRU cache of updated-heartbeat machines]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
+ <class name="GetImageServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GetImageServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in Namesystem's jetty to retrieve a file.
+ Typically used by the Secondary NameNode to retrieve image and
+ edit file for periodic checkpointing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.JspHelper -->
+ <class name="JspHelper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JspHelper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="randomNode" return="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="bestNode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="streamBlockInAscii"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="blockId" type="long"/>
+ <param name="genStamp" type="long"/>
+ <param name="blockSize" type="long"/>
+ <param name="offsetIntoBlock" type="long"/>
+ <param name="chunkSizeToView" type="long"/>
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="DFSNodesStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="live" type="java.util.ArrayList"/>
+ <param name="dead" type="java.util.ArrayList"/>
+ </method>
+ <method name="addTableHeader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableRow"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="columns" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableRow"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="columns" type="java.lang.String[]"/>
+ <param name="row" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableFooter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSafeModeText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWarningText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsn" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"/>
+ </method>
+ <method name="getInodeLimitText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUpgradeStatusText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="sortNodeList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodes" type="java.util.ArrayList"/>
+ <param name="field" type="java.lang.String"/>
+ <param name="order" type="java.lang.String"/>
+ </method>
+ <method name="printPathWithLinks"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.lang.String"/>
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="namenodeInfoPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="printGotoForm"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="namenodeInfoPort" type="int"/>
+ <param name="file" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createTitle"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="file" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="WEB_UGI_PROPERTY_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="nameNodeAddr" type="java.net.InetSocketAddress"
+ transient="false" volatile="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="conf" type="org.apache.hadoop.conf.Configuration"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="webUGI" type="org.apache.hadoop.security.UnixUserGroupInformation"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="defaultChunkSizeToView" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.JspHelper -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
+ <class name="LeaseExpiredException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LeaseExpiredException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The lease that was being used to create this file has expired.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
+ <class name="LeaseManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getLeaseByPath" return="org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <doc>
+ <![CDATA[@return the lease containing src]]>
+ </doc>
+ </method>
+ <method name="countLease" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the number of leases currently in the system]]>
+ </doc>
+ </method>
+ <method name="setLeasePeriod"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="softLimit" type="long"/>
+ <param name="hardLimit" type="long"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[LeaseManager does the lease housekeeping for writing on files.
+ This class also provides useful static methods for lease recovery.
+
+ Lease Recovery Algorithm
+ 1) Namenode retrieves lease information
+ 2) For each file f in the lease, consider the last block b of f
+ 2.1) Get the datanodes which contains b
+ 2.2) Assign one of the datanodes as the primary datanode p
+
+ 2.3) p obtains a new generation stamp from the namenode
+ 2.4) p gets the block info from each datanode
+ 2.5) p computes the minimum block length
+ 2.6) p updates the datanodes, which have a valid generation stamp,
+ with the new generation stamp and the minimum block length
+ 2.7) p acknowledges the namenode the update results
+
+ 2.8) Namenode updates the BlockInfo
+ 2.9) Namenode removes f from the lease
+ and removes the lease once all files have been removed
+ 2.10) Namenode commit changes to edit log]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
+ <class name="ListPathsServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ListPathsServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="buildRoot" return="java.util.Map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="doc" type="org.znerd.xmlenc.XMLOutputter"/>
+ <doc>
+ <![CDATA[Build a map from the query string, setting values and defaults.]]>
+ </doc>
+ </method>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Service a GET request as described below.
+ Request:
+ {@code
+ GET http://<nn>:<port>/listPaths[/<path>][<?option>[&option]*] HTTP/1.1
+ }
+
+ Where <i>option</i> (default) in:
+ recursive (&quot;no&quot;)
+ filter (&quot;.*&quot;)
+ exclude (&quot;\..*\.crc&quot;)
+
+ Response: A flat list of files/directories in the following format:
+ {@code
+ <listing path="..." recursive="(yes|no)" filter="..."
+ time="yyyy-MM-dd hh:mm:ss UTC" version="...">
+ <directory path="..." modified="yyyy-MM-dd hh:mm:ss"/>
+ <file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ" accesstime="yyyy-MM-dd'T'hh:mm:ssZ"
+ blocksize="..."
+ replication="..." size="..."/>
+ </listing>
+ }]]>
+ </doc>
+ </method>
+ <field name="df" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Obtain meta-information about a filesystem.
+ @see org.apache.hadoop.hdfs.HftpFileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.NameNode -->
+ <class name="NameNode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol"/>
+ <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start NameNode.
+ <p>
+ The name-node can be started with one of the following startup options:
+ <ul>
+ <li>{@link StartupOption#REGULAR REGULAR} - normal name node startup</li>
+ <li>{@link StartupOption#FORMAT FORMAT} - format name node</li>
+ <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
+ upgrade and create a snapshot of the current file system state</li>
+ <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the
+ cluster back to the previous state</li>
+ </ul>
+ The option is passed via configuration field:
+ <tt>dfs.namenode.startup</tt>
+
+ The conf will be modified to reflect the actual ports on which
+ the NameNode is up and running if the user passes the port as
+ <code>zero</code> in the conf.
+
+ @param conf configuration
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="format"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Format a new filesystem. Destroys any filesystem that may already
+ exist at this location.]]>
+ </doc>
+ </method>
+ <method name="getNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNameNodeMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="address" type="java.lang.String"/>
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="namenode" type="java.net.InetSocketAddress"/>
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Wait for service to finish.
+ (Normally, it runs forever.)]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop all NameNode threads and wait for all to finish.]]>
+ </doc>
+ </method>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="size" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[return a list of blocks & their locations on <code>datanode</code> whose
+ total size is <code>size</code>
+
+ @param datanode on which blocks are located
+ @param size total size of blocks]]>
+ </doc>
+ </method>
+ <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="offset" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permissions" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="abandonBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client needs to give up on the block.]]>
+ </doc>
+ </method>
+ <method name="complete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client has detected an error on the specified located blocks
+ and is reporting them to the server. For now, the namenode will
+ mark the block as corrupt. In the future we might
+ check the blocks are actually corrupt.]]>
+ </doc>
+ </method>
+ <method name="nextGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="commitBlockSynchronization"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newgenerationstamp" type="long"/>
+ <param name="newlength" type="long"/>
+ <param name="closeFile" type="boolean"/>
+ <param name="deleteblock" type="boolean"/>
+ <param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getPreferredBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="renewLease"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getListing" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileInfo" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file.
+ @param src The string representation of the path to the file
+ @throws IOException if permission to access file is denied by the system
+ @return object containing information regarding the file
+ or null if file not found]]>
+ </doc>
+ </method>
+ <method name="getStats" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="isInSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the cluster currently in safe mode?]]>
+ </doc>
+ </method>
+ <method name="saveNamespace"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refresh the list of datanodes that the namenode should allow to
+ connect. Re-reads conf by creating new Configuration object and
+ uses the files list in the configuration to update the list.]]>
+ </doc>
+ </method>
+ <method name="getEditLogSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the size of the current edit log.]]>
+ </doc>
+ </method>
+ <method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Roll the edit log.]]>
+ </doc>
+ </method>
+ <method name="rollFsImage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Roll the image]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps namenode state into specified file]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setQuota"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="namespaceQuota" type="long"/>
+ <param name="diskspaceQuota" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="fsync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="register" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="capacity" type="long"/>
+ <param name="dfsUsed" type="long"/>
+ <param name="remaining" type="long"/>
+ <param name="xmitsInProgress" type="int"/>
+ <param name="xceiverCount" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Data node notify the name node that it is alive
+ Return an array of block-oriented commands for the datanode to execute.
+ This will be either a transfer or a delete operation.]]>
+ </doc>
+ </method>
+ <method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="blocks" type="long[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockReceived"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <param name="delHints" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="errorReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="errorCode" type="int"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="verifyRequest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify request.
+
+ Verifies correctness of the datanode version, registration ID, and
+ if the datanode does not need to be shutdown.
+
+ @param nodeReg data node registration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="verifyVersion"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="version" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify version.
+
+ @param version
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFsImageName" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the name of the fsImage file]]>
+ </doc>
+ </method>
+ <method name="getFSImage" return="org.apache.hadoop.hdfs.server.namenode.FSImage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFsImageNameCheckpoint" return="java.io.File[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the name of the fsImage file uploaded by periodic
+ checkpointing]]>
+ </doc>
+ </method>
+ <method name="getNameNodeAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the address on which the NameNodes is listening to.
+ @return the address on which the NameNodes is listening to.]]>
+ </doc>
+ </method>
+ <method name="getHttpAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the address of the NameNodes http server,
+ which is used to access the name-node web UI.
+
+ @return the http address.]]>
+ </doc>
+ </method>
+ <method name="refreshServiceAcl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createNameNode" return="org.apache.hadoop.hdfs.server.namenode.NameNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="DEFAULT_PORT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="stateChangeLog" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="namesystem" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[NameNode serves as both directory namespace manager and
+ "inode table" for the Hadoop DFS. There is a single NameNode
+ running in any DFS deployment. (Well, except when there
+ is a second backup/failover NameNode.)
+
+ The NameNode controls two critical tables:
+ 1) filename->blocksequence (namespace)
+ 2) block->machinelist ("inodes")
+
+ The first table is stored on disk and is very precious.
+ The second table is rebuilt every time the NameNode comes
+ up.
+
+ 'NameNode' refers to both this class as well as the 'NameNode server'.
+ The 'FSNamesystem' class actually performs most of the filesystem
+ management. The majority of the 'NameNode' class itself is concerned
+ with exposing the IPC interface and the http server to the outside world,
+ plus some configuration management.
+
+ NameNode implements the ClientProtocol interface, which allows
+ clients to ask for DFS services. ClientProtocol is not
+ designed for direct use by authors of DFS client code. End-users
+ should instead use the org.apache.nutch.hadoop.fs.FileSystem class.
+
+ NameNode also implements the DatanodeProtocol interface, used by
+ DataNode programs that actually store DFS data blocks. These
+ methods are invoked repeatedly and automatically by all the
+ DataNodes in a DFS deployment.
+
+ NameNode also implements the NamenodeProtocol interface, used by
+ secondary namenodes or rebalancing processes to get partial namenode's
+ state, for example partial blocksMap etc.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.NameNode -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
+ <class name="NamenodeFsck" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NamenodeFsck" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.namenode.NameNode, java.util.Map, javax.servlet.http.HttpServletResponse"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filesystem checker.
+ @param conf configuration (namenode config)
+ @param nn namenode that this fsck is going to use
+ @param pmap key=value[] map that is passed to the http servlet as url parameters
+ @param response the object into which this servelet writes the url contents
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="fsck"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check files on DFS, starting from the indicated path.
+ @throws Exception]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CORRUPT_STATUS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HEALTHY_STATUS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NONEXISTENT_STATUS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILURE_STATUS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FIXING_NONE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Don't attempt any fixing .]]>
+ </doc>
+ </field>
+ <field name="FIXING_MOVE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Move corrupted files to /lost+found .]]>
+ </doc>
+ </field>
+ <field name="FIXING_DELETE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Delete corrupted files.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
+ sub-optimal conditions.
+ <p>The tool scans all files and directories, starting from an indicated
+ root path. The following abnormal conditions are detected and handled:</p>
+ <ul>
+ <li>files with blocks that are completely missing from all datanodes.<br/>
+ In this case the tool can perform one of the following actions:
+ <ul>
+ <li>none ({@link #FIXING_NONE})</li>
+ <li>move corrupted files to /lost+found directory on DFS
+ ({@link #FIXING_MOVE}). Remaining data blocks are saved as a
+ block chains, representing longest consecutive series of valid blocks.</li>
+ <li>delete corrupted files ({@link #FIXING_DELETE})</li>
+ </ul>
+ </li>
+ <li>detect files with under-replicated or over-replicated blocks</li>
+ </ul>
+ Additionally, the tool collects a detailed overall DFS statistics, and
+ optionally can print detailed statistics on block locations and replication
+ factors of each file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.FsckResult -->
+ <class name="NamenodeFsck.FsckResult" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NamenodeFsck.FsckResult"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isHealthy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DFS is considered healthy if there are no missing blocks.]]>
+ </doc>
+ </method>
+ <method name="addMissing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <doc>
+ <![CDATA[Add a missing block name, plus its size.]]>
+ </doc>
+ </method>
+ <method name="getMissingIds" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a list of missing block names (as list of Strings).]]>
+ </doc>
+ </method>
+ <method name="getMissingSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total size of missing data, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setMissingSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="missingSize" type="long"/>
+ </method>
+ <method name="getExcessiveReplicas" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of over-replicated blocks.]]>
+ </doc>
+ </method>
+ <method name="setExcessiveReplicas"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="overReplicatedBlocks" type="long"/>
+ </method>
+ <method name="getReplicationFactor" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the actual replication factor.]]>
+ </doc>
+ </method>
+ <method name="getMissingReplicas" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of under-replicated blocks. Note: missing blocks are not counted here.]]>
+ </doc>
+ </method>
+ <method name="setMissingReplicas"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="underReplicatedBlocks" type="long"/>
+ </method>
+ <method name="getTotalDirs" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total number of directories encountered during this scan.]]>
+ </doc>
+ </method>
+ <method name="setTotalDirs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalDirs" type="long"/>
+ </method>
+ <method name="getTotalFiles" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total number of files encountered during this scan.]]>
+ </doc>
+ </method>
+ <method name="setTotalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalFiles" type="long"/>
+ </method>
+ <method name="getTotalOpenFiles" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total number of files opened for write encountered during this scan.]]>
+ </doc>
+ </method>
+ <method name="setTotalOpenFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalOpenFiles" type="long"/>
+ <doc>
+ <![CDATA[Set total number of open files encountered during this scan.]]>
+ </doc>
+ </method>
+ <method name="getTotalSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total size of scanned data, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setTotalSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalSize" type="long"/>
+ </method>
+ <method name="getTotalOpenFilesSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total size of open files data, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setTotalOpenFilesSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalOpenFilesSize" type="long"/>
+ </method>
+ <method name="getReplication" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Return the intended replication factor, against which the over/under-
+ replicated blocks are counted. Note: this value comes from the current
+ Configuration supplied for the tool, so it may be different from the
+ value in DFS Configuration.]]>
+ </doc>
+ </method>
+ <method name="setReplication"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="replication" type="int"/>
+ </method>
+ <method name="getTotalBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of blocks in the scanned area.]]>
+ </doc>
+ </method>
+ <method name="setTotalBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalBlocks" type="long"/>
+ </method>
+ <method name="getTotalOpenFilesBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of blocks held by open files.]]>
+ </doc>
+ </method>
+ <method name="setTotalOpenFilesBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalOpenFilesBlocks" type="long"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCorruptFiles" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Return the number of corrupted files.]]>
+ </doc>
+ </method>
+ <method name="setCorruptFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="corruptFiles" type="long"/>
+ </method>
+ <doc>
+ <![CDATA[FsckResult of checking, plus overall DFS statistics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.FsckResult -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
+ <class name="NotReplicatedYetException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NotReplicatedYetException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The file has not finished being written to enough datanodes yet.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
+ <class name="SafeModeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SafeModeException" type="java.lang.String, org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+      <![CDATA[This exception is thrown when the name node is in safe mode.
+ The client cannot modify the namespace until the safe mode is off.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
+ <class name="SecondaryNameNode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="SecondaryNameNode" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a connection to the primary namenode.]]>
+ </doc>
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Shut down this instance of the secondary namenode.
+ Returns only after shutdown is complete.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods.
+ @param argv Command line parameters.
+ @exception Exception if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The Secondary NameNode is a helper to the primary NameNode.
+ The Secondary is responsible for supporting periodic checkpoints
+ of the HDFS metadata. The current design allows only one Secondary
+ NameNode per HDFS cluster.
+
+ The Secondary NameNode is a daemon that periodically wakes
+ up (determined by the schedule specified in the configuration),
+ triggers a periodic checkpoint and then goes back to sleep.
+ The Secondary NameNode uses the ClientProtocol to talk to the
+ primary NameNode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
+ <class name="StreamFile" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StreamFile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getDFSClient" return="org.apache.hadoop.hdfs.DFSClient"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[getting a client for connecting to dfs]]>
+ </doc>
+ </method>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
+ <class name="UpgradeObjectNamenode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UpgradeObjectNamenode"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="command" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Process an upgrade command.
+ RPC has only one very generic command for all upgrade related inter
+ component communications.
+ The actual command recognition and execution should be handled here.
+ The reply is sent back also as an UpgradeCommand.
+
+ @param command
+ @return the reply command which is analyzed on the client side.]]>
+ </doc>
+ </method>
+ <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFSNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="forceProceed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Base class for name-node upgrade objects.
+ Data-node upgrades are run in separate threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+ <!-- start interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
+ <interface name="FSNamesystemMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getFSState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The state of the file system: Safemode or Operational
+ @return the state]]>
+ </doc>
+ </method>
+ <method name="getBlocksTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of allocated blocks in the system
+ @return - number of allocated blocks]]>
+ </doc>
+ </method>
+ <method name="getCapacityTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total storage capacity
+ @return - total capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacityRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Free (unused) storage capacity
+ @return - free capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Used storage capacity
+ @return - used capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getFilesTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total number of files and directories
+ @return - num of files and directories]]>
+ </doc>
+ </method>
+ <method name="getPendingReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Blocks pending to be replicated
+ @return - num of blocks to be replicated]]>
+ </doc>
+ </method>
+ <method name="getUnderReplicatedBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Blocks under replicated
+ @return - num of blocks under replicated]]>
+ </doc>
+ </method>
+ <method name="getScheduledReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Blocks scheduled for replication
+ @return - num of blocks scheduled for replication]]>
+ </doc>
+ </method>
+ <method name="getTotalLoad" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total Load on the FSNamesystem
+ @return - total load of FSNamesystem]]>
+ </doc>
+ </method>
+ <method name="numLiveDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Live data nodes
+ @return number of live data nodes]]>
+ </doc>
+ </method>
+ <method name="numDeadDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of dead data nodes
+ @return number of dead data nodes]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[This interface defines the methods to get the status of the FSNamesystem of
+ a name node.
+ It is also used for publishing via JMX (hence we follow the JMX naming
+ convention.)
+
+ Note we have not used the MetricsDynamicMBeanBase to implement this
+ because the interface for the NameNodeStateMBean is stable and should
+ be published as an interface.
+
+ <p>
+ Name Node runtime activity statistic info is reported in another MBean
+ @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
+ <class name="FSNamesystemMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="FSNamesystemMetrics" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.
+ We set the metrics value within this function before pushing it out.
+ FSNamesystem updates its own local variables which are
+ light weight compared to Metrics counters.
+
+ Some of the metrics are explicitly cast to int. A few metrics collectors
+ do not handle long values. It is safe to cast to int for now as all these
+ values fit in an int value.
+ Metrics related to DFS capacity are stored in bytes which do not fit in
+ int, so they are rounded to GB]]>
+ </doc>
+ </method>
+ <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="filesTotal" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksTotal" type="org.apache.hadoop.metrics.util.MetricsLongValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="capacityTotalGB" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="capacityUsedGB" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="capacityRemainingGB" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="totalLoad" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="pendingReplicationBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="underReplicatedBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="scheduledReplicationBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="missingBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[This class is for maintaining the various FSNamesystem status metrics
+ and publishing them through the metrics interfaces.
+ The FSNamesystem creates and registers the JMX MBean.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #filesTotal}.set()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivtyMBean -->
+ <class name="NameNodeActivtyMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NameNodeActivtyMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is the JMX MBean for reporting the NameNode Activity.
+ The MBean is register using the name
+ "hadoop:service=NameNode,name=NameNodeActivity"
+
+ Many of the activity metrics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the metrics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most metrics contexts do.
+ The default Null metrics context however does NOT. So if you aren't
+ using any other metrics context then you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ dfs.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically
+
+
+
+ Impl details: We use a dynamic mbean that gets the list of the metrics
+ from the metrics registry passed as an argument to the constructor]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivtyMBean -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
+ <class name="NameNodeMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="NameNodeMetrics" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.namenode.NameNode"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesCreated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesAppended" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numGetBlockLocations" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesRenamed" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numGetListingOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numCreateFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numDeleteFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numAddBlockOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="transactions" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="syncs" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="transactionsBatchedInSync" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockReport" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="safeModeTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="fsImageLoadTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numBlocksCorrupted" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various NameNode activity statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #syncs}.inc()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.protocol">
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
+ <class name="BlockCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BlockCommand" type="int, java.util.List"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create BlockCommand for transferring blocks to another datanode
+ @param blocktargetlist blocks to be transferred]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockCommand" type="int, org.apache.hadoop.hdfs.protocol.Block[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create BlockCommand for the given action
+ @param blocks blocks related to the action]]>
+ </doc>
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.protocol.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTargets" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[][]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A BlockCommand is an instruction to a datanode
+ regarding some blocks under its control. It tells
+ the DataNode to either invalidate a set of indicated
+ blocks, or to copy a set of indicated blocks to
+ another DataNode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo -->
+ <class name="BlockMetaDataInfo" extends="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockMetaDataInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BlockMetaDataInfo" type="org.apache.hadoop.hdfs.protocol.Block, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLastScanTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Meta data information for a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
+ <class name="BlocksWithLocations" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlocksWithLocations" type="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with one parameter]]>
+ </doc>
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[getter]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[serialization method]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[deserialization method]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class to implement an array of BlockLocations
+ It provides efficient customized serialization/deserialization methods
+ instead of using the default array (de)serialization provided by RPC]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
+ <class name="BlocksWithLocations.BlockWithLocations" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlocksWithLocations.BlockWithLocations"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="BlocksWithLocations.BlockWithLocations" type="org.apache.hadoop.hdfs.protocol.Block, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[constructor]]>
+ </doc>
+ </constructor>
+ <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the block]]>
+ </doc>
+ </method>
+ <method name="getDatanodes" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the block's locations]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[deserialization method]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[serialization method]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class to keep track of a block and its locations]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
+ <class name="DatanodeCommand" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="DatanodeCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getAction" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="REGISTER" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FINALIZE" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
+ <interface name="DatanodeProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="register" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Register Datanode.
+
+ @see org.apache.hadoop.hdfs.server.datanode.DataNode#dnRegistration
+ @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
+
+ @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains
+ new storageID if the datanode did not have one and
+ registration ID for further communication.]]>
+ </doc>
+ </method>
+ <method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="capacity" type="long"/>
+ <param name="dfsUsed" type="long"/>
+ <param name="remaining" type="long"/>
+ <param name="xmitsInProgress" type="int"/>
+ <param name="xceiverCount" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[sendHeartbeat() tells the NameNode that the DataNode is still
+ alive and well. Includes some status info, too.
+ It also gives the NameNode a chance to return
+ an array of "DatanodeCommand" objects.
+ A DatanodeCommand tells the DataNode to invalidate local block(s),
+ or to copy them to other DataNodes, etc.]]>
+ </doc>
+ </method>
+ <method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="blocks" type="long[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[blockReport() tells the NameNode about all the locally-stored blocks.
+ The NameNode returns an array of Blocks that have become obsolete
+ and should be deleted. This function is meant to upload *all*
+ the locally-stored blocks. It's invoked upon startup and then
+ infrequently afterwards.
+ @param registration
+ @param blocks - the block list as an array of longs.
+ Each block is represented as 2 longs.
+ This is done instead of Block[] to reduce memory used by block reports.
+
+ @return - the next command for DN to process.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="blockReceived"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <param name="delHints" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[blockReceived() allows the DataNode to tell the NameNode about
+ recently-received block data, with a hint for preferred replica
+ to be deleted when there are excess blocks.
+ For example, whenever client code
+ writes a new Block here, or another DataNode copies a Block to
+ this DataNode, it will call blockReceived().]]>
+ </doc>
+ </method>
+ <method name="errorReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="errorCode" type="int"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[errorReport() tells the NameNode about something that has gone
+ awry. Useful for debugging.]]>
+ </doc>
+ </method>
+ <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This is a very general way to send a command to the name-node during
+ distributed upgrade process.
+
+ The generosity is because the variety of upgrade commands is unpredictable.
+ The reply from the name-node is also received in the form of an upgrade
+ command.
+
+ @return a reply in the form of an upgrade command]]>
+ </doc>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}
+ }]]>
+ </doc>
+ </method>
+ <method name="nextGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return the next GenerationStamp to be associated with the specified
+ block.]]>
+ </doc>
+ </method>
+ <method name="commitBlockSynchronization"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newgenerationstamp" type="long"/>
+ <param name="newlength" type="long"/>
+ <param name="closeFile" type="boolean"/>
+ <param name="deleteblock" type="boolean"/>
+ <param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Commit block synchronization in lease recovery]]>
+ </doc>
+ </method>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[19: SendHeartbeat returns an array of DatanodeCommand objects
+ instead of a DatanodeCommand object.]]>
+ </doc>
+ </field>
+ <field name="NOTIFY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DISK_ERROR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INVALID_BLOCK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_UNKNOWN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Determines actions that data node should perform
+ when receiving a datanode command.]]>
+ </doc>
+ </field>
+ <field name="DNA_TRANSFER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_INVALIDATE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_SHUTDOWN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_REGISTER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_FINALIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_RECOVERBLOCK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Protocol that a DFS datanode uses to communicate with the NameNode.
+ It's used to upload current load information and block reports.
+
+ The only way a NameNode can communicate with a DataNode is by
+ returning values from these functions.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
+ <class name="DatanodeRegistration" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="DatanodeRegistration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeRegistration" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create DatanodeRegistration]]>
+ </doc>
+ </constructor>
+ <method name="setInfoPort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="infoPort" type="int"/>
+ </method>
+ <method name="setIpcPort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ipcPort" type="int"/>
+ </method>
+ <method name="setStorageInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="storage" type="org.apache.hadoop.hdfs.server.datanode.DataStorage"/>
+ </method>
+ <method name="setName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="storageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeRegistration class contains all information the Namenode needs
+ to identify and verify a Datanode when it contacts the Namenode.
+ This information is sent by Datanode with each communication request.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
+ <class name="DisallowedDatanodeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DisallowedDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when a datanode tries to register or communicate
+ with the namenode when it does not appear on the list of included nodes,
+ or has been specifically excluded.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
+ <interface name="InterDatanodeProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="getBlockMetaDataInfo" return="org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return the BlockMetaDataInfo of a block;
+ null if the block is not found]]>
+ </doc>
+ </method>
+ <method name="updateBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="finalize" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update the block to the new generation stamp and length.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[3: added a finalize parameter to updateBlock]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An inter-datanode protocol for updating generation stamp]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
+ <interface name="NamenodeProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="size" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a list of blocks belonged to <code>datanode</code>
+ whose total size is equal to <code>size</code>
+ @param datanode a data node
+ @param size requested size
+ @return a list of blocks & their locations
+ @throws RemoteException if size is less than or equal to 0 or
+ datanode does not exist]]>
+ </doc>
+ </method>
+ <method name="getEditLogSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the size of the current edit log (in bytes).
+ @return The number of bytes in the current edit log.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the current edit log and opens a new one. The
+ call fails if the file system is in SafeMode.
+ @throws IOException
+ @return a unique token to identify this transaction.]]>
+ </doc>
+ </method>
+ <method name="rollFsImage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rolls the fsImage log. It removes the old fsImage, copies the
+ new image to fsImage, removes the old edits and renames edits.new
+ to edits. The call fails if any of the four files are missing.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[2: Added getEditLogSize(), rollEditLog(), rollFSImage().]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Protocol that a secondary NameNode uses to communicate with the NameNode.
+ It's used to get part of the name node state]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
+ <class name="NamespaceInfo" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="NamespaceInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NamespaceInfo" type="int, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDistributedUpgradeVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[NamespaceInfo is returned by the name-node in reply
+ to a data-node handshake.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
+ <class name="UpgradeCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UpgradeCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UpgradeCommand" type="int, int, short"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCurrentStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="UC_ACTION_REPORT_STATUS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UC_ACTION_START_UPGRADE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is a generic distributed upgrade command.
+
+ During the upgrade cluster components send upgrade commands to each other
+ in order to obtain or share information with them.
+ It is supposed that each upgrade defines specific upgrade command by
+ deriving them from this class.
+ The upgrade command contains version of the upgrade, which is verified
+ on the receiving side and current status of the upgrade.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+ <!-- start class org.apache.hadoop.hdfs.tools.DFSAdmin -->
+ <class name="DFSAdmin" extends="org.apache.hadoop.fs.FsShell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSAdmin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a DFSAdmin object.]]>
+ </doc>
+ </constructor>
+ <constructor name="DFSAdmin" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a DFSAdmin object.]]>
+ </doc>
+ </constructor>
+ <method name="report"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gives a report on how the FileSystem is doing.
+ @exception IOException if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Safe mode maintenance command.
+ Usage: java DFSAdmin -safemode [enter | leave | get]
+ @param argv List of of command line parameters.
+ @param idx The index of the command that is being processed.
+ @exception IOException if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <method name="saveNamespace" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to save the namespace.
+ Usage: java DFSAdmin -saveNamespace
+ @exception IOException
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
+ </doc>
+ </method>
+ <method name="refreshNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to reread the hosts and excluded hosts
+ file.
+ Usage: java DFSAdmin -refreshNodes
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to finalize previously performed upgrade.
+ Usage: java DFSAdmin -finalizeUpgrade
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="upgradeProgress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to request current distributed upgrade status,
+ a detailed status, or to force the upgrade to proceed.
+
+ Usage: java DFSAdmin -upgradeProgress [status | details | force]
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="metaSave" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps DFS data structures into specified file.
+ Usage: java DFSAdmin -metasave filename
+ @param argv List of of command line parameters.
+ @param idx The index of the command that is being processed.
+ @exception IOException if an error accoured wile accessing
+ the file or path.]]>
+ </doc>
+ </method>
+ <method name="refreshServiceAcl" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refresh the authorization policy on the {@link NameNode}.
+ @return exitcode 0 on success, non-zero on failure
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[@param argv The parameters passed to this program.
+ @exception Exception if the filesystem does not exist.
+ @return 0 on success, non zero on error.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods.
+ @param argv Command line parameters.
+ @exception Exception if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class provides some DFS administrative access.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.DFSAdmin -->
+ <!-- start class org.apache.hadoop.hdfs.tools.DFSck -->
+ <class name="DFSck" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="DFSck" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Filesystem checker.
+ @param conf current Configuration
+ @throws Exception]]>
+ </doc>
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
+ sub-optimal conditions.
+ <p>The tool scans all files and directories, starting from an indicated
+ root path. The following abnormal conditions are detected and handled:</p>
+ <ul>
+ <li>files with blocks that are completely missing from all datanodes.<br/>
+ In this case the tool can perform one of the following actions:
+ <ul>
+ <li>none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})</li>
+ <li>move corrupted files to /lost+found directory on DFS
+ ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as a
+ block chains, representing longest consecutive series of valid blocks.</li>
+ <li>delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})</li>
+ </ul>
+ </li>
+ <li>detect files with under-replicated or over-replicated blocks</li>
+ </ul>
+ Additionally, the tool collects a detailed overall DFS statistics, and
+ optionally can print detailed statistics on block locations and replication
+ factors of each file.
+ The tool also provides and option to filter open files during the scan.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.DFSck -->
+</package>
+
+</api>
diff --git a/aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.21.0.xml b/aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.21.0.xml
new file mode 100644
index 0000000..7fab725
--- /dev/null
+++ b/aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.21.0.xml
@@ -0,0 +1,16220 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Aug 24 10:48:17 PDT 2010 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="hadoop-hdfs 0.21.0"
+ jdversion="1.0.9">
+
+<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /Users/tom/workspace/hadoop-hdfs-0.21-committer/build/ivy/lib/Hadoop-Hdfs/jdiff/jdiff-1.0.9.jar:/Users/tom/workspace/hadoop-hdfs-0.21-committer/build/ivy/lib/Hadoop-Hdfs/jdiff/xerces-1.4.4.jar -classpath /Users/tom/workspace/hadoop-hdfs-0.21-committer/build/classes:/Users/tom/workspace/hadoop-hdfs-0.21-committer/conf:/Users/tom/.ivy2/cache/org.apache.hadoop/hadoop-common/jars/hadoop-common-0.21.0-SNAPSHOT.jar:/Users/tom/.ivy2/cache/commons-cli/commons-cli/jars/commons-cli-1.2.jar:/Users/tom/.ivy2/cache/xmlenc/xmlenc/jars/xmlenc-0.52.jar:/Users/tom/.ivy2/cache/commons-httpclient/commons-httpclient/jars/commons-httpclient-3.1.jar:/Users/tom/.ivy2/cache/commons-codec/commons-codec/jars/commons-codec-1.4.jar:/Users/tom/.ivy2/cache/commons-net/commons-net/jars/commons-net-1.4.1.jar:/Users/tom/.ivy2/cache/oro/oro/jars/oro-2.0.8.jar:/Users/tom/.ivy2/cache/org.mortbay.jetty/jetty/jars/jetty-6.1.14.jar:/Users/tom/.ivy2/cache/org.mortbay.jetty/jetty-util/jars/jetty-util-6.1.14.jar:/Users/tom/.ivy2/cache/org.mortbay.jetty/servlet-api-2.5/jars/servlet-api-2.5-6.1.14.jar:/Users/tom/.ivy2/cache/tomcat/jasper-runtime/jars/jasper-runtime-5.5.12.jar:/Users/tom/.ivy2/cache/tomcat/jasper-compiler/jars/jasper-compiler-5.5.12.jar:/Users/tom/.ivy2/cache/org.mortbay.jetty/jsp-api-2.1/jars/jsp-api-2.1-6.1.14.jar:/Users/tom/.ivy2/cache/org.mortbay.jetty/jsp-2.1/jars/jsp-2.1-6.1.14.jar:/Users/tom/.ivy2/cache/org.eclipse.jdt/core/jars/core-3.1.1.jar:/Users/tom/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar:/Users/tom/.ivy2/cache/commons-el/commons-el/jars/commons-el-1.0.jar:/Users/tom/.ivy2/cache/net.java.dev.jets3t/jets3t/jars/jets3t-0.7.1.jar:/Users/tom/.ivy2/cache/commons-logging/commons-logging/jars/commons-logging-1.1.1.jar:/Users/tom/.ivy2/cache/net.sf.kosmosfs/kfs/jars/kfs-0.3.jar:/Users/tom/.ivy2/cache/junit/junit/jars/junit-4.8.1.jar:/Users/tom/.ivy2/cache/hsqldb/hsqldb/jars/hsqldb-1.8.0.10.jar:/Users/tom/.ivy2/cache/org.apache.h
adoop/avro/jars/avro-1.3.2.jar:/Users/tom/.ivy2/cache/org.codehaus.jackson/jackson-mapper-asl/jars/jackson-mapper-asl-1.4.2.jar:/Users/tom/.ivy2/cache/org.codehaus.jackson/jackson-core-asl/jars/jackson-core-asl-1.4.2.jar:/Users/tom/.ivy2/cache/org.slf4j/slf4j-api/jars/slf4j-api-1.5.11.jar:/Users/tom/.ivy2/cache/com.thoughtworks.paranamer/paranamer/jars/paranamer-2.2.jar:/Users/tom/.ivy2/cache/com.thoughtworks.paranamer/paranamer-ant/jars/paranamer-ant-2.2.jar:/Users/tom/.ivy2/cache/com.thoughtworks.paranamer/paranamer-generator/jars/paranamer-generator-2.2.jar:/Users/tom/.ivy2/cache/com.thoughtworks.qdox/qdox/jars/qdox-1.10.1.jar:/Users/tom/.ivy2/cache/asm/asm/jars/asm-3.2.jar:/Users/tom/.ivy2/cache/commons-lang/commons-lang/jars/commons-lang-2.5.jar:/Users/tom/.ivy2/cache/log4j/log4j/jars/log4j-1.2.15.jar:/Users/tom/.ivy2/cache/org.aspectj/aspectjrt/jars/aspectjrt-1.6.5.jar:/Users/tom/.ivy2/cache/org.aspectj/aspectjtools/jars/aspectjtools-1.6.5.jar:/Users/tom/.ivy2/cache/org.mockito/mockito-all/jars/mockito-all-1.8.2.jar:/Users/tom/.ivy2/cache/jdiff/jdiff/jars/jdiff-1.0.9.jar:/Users/tom/.ivy2/cache/xerces/xerces/jars/xerces-1.4.4.jar:/usr/share/ant/lib/ant-launcher.jar:/Users/tom/.ant/lib/ivy.jar:/usr/share/ant/lib/ant-antlr.jar:/usr/share/ant/lib/ant-jai.jar:/usr/share/ant/lib/ant-jmf.jar:/usr/share/ant/lib/ant-junit.jar:/usr/share/ant/lib/ant-nodeps.jar:/usr/share/ant/lib/ant-swing.jar:/usr/share/ant/lib/ant-testutil.jar:/usr/share/ant/lib/ant-trax.jar:/usr/share/ant/lib/ant.jar:/usr/share/ant/lib/ivy-2.1.0.jar:/usr/share/ant/lib/xercesImpl.jar:/usr/share/ant/lib/xml-apis.jar -sourcepath /Users/tom/workspace/hadoop-hdfs-0.21-committer/src/java -apidir /Users/tom/workspace/hadoop-hdfs-0.21-committer/lib/jdiff -apiname hadoop-hdfs 0.21.0 -->
+<package name="org.apache.hadoop.fs">
+ <!-- start class org.apache.hadoop.fs.Hdfs -->
+ <class name="Hdfs" extends="org.apache.hadoop.fs.AbstractFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getUriDefaultPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInternal" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="createFlag" type="java.util.EnumSet"/>
+ <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="bytesPerChecksum" type="int"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getFileLinkStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getFsStatus" return="org.apache.hadoop.fs.FsStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatusIterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="mkdir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="renameInternal"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="renameInternal"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="supportsSymlinks" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="target" type="org.apache.hadoop.fs.Path"/>
+ <param name="link" type="org.apache.hadoop.fs.Path"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getLinkTarget" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Hdfs -->
+</package>
+<package name="org.apache.hadoop.hdfs">
+ <!-- start class org.apache.hadoop.hdfs.BlockMissingException -->
+ <class name="BlockMissingException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockMissingException" type="java.lang.String, java.lang.String, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[An exception that indicates that file was corrupted.
+ @param filename name of corrupted file
+ @param description a description of the corruption details]]>
+ </doc>
+ </constructor>
+ <method name="getFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the corrupted file.
+ @return name of corrupted file]]>
+ </doc>
+ </method>
+ <method name="getOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the offset at which this file is corrupted
+ @return offset of corrupted file]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This exception is thrown when a read encounters a block that has no locations
+ associated with it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.BlockMissingException -->
+ <!-- start class org.apache.hadoop.hdfs.BlockReader -->
+ <class name="BlockReader" extends="org.apache.hadoop.fs.FSInputChecker"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChunkPosition" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ </method>
+ <method name="readChunk" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksumBuf" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="newBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <param name="file" type="java.lang.String"/>
+ <param name="blockId" type="long"/>
+ <param name="accessToken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <param name="genStamp" type="long"/>
+ <param name="startOffset" type="long"/>
+ <param name="len" type="long"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="newBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <param name="file" type="java.lang.String"/>
+ <param name="blockId" type="long"/>
+ <param name="accessToken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <param name="genStamp" type="long"/>
+ <param name="startOffset" type="long"/>
+ <param name="len" type="long"/>
+ <param name="bufferSize" type="int"/>
+ <param name="verifyChecksum" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Java Doc required]]>
+ </doc>
+ </method>
+ <method name="newBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <param name="file" type="java.lang.String"/>
+ <param name="blockId" type="long"/>
+ <param name="accessToken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <param name="genStamp" type="long"/>
+ <param name="startOffset" type="long"/>
+ <param name="len" type="long"/>
+ <param name="bufferSize" type="int"/>
+ <param name="verifyChecksum" type="boolean"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readAll" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[kind of like readFully(). Only reads as much as possible.
+ And allows use of protected readFully().]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is a wrapper around connection to datadone
+ and understands checksum, offset etc]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.BlockReader -->
+ <!-- start class org.apache.hadoop.hdfs.DeprecatedUTF8 -->
+ <class name="DeprecatedUTF8" extends="org.apache.hadoop.io.UTF8"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DeprecatedUTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DeprecatedUTF8" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a given string.]]>
+ </doc>
+ </constructor>
+ <constructor name="DeprecatedUTF8" type="org.apache.hadoop.hdfs.DeprecatedUTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a given string.]]>
+ </doc>
+ </constructor>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A simple wrapper around {@link org.apache.hadoop.io.UTF8}.
+ This class should be used only when it is absolutely necessary
+ to use {@link org.apache.hadoop.io.UTF8}. The only difference is that
+ using this class does not require "@SuppressWarning" annotation to avoid
+ javac warning. Instead the deprecation is implied in the class name.
+
+ This should be treated as package private class to HDFS.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DeprecatedUTF8 -->
+ <!-- start class org.apache.hadoop.hdfs.DFSClient -->
+ <class name="DFSClient" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="java.io.Closeable"/>
+ <constructor name="DFSClient" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="Deprecated at 0.21">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as this(NameNode.getAddress(conf), conf);
+ @see #DFSClient(InetSocketAddress, Configuration)
+ @deprecated Deprecated at 0.21]]>
+ </doc>
+ </constructor>
+ <constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as this(nameNodeAddr, conf, null);
+ @see #DFSClient(InetSocketAddress, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)]]>
+ </doc>
+ </constructor>
+ <constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as this(nameNodeAddr, null, conf, stats);
+ @see #DFSClient(InetSocketAddress, ClientProtocol, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)]]>
+ </doc>
+ </constructor>
+ <method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The locking hierarchy is to first acquire lock on DFSClient object, followed by
+ lock on leasechecker, followed by lock on an individual DFSOutputStream.]]>
+ </doc>
+ </method>
+ <method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nameNodeAddr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file system, abandoning all of the leases and files being
+ created and close connections to the namenode.]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default block size for this cluster
+ @return the default block size in bytes]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get server default values for a number of configuration params.]]>
+ </doc>
+ </method>
+ <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="renewer" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="renewDelegationToken" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cancelDelegationToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report corrupt blocks that were discovered by the client.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="start" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Get block location info about file
+
+ getBlockLocations() returns a list of hostnames that store
+ data for a specific file region. It returns a set of hostnames
+ for every block within the indicated region.
+
+ This function is very useful when writing code that considers
+ data-placement when performing operations. For example, the
+ MapReduce system tries to schedule tasks on the same machines
+ as the data-block the task processes.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.hdfs.DFSInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.hdfs.DFSInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #open(String, int, boolean)} instead.">
+ <param name="src" type="java.lang.String"/>
+ <param name="buffersize" type="int"/>
+ <param name="verifyChecksum" type="boolean"/>
+ <param name="stats" type="org.apache.hadoop.fs.FileSystem.Statistics"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create an input stream that obtains a nodelist from the
+ namenode, and then reads from all the right places. Creates
+ inner subclass of InputStream that does the right out-of-band
+ work.
+ @deprecated Use {@link #open(String, int, boolean)} instead.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.hdfs.DFSInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="buffersize" type="int"/>
+ <param name="verifyChecksum" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create an input stream that obtains a nodelist from the
+ namenode, and then reads from all the right places. Creates
+ inner subclass of InputStream that does the right out-of-band
+ work.]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create a new dfs file and return an output stream for writing into it.
+
+ @param src stream name
+ @param overwrite do not check for file existence if true
+ @return output stream
+ @throws UnresolvedLinkException if a symlink is encountered in src.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create a new dfs file and return an output stream for writing into it
+ with write-progress reporting.
+
+ @param src stream name
+ @param overwrite do not check for file existence if true
+ @return output stream
+ @throws UnresolvedLinkException if a symlink is encountered in src.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create a new dfs file with the specified block replication
+ and return an output stream for writing into the file.
+
+ @param src stream name
+ @param overwrite do not check for file existence if true
+ @param replication block replication
+ @return output stream
+ @throws UnresolvedLinkException if a symlink is encountered in src.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the namenode associated with this DFSClient object
+ @return the namenode associated with this DFSClient object]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create a new dfs file with the specified block replication
+ with write-progress reporting and return an output stream for writing
+ into the file.
+
+ @param src stream name
+ @param overwrite do not check for file existence if true
+ @param replication block replication
+ @return output stream
+ @throws UnresolvedLinkException if a symlink is encountered in src.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Call
+ {@link #create(String,FsPermission,EnumSet,short,long,Progressable,int)}
+ with default permission.
+ @see FsPermission#getDefault()]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="flag" type="java.util.EnumSet"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Call
+ {@link #create(String,FsPermission,EnumSet,boolean,short,long,Progressable,int)}
+ with createParent set to true.]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="flag" type="java.util.EnumSet"/>
+ <param name="createParent" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create a new dfs file with the specified block replication
+ with write-progress reporting and return an output stream for writing
+ into the file.
+
+ @param src stream name
+ @param permission The permission of the directory being created.
+ If permission == null, use {@link FsPermission#getDefault()}.
+ @param flag do not check for file existence if true
+ @param createParent create missing parent directory if true
+ @param replication block replication
+ @return output stream
+ @throws IOException
+ @throws UnresolvedLinkException if src contains a symlink.
+ @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable, boolean, short, long)]]>
+ </doc>
+ </method>
+ <method name="primitiveCreate" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="absPermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="flag" type="java.util.EnumSet"/>
+ <param name="createParent" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="buffersize" type="int"/>
+ <param name="bytesPerChecksum" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Same as {{@link #create(String, FsPermission, EnumSet, short, long,
+ Progressable, int)} except that the permission
+ is absolute (ie has already been masked with umask.]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="link" type="java.lang.String"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Creates a symbolic link.
+
+ @see ClientProtocol#createSymlink(String, String,FsPermission, boolean)]]>
+ </doc>
+ </method>
+ <method name="getLinkTarget" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Resolve the *first* symlink, if any, in the path.
+
+ @see ClientProtocol#getLinkTarget(String)]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @see ClientProtocol#setReplication(String, short)
+ @param replication
+ @throws IOException
+ @return true is successful or false if file does not exist]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #rename(String, String, Options.Rename...)} instead.">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Rename file or directory.
+ See {@link ClientProtocol#rename(String, String)}.
+ @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.]]>
+ </doc>
+ </method>
+ <method name="concat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trg" type="java.lang.String"/>
+ <param name="srcs" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Move blocks from src to trg and delete src
+ See {@link ClientProtocol#concat(String, String [])}.]]>
+ </doc>
+ </method>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Rename file or directory.
+ See {@link ClientProtocol#rename(String, String, Options.Rename...)}]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Delete file or directory.
+ See {@link ClientProtocol#delete(String)}.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[delete file or directory.
+ delete contents of the directory if non empty and recursive
+ set to true]]>
+ </doc>
+ </method>
+ <method name="exists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implemented using getFileInfo(src)]]>
+ </doc>
+ </method>
+ <method name="listPaths" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="startAfter" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Get a partial listing of the indicated directory
+
+ Recommend to use HdfsFileStatus.EMPTY_NAME as startAfter
+ if the application wants to fetch a listing starting from
+ the first entry in the directory
+
+ @param src the directory name
+ @param startAfter the name to start listing after encoded in java UTF8
+ @return a partial listing starting after startAfter]]>
+ </doc>
+ </method>
+ <method name="getFileInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getFileLinkInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file or directory. If src
+ refers to a symlink then the FileStatus of the link is returned.
+ @param src path to a file or directory.
+ @throws IOException
+ @throws UnresolvedLinkException if the path contains symlinks
+ @return FileStatus describing src.]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the checksum of a file.
+ @param src The file path
+ @return The checksum
+ @see DistributedFileSystem#getFileChecksum(Path)]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="namenode" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
+ <param name="socketFactory" type="javax.net.SocketFactory"/>
+ <param name="socketTimeout" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the checksum of a file.
+ @param src The file path
+ @return The checksum]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Set permissions to a file or directory.
+ @param src path name.
+ @param permission
+ @throws <code>FileNotFoundException</code> is file does not exist.
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Set file or directory owner.
+ @param src path name.
+ @param username user id.
+ @param groupname user group.
+ @throws <code>FileNotFoundException</code> is file does not exist.
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="getDiskStatus" return="org.apache.hadoop.fs.FsStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMissingBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with no good replicas left. Normally should be
+ zero.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUnderReplicatedBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with one of more replica missing.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCorruptBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with at least one replica marked corrupt.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+ See {@link ClientProtocol#setSafeMode(FSConstants.SafeModeAction)}
+ for more details.
+
+ @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refresh the hosts and exclude files. (Rereads them.)
+ See {@link ClientProtocol#refreshNodes()}
+ for more details.
+
+ @see ClientProtocol#refreshNodes()]]>
+ </doc>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps DFS data structures into specified file.
+ See {@link ClientProtocol#metaSave(String)}
+ for more details.
+
+ @see ClientProtocol#metaSave(String)]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@see ClientProtocol#finalizeUpgrade()]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction)]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create a directory (or hierarchy of directories) with the given
+ name and permission.
+
+ @param src The path of the directory being created
+ @param permission The permission of the directory being created.
+ If permission == null, use {@link FsPermission#getDefault()}.
+ @param createParent create missing parent directory if true
+ @return True if the operation success.
+ @throws UnresolvedLinkException if the path contains a symlink.
+ @see ClientProtocol#mkdirs(String, FsPermission, boolean)]]>
+ </doc>
+ </method>
+ <method name="primitiveMkdir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="absPermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Same {{@link #mkdirs(String, FsPermission, boolean)} except
+ that the permissions has already been masked against umask.
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Set the modification and access time of a file
+ @throws FileNotFoundException if the path is not a file]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SERVER_DEFAULTS_VALIDITY_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_BLOCK_ACQUIRE_FAILURES" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DFSClient can connect to a Hadoop Filesystem and
+ perform basic file tasks. It uses the ClientProtocol
+ to communicate with a NameNode daemon, and connects
+ directly to DataNodes to read/write block data.
+
+ Hadoop DFS users should obtain an instance of
+ DistributedFileSystem, which uses DFSClient to handle
+ filesystem tasks.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSClient -->
+ <!-- start class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
+ <class name="DFSClient.DFSDataInputStream" extends="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSClient.DFSDataInputStream" type="org.apache.hadoop.hdfs.DFSInputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getCurrentDatanode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the datanode from which the stream is currently reading.]]>
+ </doc>
+ </method>
+ <method name="getCurrentBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the block containing the target position.]]>
+ </doc>
+ </method>
+ <method name="getVisibleLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return The visible length of the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The Hdfs implementation of {@link FSDataInputStream}]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
+ <!-- start class org.apache.hadoop.hdfs.DFSConfigKeys -->
+ <class name="DFSConfigKeys" extends="org.apache.hadoop.fs.CommonConfigurationKeys"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSConfigKeys"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <field name="DFS_BLOCK_SIZE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_SIZE_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_REPLICATION_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_REPLICATION_DEFAULT" type="short"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_STREAM_BUFFER_SIZE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_STREAM_BUFFER_SIZE_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BYTES_PER_CHECKSUM_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BYTES_PER_CHECKSUM_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_WRITE_PACKET_SIZE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_BACKUP_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HTTP_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_MAX_OBJECTS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_MAX_OBJECTS_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SAFEMODE_EXTENSION_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SAFEMODE_EXTENSION_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT" type="float"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_CHECKPOINT_PERIOD_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_CHECKPOINT_SIZE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_UPGRADE_PERMISSION_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_HTTPS_NEED_AUTH_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_ACCESSTIME_PRECISION_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_MIN_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_MIN_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_PERMISSIONS_ENABLED_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_PERMISSIONS_ENABLED_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_PERMISSIONS_SUPERUSERGROUP_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_NAME_DIR_RESTORE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_LIST_LIMIT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_LIST_LIMIT_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DATA_DIR_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HTTPS_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_NAME_DIR_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_EDITS_DIR_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_READ_PREFETCH_SIZE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_RETRY_WINDOW_BASE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_METRICS_SESSION_ID_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HOST_NAME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_STORAGEID_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HOSTS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HOSTS_EXCLUDE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_SOCKET_TIMEOUT_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_CHECKPOINT_DIR_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BALANCER_MOVEDWINWIDTH_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BALANCER_MOVEDWINWIDTH_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DATA_DIR_PERMISSION_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DNS_INTERFACE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DNS_INTERFACE_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DNS_NAMESERVER_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DNS_NAMESERVER_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DU_RESERVED_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DU_RESERVED_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HANDLER_COUNT_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HANDLER_COUNT_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HTTP_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_MAX_XCIEVERS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_MAX_XCIEVERS_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_NUMBLOCKS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_NUMBLOCKS_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SCAN_PERIOD_HOURS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_TRANSFERTO_ALLOWED_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_HEARTBEAT_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_HEARTBEAT_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HANDLER_COUNT_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HANDLER_COUNT_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_SUPPORT_APPEND_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_SUPPORT_APPEND_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_HTTPS_ENABLE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_HTTPS_ENABLE_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HTTPS_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HTTPS_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_IPC_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_IPC_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_REPLICATION_MAX_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_REPLICATION_MAX_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DF_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DF_INTERVAL_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCKREPORT_INTERVAL_MSEC_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCKREPORT_INITIAL_DELAY_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_PLUGINS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_STARTUP_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_PLUGINS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_WEB_UGI_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_STARTUP_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_KEYTAB_FILE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_USER_NAME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_KEYTAB_FILE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_USER_NAME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class contains constants for configuration keys used
+ in hdfs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSConfigKeys -->
+ <!-- start class org.apache.hadoop.hdfs.DFSUtil -->
+ <class name="DFSUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isValidName" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Whether the pathname is valid. Currently prohibits relative paths,
+ and names which contain a ":" or "/"]]>
+ </doc>
+ </method>
+ <method name="login"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keytabFileKey" type="java.lang.String"/>
+ <param name="userNameKey" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[If a keytab has been provided, login as that user.]]>
+ </doc>
+ </method>
+ <method name="bytes2String" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Converts a byte array to a string using UTF8 encoding.]]>
+ </doc>
+ </method>
+ <method name="string2Bytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Converts a string to a byte array using UTF8 encoding.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSUtil -->
+ <!-- start class org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator -->
+ <class name="DFSUtil.ErrorSimulator" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSUtil.ErrorSimulator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initializeErrorSimulationEvent"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numberOfEvents" type="int"/>
+ </method>
+ <method name="getErrorSimulation" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <method name="setErrorSimulation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <method name="clearErrorSimulation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[Utility class to facilitate junit test error simulation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator -->
+ <!-- start class org.apache.hadoop.hdfs.DistributedFileSystem -->
+ <class name="DistributedFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Permit paths which explicitly specify the default port.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Normalize paths that explicitly specify the default port.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="primitiveCreate" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="flag" type="java.util.EnumSet"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="bytesPerChecksum" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createNonRecursive" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="flag" type="java.util.EnumSet"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as create(), except fails if parent directory doesn't already exist.]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="concat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trg" type="org.apache.hadoop.fs.Path"/>
+ <param name="psrcs" type="org.apache.hadoop.fs.Path[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[THIS IS DFS only operations, it is not part of FileSystem
+ move blocks from srcs to trg
+ and delete srcs afterwards
+ all blocks should be the same size
+ @param trg existing file to append to
+ @param psrcs list of files (same block size, same replication)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}
+ This rename operation is guaranteed to be atomic.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setQuota"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="namespaceQuota" type="long"/>
+ <param name="diskspaceQuota" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set a directory's quotas
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List all the entries of a directory
+
+ Note that this operation is not atomic for a large directory.
+ The entries of a directory may be fetched from NameNode multiple times.
+ It only guarantees that each name occurs once if a directory
+ undergoes changes between the calls.]]>
+ </doc>
+ </method>
+ <method name="mkdir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a directory with given name and permission, only when
+ parent directory exists.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="primitiveMkdir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getClient" return="org.apache.hadoop.hdfs.DFSClient"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStatus" return="org.apache.hadoop.fs.FsStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDiskStatus" return="org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ instead">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the disk usage of the filesystem, including total capacity,
+ used space, and remaining space
+ @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ instead]]>
+ </doc>
+ </method>
+ <method name="getRawCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ instead">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw capacity of the filesystem, disregarding
+ replication.
+ @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ instead]]>
+ </doc>
+ </method>
+ <method name="getRawUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ instead">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw used space in the filesystem, disregarding
+ replication.
+ @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ instead]]>
+ </doc>
+ </method>
+ <method name="getMissingBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with no good replicas left. Normally should be
+ zero.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUnderReplicatedBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with one of more replica missing.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCorruptBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with at least one replica marked corrupt.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDataNodeStats" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return statistics for each datanode.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
+ FSConstants.SafeModeAction)]]>
+ </doc>
+ </method>
+ <method name="saveNamespace"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Save namespace image.
+
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
+ </doc>
+ </method>
+ <method name="restoreFailedStorage" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <doc>
+ <![CDATA[enable/disable/check restoreFaileStorage
+
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refreshes the list of hosts and excluded hosts from the configured
+ files.]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalize previously upgraded files system state.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[We need to find the blocks that didn't match. Likely only one
+ is corrupt but we will report both to the namenode. In the future,
+ we can consider figuring out exactly which block is corrupt.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the stat information about the file.
+ @throws FileNotFoundException if the file does not exist.]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc }]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc }]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc }]]>
+ </doc>
+ </method>
+ <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="renewer" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a valid Delegation Token.
+
+ @param renewer Name of the designated renewer for the token
+ @return Token<DelegationTokenIdentifier>
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="renewDelegationToken" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renew an existing delegation token.
+
+ @param token delegation token obtained earlier
+ @return the new expiration time
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cancelDelegationToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Cancel an existing delegation token.
+
+ @param token delegation token
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implementation of the abstract FileSystem for the DFS system.
+ This object is the way end-user code interacts with a Hadoop
+ DistributedFileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DistributedFileSystem -->
+ <!-- start class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
+ <class name="DistributedFileSystem.DiskStatus" extends="org.apache.hadoop.fs.FsStatus"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.fs.FsStatus} instead">
+ <constructor name="DistributedFileSystem.DiskStatus" type="org.apache.hadoop.fs.FsStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DistributedFileSystem.DiskStatus" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[@deprecated Use {@link org.apache.hadoop.fs.FsStatus} instead]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
+ <!-- start class org.apache.hadoop.hdfs.HdfsConfiguration -->
+ <class name="HdfsConfiguration" extends="org.apache.hadoop.conf.Configuration"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HdfsConfiguration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="HdfsConfiguration" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="HdfsConfiguration" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Adds deprecated keys into the configuration.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HdfsConfiguration -->
+ <!-- start class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
+ <class name="HDFSPolicyProvider" extends="org.apache.hadoop.security.authorize.PolicyProvider"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HDFSPolicyProvider"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getServices" return="org.apache.hadoop.security.authorize.Service[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[{@link PolicyProvider} for HDFS protocols.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
+ <!-- start class org.apache.hadoop.hdfs.HftpFileSystem -->
+ <class name="HftpFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HftpFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getDateFormat" return="java.text.SimpleDateFormat"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="openConnection" return="java.net.HttpURLConnection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="query" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open an HTTP connection to the namenode to read file data and metadata.
+ @param path The path component of the URL
+ @param query The query component of the URL]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="nnAddr" type="java.net.InetSocketAddress"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ugi" type="org.apache.hadoop.security.UserGroupInformation"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ran" type="java.util.Random"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="HFTP_TIMEZONE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HFTP_DATE_FORMAT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="df" type="java.lang.ThreadLocal"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of a protocol for accessing filesystems over HTTP.
+ The following implementation provides a limited, read-only interface
+ to a filesystem over HTTP.
+ @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
+ @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HftpFileSystem -->
+ <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem -->
+ <class name="HsftpFileSystem" extends="org.apache.hadoop.hdfs.HftpFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HsftpFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="openConnection" return="java.net.HttpURLConnection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="query" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An implementation of a protocol for accessing filesystems over HTTPS. The
+ following implementation provides a limited, read-only interface to a
+ filesystem over HTTPS.
+
+ @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
+ @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem -->
+ <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
+ <class name="HsftpFileSystem.DummyHostnameVerifier" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="javax.net.ssl.HostnameVerifier"/>
+ <constructor name="HsftpFileSystem.DummyHostnameVerifier"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="verify" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hostname" type="java.lang.String"/>
+ <param name="session" type="javax.net.ssl.SSLSession"/>
+ </method>
+ <doc>
+ <![CDATA[Dummy hostname verifier that is used to bypass hostname checking]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
+ <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem.DummyTrustManager -->
+ <class name="HsftpFileSystem.DummyTrustManager" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="javax.net.ssl.X509TrustManager"/>
+ <constructor name="HsftpFileSystem.DummyTrustManager"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="checkClientTrusted"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="chain" type="java.security.cert.X509Certificate[]"/>
+ <param name="authType" type="java.lang.String"/>
+ </method>
+ <method name="checkServerTrusted"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="chain" type="java.security.cert.X509Certificate[]"/>
+ <param name="authType" type="java.lang.String"/>
+ </method>
+ <method name="getAcceptedIssuers" return="java.security.cert.X509Certificate[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Dummy trustmanager that is used to trust all server certificates]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem.DummyTrustManager -->
+ <doc>
+ <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
+Google's <a href="http://labs.google.com/papers/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files
+have strictly one writer at any one time. Bytes are always appended
+to the end of the writer's stream. There is no notion of "record appends"
+or "mutations" that are then checked or reordered. Writers simply emit
+a byte stream. That byte stream is guaranteed to be stored in the
+order written.</p>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+ <!-- start class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
+ <class name="AlreadyBeingCreatedException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AlreadyBeingCreatedException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The exception that happens when you ask to create a file that already
+ is being created, but is not closed yet.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.Block -->
+ <class name="Block" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <constructor name="Block"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Block" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Block" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Block" type="org.apache.hadoop.hdfs.protocol.Block"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Block" type="java.io.File, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Find the blockid from the given filename]]>
+ </doc>
+ </constructor>
+ <method name="isBlockFilename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="java.io.File"/>
+ </method>
+ <method name="filename2id" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="isMetaFilename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metaFile" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get generation stamp from the name of the metafile name]]>
+ </doc>
+ </method>
+ <method name="getBlockId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metaFile" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the blockId from the name of the metafile name]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blkid" type="long"/>
+ <param name="len" type="long"/>
+ <param name="genStamp" type="long"/>
+ </method>
+ <method name="getBlockId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setBlockId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bid" type="long"/>
+ </method>
+ <method name="getBlockName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumBytes" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setNumBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="long"/>
+ </method>
+ <method name="getGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setGenerationStamp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stamp" type="long"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="BLOCK_FILE_PREFIX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="METADATA_EXTENSION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockFilePattern" type="java.util.regex.Pattern"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="metaFilePattern" type="java.util.regex.Pattern"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Block is a Hadoop FS primitive, identified by a
+ long.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.Block -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
+ <class name="BlockListAsLongs" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable"/>
+ <constructor name="BlockListAsLongs" type="java.util.List, java.util.List"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create block report from finalized and under construction lists of blocks.
+
+ @param finalized - list of finalized blocks
+ @param uc - list of under construction blocks]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockListAsLongs"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BlockListAsLongs" type="long[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+ @param iBlockList - BlockListALongs create from this long[] parameter]]>
+ </doc>
+ </constructor>
+ <method name="getBlockListAsLongs" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an iterator over blocks in the block report.]]>
+ </doc>
+ </method>
+ <method name="getBlockReportIterator" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns {@link BlockReportIterator}.]]>
+ </doc>
+ </method>
+ <method name="getNumberOfBlocks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of blocks
+ @return - the number of blocks]]>
+ </doc>
+ </method>
+ <method name="getBlockId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[The block-id of the indexTh block
+ @param index - the block whose block-id is desired
+ @return the block-id]]>
+ </doc>
+ </method>
+ <method name="getBlockLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[The block-len of the indexTh block
+ @param index - the block whose block-len is desired
+ @return - the block-len]]>
+ </doc>
+ </method>
+ <method name="getBlockGenStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[The generation stamp of the indexTh block
+ @param index - the block whose block-len is desired
+ @return - the generation stamp]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class provides an interface for accessing list of blocks that
+ has been implemented as long[].
+ This class is useful for block report. Rather than send block reports
+ as a Block[] we can send it as a long[].
+
+ The structure of the array is as follows:
+ 0: the length of the finalized replica list;
+ 1: the length of the under-construction replica list;
+ - followed by finalized replica list where each replica is represented by
+ 3 longs: one for the blockId, one for the block length, and one for
+ the generation stamp;
+ - followed by the invalid replica represented with three -1s;
+ - followed by the under-construction replica list where each replica is
+ represented by 4 longs: three for the block id, length, generation
+ stamp, and the forth for the replica state.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator -->
+ <class name="BlockListAsLongs.BlockReportIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Iterator"/>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCurrentReplicaState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the state of the current replica.
+ The state corresponds to the replica returned
+ by the latest {@link #next()}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Iterates over blocks in the block report.
+ Avoids object allocation on each iteration.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator -->
+ <!-- start interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
+ <interface name="ClientDatanodeProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="getReplicaVisibleLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the visible length of a replica.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[6: recoverBlock() removed.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An client-datanode protocol for block recovery]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
+ <!-- start interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
+ <interface name="ClientProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="offset" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <doc>
+ <![CDATA[Get locations of the blocks of the specified file within the specified range.
+ DataNode locations for each block are sorted by
+ the proximity to the client.
+ <p>
+ Return {@link LocatedBlocks} which contains
+ file length, blocks and their locations.
+ DataNode locations for each block are sorted by
+ the distance to the client's address.
+ <p>
+ The client will then have to contact
+ one of the indicated DataNodes to obtain the actual data.
+
+ @param src file name
+ @param offset range start offset
+ @param length range length
+ @return file length and array of blocks with their locations
+ @throws IOException
+ @throws UnresolvedLinkException if the path contains a symlink.
+ @throws FileNotFoundException if the path does not exist.]]>
+ </doc>
+ </method>
+ <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get server default values for a number of configuration params.
+ @return a set of server default configuration values
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="create"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="flag" type="org.apache.hadoop.io.EnumSetWritable"/>
+ <param name="createParent" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="AlreadyBeingCreatedException" type="org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException"/>
+ <exception name="NSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.NSQuotaExceededException"/>
+ <doc>
+ <![CDATA[Create a new file entry in the namespace.
+ <p>
+ This will create an empty file specified by the source path.
+ The path should reflect a full path originated at the root.
+ The name-node does not have a notion of "current" directory for a client.
+ <p>
+ Once created, the file is visible and available for read to other clients.
+ Although, other clients cannot {@link #delete(String, boolean)}, re-create or
+ {@link #rename(String, String)} it until the file is completed
+ or explicitly as a result of lease expiration.
+ <p>
+ Blocks have a maximum size. Clients that intend to create
+ multi-block files must also use {@link #addBlock(String, String, Block, DatanodeInfo[])}.
+
+ @param src path of the file being created.
+ @param masked masked permission.
+ @param clientName name of the current client.
+ @param flag indicates whether the file should be
+ overwritten if it already exists or create if it does not exist or append.
+ @param createParent create missing parent directory if true
+ @param replication block replication factor.
+ @param blockSize maximum block size.
+
+ @throws AccessControlException if permission to create file is
+ denied by the system. As usually on the client side the exception will
+ be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
+ @throws QuotaExceededException if the file creation violates
+ any quota restriction
+ @throws IOException if other errors occur.
+ @throws UnresolvedLinkException if the path contains a symlink.
+ @throws AlreadyBeingCreatedException if the file is already being created.]]>
+ @throws NSQuotaExceededException if the namespace quota is exceeded.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Append to the end of the file.
+ @param src path of the file being created.
+ @param clientName name of the current client.
+ @return information about the last partial block if any.
+ @throws AccessControlException if permission to append file is
+ denied by the system. As usually on the client side the exception will
+ be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
+ Allows appending to an existing file if the server is
+ configured with the parameter dfs.support.append set to true, otherwise
+ throws an IOException.
+ @throws IOException if other errors occur.
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+ <p>
+ The NameNode sets replication to the new value and returns.
+ The actual block replication is not expected to be performed during
+ this method call. The blocks will be populated or removed in the
+ background as the result of the routine block maintenance procedures.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <doc>
+ <![CDATA[Set permissions for an existing file/directory.
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Set Owner of a path (i.e. a file or a directory).
+ The parameters username and groupname cannot both be null.
+ @param src
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="abandonBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[The client can give up on a block by calling abandonBlock().
+ The client can then either obtain a new block, or complete or
+ abandon the file. Any partial writes to the block will be discarded.
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="previous" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="excludedNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="DSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.DSQuotaExceededException"/>
+ <doc>
+ <![CDATA[A client that wants to write an additional block to the
+ indicated filename (which must currently be open for writing)
+ should call addBlock().
+
+ addBlock() allocates a new block and datanodes the block data
+ should be replicated to.
+
+ addBlock() also commits the previous block by reporting
+ to the name-node the actual generation stamp and the length
+ of the block that the client has transmitted to data-nodes.
+
+ @param src the file being created
+ @param clientName the name of the client that adds the block
+ @param previous previous block
+ @param excludedNodes a list of nodes that should not be
+ allocated for the current block
+ @return LocatedBlock allocated block information.
+ @throws UnresolvedLinkException if the path contains a symlink.
+ @throws DSQuotaExceededException if the directory's quota is exceeded.]]>
+ </doc>
+ </method>
+ <method name="complete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="last" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[The client is done writing data to the given filename, and would
+ like to complete it.
+
+ The function returns whether the file has been closed successfully.
+ If the function returns false, the caller should try again.
+
+ close() also commits the last block of the file by reporting
+ to the name-node the actual generation stamp and the length
+ of the block that the client has transmitted to data-nodes.
+
+ A call to complete() will not return true until all the file's
+ blocks have been replicated the minimum number of times. Thus,
+ DataNode failures may cause a client to call complete() several
+ times before succeeding.
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client wants to report corrupted blocks (blocks with specified
+ locations on datanodes).
+ @param blocks Array of located blocks to report]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #rename(String, String, Options.Rename...)} instead.">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Rename an item in the file system namespace.
+ @param src existing file or directory name.
+ @param dst new name.
+ @return true if successful, or false if the old name does not exist
+ or if the new name already belongs to the namespace.
+ @throws IOException if the new name is invalid.
+ @throws UnresolvedLinkException if the path contains a symlink.
+ @throws QuotaExceededException if the rename would violate
+ any quota restriction
+ @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.]]>
+ </doc>
+ </method>
+ <method name="concat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trg" type="java.lang.String"/>
+ <param name="srcs" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Moves blocks from srcs to trg and deletes srcs
+
+ @param trg existing file
+ @param srcs - list of existing files (same block size, same replication)
+ @throws IOException if some arguments are invalid
+ @throws UnresolvedLinkException if the path contains a symlink.
+ @throws QuotaExceededException if the concat operation would violate
+ any quota restriction]]>
+ </doc>
+ </method>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Rename src to dst.
+ <ul>
+ <li>Fails if src is a file and dst is a directory.
+ <li>Fails if src is a directory and dst is a file.
+ <li>Fails if the parent of dst does not exist or is a file.
+ </ul>
+ <p>
+ Without OVERWRITE option, rename fails if the dst already exists.
+ With OVERWRITE option, rename overwrites the dst, if it is a file
+ or an empty directory. Rename fails if dst is a non-empty directory.
+ <p>
+ This implementation of rename is atomic.
+ <p>
+ @param src existing file or directory name.
+ @param dst new name.
+ @param options Rename options
+ @throws IOException if rename failed
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #delete(String, boolean)} instead.">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Delete the given file or directory from the file system.
+ <p>
+ Any blocks belonging to the deleted files will be garbage-collected.
+
+ @param src existing name.
+ @return true only if the existing file or directory was actually removed
+ from the file system.
+ @throws UnresolvedLinkException if the path contains a symlink.
+ @deprecated use {@link #delete(String, boolean)} instead.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Delete the given file or directory from the file system.
+ <p>
+ same as delete but provides a way to avoid accidentally
+ deleting non empty directories programmatically.
+ @param src existing name
+ @param recursive if true deletes a non empty directory recursively,
+ else throws an exception.
+ @return true only if the existing file or directory was actually removed
+ from the file system.
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="NSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.NSQuotaExceededException"/>
+ <doc>
+ <![CDATA[Create a directory (or hierarchy of directories) with the given
+ name and permission.
+
+ @param src The path of the directory being created
+ @param masked The masked permission of the directory being created
+ @param createParent create missing parent directory if true
+ @return True if the operation success.
+ @throws UnresolvedLinkException if the path contains a symlink.
+ @throws {@link AccessControlException} if permission to create file is
+ denied by the system. As usually on the client side the exception will
+ be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
+ @throws QuotaExceededException if the operation would violate
+ any quota restriction.]]>
+ </doc>
+ </method>
+ <method name="getListing" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="startAfter" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Get a partial listing of the indicated directory
+
+ @param src the directory name
+ @param startAfter the name to start listing after encoded in java UTF8
+ @return a partial listing starting after startAfter
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="renewLease"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Client programs can cause stateful changes in the NameNode
+ that affect other clients. A client may obtain a file and
+ neither abandon nor complete it. A client might hold a series
+ of locks that prevent other clients from proceeding.
+ Clearly, it would be bad if a client held a bunch of locks
+ that it never gave up. This can happen easily if the client
+ dies unexpectedly.
+ <p>
+ So, the NameNode will revoke the locks and live file-creates
+ for clients that it thinks have died. A client tells the
+ NameNode that it is still alive by periodically calling
+ renewLease(). If a certain amount of time passes since
+ the last call to renewLease(), the NameNode assumes the
+ client has died.
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="getStats" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a set of statistics about the filesystem.
+ Right now, only six values are returned.
+ <ul>
+ <li> [0] contains the total storage capacity of the system, in bytes.</li>
+ <li> [1] contains the total used space of the system, in bytes.</li>
+ <li> [2] contains the available storage of the system, in bytes.</li>
+ <li> [3] contains number of under replicated blocks in the system.</li>
+ <li> [4] contains number of blocks with a corrupt replica. </li>
+ <li> [5] contains number of blocks without any good replicas left. </li>
+ </ul>
+ Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
+ actual numbers to index into the array.]]>
+ </doc>
+ </method>
+ <method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a report on the system's current datanodes.
+ One DatanodeInfo object is returned for each DataNode.
+ Return live datanodes if type is LIVE; dead datanodes if type is DEAD;
+ otherwise all datanodes if type is ALL.]]>
+ </doc>
+ </method>
+ <method name="getPreferredBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Get the block size for the given file.
+ @param filename The name of the file
+ @return The number of bytes in each block
+ @throws IOException
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+ <p>
+ Safe mode is a name node state when it
+ <ol><li>does not accept changes to name space (read-only), and</li>
+ <li>does not replicate or delete blocks.</li></ol>
+
+ <p>
+ Safe mode is entered automatically at name node startup.
+ Safe mode can also be entered manually using
+ {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}.
+ <p>
+ At startup the name node accepts data node reports collecting
+ information about block locations.
+ In order to leave safe mode it needs to collect a configurable
+ percentage called threshold of blocks, which satisfy the minimal
+ replication condition.
+ The minimal replication condition is that each block must have at least
+ <tt>dfs.namenode.replication.min</tt> replicas.
+ When the threshold is reached the name node extends safe mode
+ for a configurable amount of time
+ to let the remaining data nodes to check in before it
+ will start replicating missing blocks.
+ Then the name node leaves safe mode.
+ <p>
+ If safe mode is turned on manually using
+ {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
+ then the name node stays in safe mode until it is manually turned off
+ using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
+ Current state of the name node can be verified using
+ {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
+ <h4>Configuration parameters:</h4>
+ <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
+ <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
+ <tt>dfs.namenode.replication.min</tt> is the minimal replication parameter.
+
+ <h4>Special cases:</h4>
+ The name node does not enter safe mode at startup if the threshold is
+ set to 0 or if the name space is empty.<br>
+ If the threshold is set to 1 then all blocks need to have at least
+ minimal replication.<br>
+ If the threshold value is greater than 1 then the name node will not be
+ able to turn off safe mode automatically.<br>
+ Safe mode can always be turned off manually.
+
+ @param action <ul> <li>0 leave safe mode;</li>
+ <li>1 enter safe mode;</li>
+ <li>2 get safe mode state.</li></ul>
+ @return <ul><li>0 if the safe mode is OFF or</li>
+ <li>1 if the safe mode is ON.</li></ul>
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="saveNamespace"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Save namespace image.
+ <p>
+ Saves current namespace into storage directories and reset edits log.
+ Requires superuser privilege and safe mode.
+
+ @throws AccessControlException if the superuser privilege is violated.
+ @throws IOException if image creation failed.]]>
+ </doc>
+ </method>
+ <method name="restoreFailedStorage" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <doc>
+ <![CDATA[Enable/Disable restore failed storage.
+ <p>
+ sets flag to enable restore of failed storage replicas
+
+ @throws AccessControlException if the superuser privilege is violated.]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Tells the namenode to reread the hosts and exclude files.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalize previous upgrade.
+ Remove file system state saved during the upgrade.
+ The upgrade will become irreversible.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report distributed upgrade progress or force current upgrade to proceed.
+
+ @param action {@link FSConstants.UpgradeAction} to perform
+ @return upgrade status information or null if no upgrades are in progress
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps namenode data structures into specified file. If the file
+ already exists, then append.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCorruptFiles" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return Array of FileStatus objects referring to corrupted files.
+ The server could return all or a few of the files that are corrupt.
+ @throws AccessControlException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFileInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file or directory.
+ @param src The string representation of the path to the file
+ @throws UnresolvedLinkException if the path contains symlinks;
+ IOException if permission to access file is denied by the system
+ @return object containing information regarding the file
+ or null if file not found]]>
+ </doc>
+ </method>
+ <method name="getFileLinkInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file or directory. If the path
+ refers to a symlink then the FileStatus of the symlink is returned.
+ @param src The string representation of the path to the file
+ @throws UnresolvedLinkException if the path contains symlinks;
+ IOException if permission to access file is denied by the system
+ @return object containing information regarding the file
+ or null if file not found]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Get {@link ContentSummary} rooted at the specified directory.
+ @param path The string representation of the path
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="setQuota"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="namespaceQuota" type="long"/>
+ <param name="diskspaceQuota" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <doc>
+ <![CDATA[Set the quota for a directory.
+ @param path The string representation of the path to the directory
+ @param namespaceQuota Limit on the number of names in the tree rooted
+ at the directory
+ @param diskspaceQuota Limit on disk space occupied all the files under
+ this directory.
+ <br><br>
+
+ The quota can have three types of values : (1) 0 or more will set
+ the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies
+ the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET}
+ implies the quota will be reset. Any other value is a runtime error.
+ @throws UnresolvedLinkException if the path contains a symlink.
+ @throws FileNotFoundException if the path is a file or
+ does not exist
+ @throws QuotaExceededException if the directory size
+ is greater than the given quota]]>
+ </doc>
+ </method>
+ <method name="fsync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="client" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Write all metadata for this file into persistent storage.
+ The file must be currently open for writing.
+ @param src The string representation of the path
+ @param client The string representation of the client
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Sets the modification and access time of the file to the specified time.
+ @param src The string representation of the path
+ @param mtime The number of milliseconds since Jan 1, 1970.
+ Setting mtime to -1 means that modification time should not be set
+ by this call.
+ @param atime The number of milliseconds since Jan 1, 1970.
+ Setting atime to -1 means that access time should not be set
+ by this call.
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="link" type="java.lang.String"/>
+ <param name="dirPerm" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create a symbolic link to a file or directory.
+ @param target The pathname of the destination that the
+ link points to.
+ @param link The pathname of the link being created.
+ @param dirPerm permissions to use when creating parent directories
+ @param createParent - if true then missing parent dirs are created
+ if false then parent must exist
+ @throws IOException
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="getLinkTarget" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Resolve the first symbolic link on the specified path.
+ @param path The pathname that needs to be resolved
+ @return The pathname after resolving the first symbolic link if any.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="updateBlockForPipeline" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a new generation stamp together with an access token for
+ a block under construction
+
+ This method is called only when a client needs to recover a failed
+ pipeline or set up a pipeline for appending to a block.
+
+ @param block a block
+ @param clientName the name of the client
+ @return a located block with a new generation stamp and an access token
+ @throws IOException if any error occurs]]>
+ </doc>
+ </method>
+ <method name="updatePipeline"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clientName" type="java.lang.String"/>
+ <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update a pipeline for a block under construction
+
+ @param clientName the name of the client
+ @param oldBlock the old block
+ @param newBlock the new block containing new generation stamp and length
+ @param newNodes datanodes in the pipeline
+ @throws IOException if any error occurs]]>
+ </doc>
+ </method>
+ <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="renewer" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a valid Delegation Token.
+
+ @param renewer the designated renewer for the token
+ @return Token<DelegationTokenIdentifier>
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="renewDelegationToken" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renew an existing delegation token.
+
+ @param token delegation token obtained earlier
+ @return the new expiration time
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cancelDelegationToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Cancel an existing delegation token.
+
+ @param token delegation token
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compared to the previous version the following changes have been introduced:
+ (Only the latest change is reflected.
+ The log of historical changes can be retrieved from the svn).
+ 60: Replace full getListing with iterative getListing.]]>
+ </doc>
+ </field>
+ <field name="GET_STATS_CAPACITY_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_USED_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_REMAINING_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_UNDER_REPLICATED_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_CORRUPT_BLOCKS_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_MISSING_BLOCKS_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[ClientProtocol is used by user code via
+ {@link org.apache.hadoop.hdfs.DistributedFileSystem} class to communicate
+ with the NameNode. User code can manipulate the directory namespace,
+ as well as open/close file streams, etc.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeID -->
+ <class name="DatanodeID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Equivalent to DatanodeID("").]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Equivalent to DatanodeID(nodeName, "", -1, -1).]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeID copy constructor
+
+ @param from]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="java.lang.String, java.lang.String, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create DatanodeID
+ @param nodeName (hostname:portNumber)
+ @param storageID data storage ID
+ @param infoPort info server port
+ @param ipcPort ipc server port]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return hostname:portNumber.]]>
+ </doc>
+ </method>
+ <method name="getStorageID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return data storage ID.]]>
+ </doc>
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return infoPort (the port to which the HTTP server is bound)]]>
+ </doc>
+ </method>
+ <method name="getIpcPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return ipcPort (the port to which the IPC server is bound)]]>
+ </doc>
+ </method>
+ <method name="setStorageID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="storageID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[sets the data storage ID.]]>
+ </doc>
+ </method>
+ <method name="getHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return hostname and no :portNumber.]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="to" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="updateRegInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <doc>
+ <![CDATA[Update fields when a new registration request comes in.
+ Note that this does not update storageID.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <doc>
+ <![CDATA[Comparable.
+ Basis of compare is the String name (host:portNumber) only.
+ @param that
+ @return as specified by Comparable.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="EMPTY_ARRAY" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="name" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="storageID" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="infoPort" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ipcPort" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeID is composed of the data node
+ name (hostname:portNumber) and the data storage ID,
+ which it currently represents.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeID -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
+ <class name="DatanodeInfo" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.Node"/>
+ <constructor name="DatanodeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw capacity.]]>
+ </doc>
+ </method>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The used space by the data node.]]>
+ </doc>
+ </method>
+ <method name="getNonDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The used space by the data node.]]>
+ </doc>
+ </method>
+ <method name="getDfsUsedPercent" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The used space by the data node as percentage of present capacity]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw free space.]]>
+ </doc>
+ </method>
+ <method name="getRemainingPercent" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The remaining space as percentage of configured capacity.]]>
+ </doc>
+ </method>
+ <method name="getLastUpdate" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The time when this information was accurate.]]>
+ </doc>
+ </method>
+ <method name="getXceiverCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[number of active connections]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="capacity" type="long"/>
+ <doc>
+ <![CDATA[Sets raw capacity.]]>
+ </doc>
+ </method>
+ <method name="setRemaining"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="remaining" type="long"/>
+ <doc>
+ <![CDATA[Sets raw free space.]]>
+ </doc>
+ </method>
+ <method name="setLastUpdate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lastUpdate" type="long"/>
+ <doc>
+ <![CDATA[Sets time when this information was accurate.]]>
+ </doc>
+ </method>
+ <method name="setXceiverCount"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="xceiverCount" type="int"/>
+ <doc>
+ <![CDATA[Sets number of active connections]]>
+ </doc>
+ </method>
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[rack name]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the rack name]]>
+ </doc>
+ </method>
+ <method name="getHostName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setHostName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ </method>
+ <method name="getDatanodeReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A formatted string for reporting the status of the DataNode.]]>
+ </doc>
+ </method>
+ <method name="dumpDatanode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A formatted string for printing the status of the DataNode.]]>
+ </doc>
+ </method>
+ <method name="startDecommission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Start decommissioning a node.]]>
+ </doc>
+ </method>
+ <method name="stopDecommission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop decommissioning a node.]]>
+ </doc>
+ </method>
+ <method name="isDecommissionInProgress" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the node is in the process of being decommissioned]]>
+ </doc>
+ </method>
+ <method name="isDecommissioned" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the node has been decommissioned.]]>
+ </doc>
+ </method>
+ <method name="setDecommissioned"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sets the admin state to indicate that decommission is complete.]]>
+ </doc>
+ </method>
+ <method name="setAdminState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="newState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"/>
+ <doc>
+ <![CDATA[Sets the admin state of this node.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="level" type="int"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a DatanodeInfo]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <field name="capacity" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dfsUsed" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="remaining" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="lastUpdate" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="xceiverCount" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="location" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="hostName" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[HostName as supplied by the datanode during registration as its
+ name. Namenode uses datanode IP address as the name.]]>
+ </doc>
+ </field>
+ <field name="adminState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeInfo represents the status of a DataNode.
+ This object is used for communication in the
+ Datanode Protocol and the Client Protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
+ <class name="DatanodeInfo.AdminStates" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NORMAL" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DECOMMISSION_INPROGRESS" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DECOMMISSIONED" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
+ <!-- start interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
+ <interface name="DataTransferProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="DATA_TRANSFER_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Version for data transfers between clients and datanodes.
+ This should change whenever the serialization of DatanodeInfo changes,
+ not just when the protocol changes; this is easy to overlook.]]>
+ </doc>
+ </field>
+ <field name="OP_WRITE_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Op.WRITE_BLOCK instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Op.WRITE_BLOCK instead.]]>
+ </doc>
+ </field>
+ <field name="OP_READ_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Op.READ_BLOCK instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Op.READ_BLOCK instead.]]>
+ </doc>
+ </field>
+ <field name="OP_READ_METADATA" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="As of version 15, OP_READ_METADATA is no longer supported.">
+ <doc>
+ <![CDATA[@deprecated As of version 15, OP_READ_METADATA is no longer supported.]]>
+ </doc>
+ </field>
+ <field name="OP_REPLACE_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Op.REPLACE_BLOCK instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Op.REPLACE_BLOCK instead.]]>
+ </doc>
+ </field>
+ <field name="OP_COPY_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Op.COPY_BLOCK instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Op.COPY_BLOCK instead.]]>
+ </doc>
+ </field>
+ <field name="OP_BLOCK_CHECKSUM" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Op.BLOCK_CHECKSUM instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Op.BLOCK_CHECKSUM instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.SUCCESS instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.SUCCESS instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_ERROR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.ERROR instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_ERROR_CHECKSUM" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.ERROR_CHECKSUM instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_CHECKSUM instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_ERROR_INVALID" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.ERROR_INVALID instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_INVALID instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_ERROR_EXISTS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.ERROR_EXISTS instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_EXISTS instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_ERROR_ACCESS_TOKEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.ERROR_ACCESS_TOKEN instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_ACCESS_TOKEN instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_CHECKSUM_OK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.CHECKSUM_OK instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.CHECKSUM_OK instead.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Transfer data to/from datanode using a streaming protocol.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage -->
+ <class name="DataTransferProtocol.BlockConstructionStage" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getRecoveryStage" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the recovery stage of this stage]]>
+ </doc>
+ </method>
+ <field name="PIPELINE_SETUP_APPEND" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The enumerates are always listed as regular stage followed by the
+ recovery stage.
+       Changing this order will make getRecoveryStage not work correctly.]]>
+ </doc>
+ </field>
+ <field name="PIPELINE_SETUP_APPEND_RECOVERY" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DATA_STREAMING" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PIPELINE_SETUP_STREAMING_RECOVERY" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PIPELINE_CLOSE" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PIPELINE_CLOSE_RECOVERY" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PIPELINE_SETUP_CREATE" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op -->
+ <class name="DataTransferProtocol.Op" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from in]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write to out]]>
+ </doc>
+ </method>
+ <field name="WRITE_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ_METADATA" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="REPLACE_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COPY_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCK_CHECKSUM" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="code" type="byte"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The code for this operation.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Operation]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck -->
+ <class name="DataTransferProtocol.PipelineAck" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="DataTransferProtocol.PipelineAck"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DataTransferProtocol.PipelineAck" type="long, org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+ @param seqno sequence number
+ @param replies an array of replies]]>
+ </doc>
+ </constructor>
+ <method name="getSeqno" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the sequence number
+ @return the sequence number]]>
+ </doc>
+ </method>
+ <method name="getNumOfReplies" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of replies
+ @return the number of replies]]>
+ </doc>
+ </method>
+ <method name="getReply" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[get the ith reply
+       @return the ith reply]]>
+ </doc>
+ </method>
+ <method name="isSuccess" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if this ack contains error status
+ @return true if all statuses are SUCCESS]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writable interface]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="UNKOWN_SEQNO" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[reply]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Receiver -->
+ <class name="DataTransferProtocol.Receiver" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataTransferProtocol.Receiver"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readOp" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read an Op. It also checks protocol version.]]>
+ </doc>
+ </method>
+ <method name="processOp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="op" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"/>
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Process op by the corresponding method.]]>
+ </doc>
+ </method>
+ <method name="opReadBlock"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <param name="blockId" type="long"/>
+ <param name="blockGs" type="long"/>
+ <param name="offset" type="long"/>
+ <param name="length" type="long"/>
+ <param name="client" type="java.lang.String"/>
+ <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Abstract OP_READ_BLOCK method.
+ Read a block.]]>
+ </doc>
+ </method>
+ <method name="opWriteBlock"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <param name="blockId" type="long"/>
+ <param name="blockGs" type="long"/>
+ <param name="pipelineSize" type="int"/>
+ <param name="stage" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"/>
+ <param name="newGs" type="long"/>
+ <param name="minBytesRcvd" type="long"/>
+ <param name="maxBytesRcvd" type="long"/>
+ <param name="client" type="java.lang.String"/>
+ <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
+ <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Abstract OP_WRITE_BLOCK method.
+ Write a block.]]>
+ </doc>
+ </method>
+ <method name="opReplaceBlock"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <param name="blockId" type="long"/>
+ <param name="blockGs" type="long"/>
+ <param name="sourceId" type="java.lang.String"/>
+ <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Abstract OP_REPLACE_BLOCK method.
+       It is used for balancing purposes; sent to a destination]]>
+ </doc>
+ </method>
+ <method name="opCopyBlock"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <param name="blockId" type="long"/>
+ <param name="blockGs" type="long"/>
+ <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Abstract OP_COPY_BLOCK method.
+       It is used for balancing purposes; sent to a proxy source.]]>
+ </doc>
+ </method>
+ <method name="opBlockChecksum"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <param name="blockId" type="long"/>
+ <param name="blockGs" type="long"/>
+ <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Abstract OP_BLOCK_CHECKSUM method.
+ Get the checksum of a block]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Receiver]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Receiver -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Sender -->
+ <class name="DataTransferProtocol.Sender" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataTransferProtocol.Sender"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="op"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="op" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+       <![CDATA[Initialize an operation.]]>
+ </doc>
+ </method>
+ <method name="opReadBlock"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="blockId" type="long"/>
+ <param name="blockGs" type="long"/>
+ <param name="blockOffset" type="long"/>
+ <param name="blockLen" type="long"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="accessToken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Send OP_READ_BLOCK]]>
+ </doc>
+ </method>
+ <method name="opWriteBlock"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="blockId" type="long"/>
+ <param name="blockGs" type="long"/>
+ <param name="pipelineSize" type="int"/>
+ <param name="stage" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"/>
+ <param name="newGs" type="long"/>
+ <param name="minBytesRcvd" type="long"/>
+ <param name="maxBytesRcvd" type="long"/>
+ <param name="client" type="java.lang.String"/>
+ <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
+ <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Send OP_WRITE_BLOCK]]>
+ </doc>
+ </method>
+ <method name="opReplaceBlock"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="blockId" type="long"/>
+ <param name="blockGs" type="long"/>
+ <param name="storageId" type="java.lang.String"/>
+ <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Send OP_REPLACE_BLOCK]]>
+ </doc>
+ </method>
+ <method name="opCopyBlock"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="blockId" type="long"/>
+ <param name="blockGs" type="long"/>
+ <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Send OP_COPY_BLOCK]]>
+ </doc>
+ </method>
+ <method name="opBlockChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="blockId" type="long"/>
+ <param name="blockGs" type="long"/>
+ <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Send OP_BLOCK_CHECKSUM]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sender]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Sender -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status -->
+ <class name="DataTransferProtocol.Status" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from in]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write to out]]>
+ </doc>
+ </method>
+ <method name="writeOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write to out]]>
+ </doc>
+ </method>
+ <field name="SUCCESS" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ERROR" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ERROR_CHECKSUM" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ERROR_INVALID" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ERROR_EXISTS" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ERROR_ACCESS_TOKEN" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKSUM_OK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Status]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DirectoryListing -->
+ <class name="DirectoryListing" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="DirectoryListing"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DirectoryListing" type="org.apache.hadoop.hdfs.protocol.HdfsFileStatus[], int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[constructor
+ @param partialListing a partial listing of a directory
+ @param remainingEntries number of entries that are left to be listed]]>
+ </doc>
+ </constructor>
+ <method name="getPartialListing" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the partial listing of file status
+ @return the partial listing of file status]]>
+ </doc>
+ </method>
+ <method name="getRemainingEntries" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of remaining entries that are left to be listed
+ @return the number of remaining entries that are left to be listed]]>
+ </doc>
+ </method>
+ <method name="hasMore" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if there are more entries that are left to be listed
+ @return true if there are more entries that are left to be listed;
+ return false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getLastName" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the last name in this list
+ @return the last name in the list if it is not empty; otherwise return null]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class defines a partial listing of a directory to support
+ iterative directory listing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DirectoryListing -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DSQuotaExceededException -->
+ <class name="DSQuotaExceededException" extends="org.apache.hadoop.hdfs.protocol.QuotaExceededException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DSQuotaExceededException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DSQuotaExceededException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DSQuotaExceededException" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="serialVersionUID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DSQuotaExceededException -->
+ <!-- start interface org.apache.hadoop.hdfs.protocol.FSConstants -->
+ <interface name="FSConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="MIN_BLOCKS_FOR_WRITE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCK_INVALIDATE_CHUNK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="QUOTA_DONT_SET" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="QUOTA_RESET" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HEARTBEAT_INTERVAL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCKREPORT_INTERVAL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCKREPORT_INITIAL_DELAY" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_SOFTLIMIT_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_HARDLIMIT_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_RECOVER_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_PATH_LENGTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_PATH_DEPTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SMALL_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BLOCK_SIZE" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BYTES_PER_CHECKSUM" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_WRITE_PACKET_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_REPLICATION_FACTOR" type="short"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_FILE_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_DATA_SOCKET_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SIZE_OF_INTEGER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HDFS_URI_SCHEME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[URI Scheme for hdfs://namenode/ URIs.]]>
+ </doc>
+ </field>
+ <field name="LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Some handy constants]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.protocol.FSConstants -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
+ <class name="FSConstants.DatanodeReportType" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="ALL" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LIVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEAD" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
+ <class name="FSConstants.SafeModeAction" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="SAFEMODE_LEAVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SAFEMODE_ENTER" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SAFEMODE_GET" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
+ <class name="FSConstants.UpgradeAction" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="GET_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DETAILED_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FORCE_PROCEED" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Distributed upgrade actions:
+
+ 1. Get upgrade status.
+ 2. Get detailed upgrade status.
+ 3. Proceed with the upgrade if it is stuck, no matter what the status is.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.HdfsFileStatus -->
+ <class name="HdfsFileStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="HdfsFileStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="HdfsFileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, byte[], byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+ @param length the number of bytes the file has
+ @param isdir if the path is a directory
+ @param block_replication the replication factor
+ @param blocksize the block size
+ @param modification_time modification time
+ @param access_time access time
+ @param permission permission
+ @param owner the owner of the path
+ @param group the group of the path
+ @param path the local name in java UTF8 encoding the same as that in-memory]]>
+ </doc>
+ </constructor>
+ <method name="getLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the length of this file, in bytes.
+ @return the length of this file, in bytes.]]>
+ </doc>
+ </method>
+ <method name="isDir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this a directory?
+ @return true if this is a directory]]>
+ </doc>
+ </method>
+ <method name="isSymlink" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this a symbolic link?
+ @return true if this is a symbolic link]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the block size of the file.
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the replication factor of a file.
+ @return the replication factor of a file.]]>
+ </doc>
+ </method>
+ <method name="getModificationTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the modification time of the file.
+ @return the modification time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getAccessTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the access time of the file.
+ @return the access time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get FsPermission associated with the file.
+      @return permission]]>
+ </doc>
+ </method>
+ <method name="getOwner" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the owner of the file.
+ @return owner of the file]]>
+ </doc>
+ </method>
+ <method name="getGroup" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the group associated with the file.
+ @return group for the file.]]>
+ </doc>
+ </method>
+ <method name="isEmptyLocalName" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if the local name is empty
+ @return true if the name is empty]]>
+ </doc>
+ </method>
+ <method name="getLocalName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the string representation of the local name
+ @return the local name in string]]>
+ </doc>
+ </method>
+ <method name="getLocalNameInBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Java UTF8 representation of the local name
+ @return the local name in java UTF8]]>
+ </doc>
+ </method>
+ <method name="getFullName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the string representation of the full path name
+ @param parent the parent path
+ @return the full path in string]]>
+ </doc>
+ </method>
+ <method name="getFullPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Get the full path
+ @param parent the parent path
+ @return the full path]]>
+ </doc>
+ </method>
+ <method name="getSymlink" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the string representation of the symlink.
+ @return the symlink as a string.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="EMPTY_NAME" type="byte[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+    <![CDATA[Class that represents the over-the-wire information for a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.HdfsFileStatus -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
+ <class name="LocatedBlock" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="LocatedBlock"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getAccessToken" return="org.apache.hadoop.hdfs.security.BlockAccessToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setAccessToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ </method>
+ <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLocations" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStartOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isCorrupt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read LocatedBlock from in.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A LocatedBlock is a pair of Block, DatanodeInfo[]
+ objects. It tells where to find a Block.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
+ <class name="LocatedBlocks" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="LocatedBlocks" type="long, boolean, java.util.List, org.apache.hadoop.hdfs.protocol.LocatedBlock, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[public Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getLocatedBlocks" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get located blocks.]]>
+ </doc>
+ </method>
+ <method name="getLastLocatedBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the last located block.]]>
+ </doc>
+ </method>
+ <method name="isLastBlockComplete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the last block completed?]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[Get located block.]]>
+ </doc>
+ </method>
+ <method name="locatedBlockCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get number of located blocks.]]>
+ </doc>
+ </method>
+ <method name="getFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isUnderConstruction" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Return true if file was under construction when
+ this LocatedBlocks was constructed, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="findBlock" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <doc>
+ <![CDATA[Find block containing specified offset.
+
+ @return block if found, or null otherwise.]]>
+ </doc>
+ </method>
+ <method name="insertRange"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockIdx" type="int"/>
+ <param name="newBlocks" type="java.util.List"/>
+ </method>
+ <method name="getInsertIndex" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="binSearchResult" type="int"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Collection of blocks with their locations and the file length.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.NSQuotaExceededException -->
+ <class name="NSQuotaExceededException" extends="org.apache.hadoop.hdfs.protocol.QuotaExceededException"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NSQuotaExceededException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NSQuotaExceededException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NSQuotaExceededException" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="serialVersionUID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.NSQuotaExceededException -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
+ <class name="QuotaExceededException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="QuotaExceededException"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="QuotaExceededException" type="java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="QuotaExceededException" type="long, long"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setPathName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="serialVersionUID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="pathName" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="quota" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="count" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This exception is thrown when modification to HDFS results in violation
+ of a directory quota. A directory quota might be namespace quota (limit
+ on number of files and directories) or a diskspace quota (limit on space
+    taken by all the files under the directory tree). <br> <br>
+
+ The message for the exception specifies the directory where the quota
+ was violated and actual quotas. Specific message is generated in the
+ corresponding Exception class:
+ DSQuotaExceededException or
+ NSQuotaExceededException]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.RecoveryInProgressException -->
+ <class name="RecoveryInProgressException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecoveryInProgressException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+    <![CDATA[Exception indicating that a replica is already being recovered.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.RecoveryInProgressException -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.UnregisteredNodeException -->
+ <class name="UnregisteredNodeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnregisteredNodeException" type="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UnregisteredNodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID, org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The exception is thrown if a different data-node claims the same
+ storage id as the existing one.
+
+ @param nodeID unregistered data-node
+ @param storedNode data-node stored in the system with this storage id]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when a node that has not previously
+ registered is trying to access the name node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.UnregisteredNodeException -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.UnresolvedPathException -->
+ <class name="UnresolvedPathException" extends="org.apache.hadoop.fs.UnresolvedLinkException"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnresolvedPathException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Used by RemoteException to instantiate an UnresolvedPathException.]]>
+ </doc>
+ </constructor>
+ <constructor name="UnresolvedPathException" type="java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUnresolvedPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getResolvedPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Thrown when a symbolic link is encountered in a path.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.UnresolvedPathException -->
+</package>
+<package name="org.apache.hadoop.hdfs.security">
+ <!-- start class org.apache.hadoop.hdfs.security.AccessTokenHandler -->
+ <class name="AccessTokenHandler" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AccessTokenHandler" type="boolean, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructor
+
+ @param isMaster
+ @param keyUpdateInterval
+ @param tokenLifetime
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="exportKeys" return="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Export access keys, only to be used in master mode]]>
+ </doc>
+ </method>
+ <method name="setKeys"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="exportedKeys" type="org.apache.hadoop.hdfs.security.ExportedAccessKeys"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set access keys, only to be used in slave mode]]>
+ </doc>
+ </method>
+ <method name="updateKeys"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update access keys, only to be used in master mode]]>
+ </doc>
+ </method>
+ <method name="generateToken" return="org.apache.hadoop.hdfs.security.BlockAccessToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockID" type="long"/>
+ <param name="modes" type="java.util.EnumSet"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate an access token for current user]]>
+ </doc>
+ </method>
+ <method name="generateToken" return="org.apache.hadoop.hdfs.security.BlockAccessToken"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="userID" type="java.lang.String"/>
+ <param name="blockID" type="long"/>
+ <param name="modes" type="java.util.EnumSet"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate an access token for a specified user]]>
+ </doc>
+ </method>
+ <method name="checkAccess" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <param name="userID" type="java.lang.String"/>
+ <param name="blockID" type="long"/>
+ <param name="mode" type="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if access should be allowed. userID is not checked if null]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[AccessTokenHandler can be instantiated in 2 modes, master mode and slave
+ mode. Master can generate new access keys and export access keys to slaves,
+ while slaves can only import and use access keys received from master. Both
+ master and slave can generate and verify access tokens. Typically, master
+ mode is used by NN and slave mode is used by DN.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.AccessTokenHandler -->
+ <!-- start class org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode -->
+ <class name="AccessTokenHandler.AccessMode" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="READ" type="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE" type="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COPY" type="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="REPLACE" type="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode -->
+ <!-- start class org.apache.hadoop.hdfs.security.BlockAccessKey -->
+ <class name="BlockAccessKey" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlockAccessKey"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BlockAccessKey" type="long, org.apache.hadoop.io.Text, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getKeyID" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExpiryDate" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getMac" return="javax.crypto.Mac"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setMac"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mac" type="javax.crypto.Mac"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Key used for generating and verifying access tokens]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.BlockAccessKey -->
+ <!-- start class org.apache.hadoop.hdfs.security.BlockAccessToken -->
+ <class name="BlockAccessToken" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlockAccessToken"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BlockAccessToken" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTokenID" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTokenAuthenticator" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="DUMMY_TOKEN" type="org.apache.hadoop.hdfs.security.BlockAccessToken"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.BlockAccessToken -->
+ <!-- start class org.apache.hadoop.hdfs.security.ExportedAccessKeys -->
+ <class name="ExportedAccessKeys" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ExportedAccessKeys"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isAccessTokenEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getKeyUpdateInterval" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTokenLifetime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCurrentKey" return="org.apache.hadoop.hdfs.security.BlockAccessKey"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAllKeys" return="org.apache.hadoop.hdfs.security.BlockAccessKey[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="DUMMY_KEYS" type="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Object for passing access keys]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.ExportedAccessKeys -->
+ <!-- start class org.apache.hadoop.hdfs.security.InvalidAccessTokenException -->
+ <class name="InvalidAccessTokenException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidAccessTokenException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidAccessTokenException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Access token verification failed.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.InvalidAccessTokenException -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+ <!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier -->
+ <class name="DelegationTokenIdentifier" extends="org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DelegationTokenIdentifier"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty delegation token identifier for reading into.]]>
+ </doc>
+ </constructor>
+ <constructor name="DelegationTokenIdentifier" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new delegation token identifier
+ @param owner the effective username of the token owner
+ @param renewer the username of the renewer
+ @param realUser the real username of the token owner]]>
+ </doc>
+ </constructor>
+ <method name="getKind" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="HDFS_DELEGATION_KIND" type="org.apache.hadoop.io.Text"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A delegation token identifier that is specific to HDFS.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier -->
+ <!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager -->
+ <class name="DelegationTokenSecretManager" extends="org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DelegationTokenSecretManager" type="long, long, long, long, org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a secret manager
+ @param delegationKeyUpdateInterval the number of seconds for rolling new
+ secret keys.
+ @param delegationTokenMaxLifetime the maximum lifetime of the delegation
+ tokens
+ @param delegationTokenRenewInterval how often the tokens must be renewed
+ @param delegationTokenRemoverScanInterval how often the tokens are scanned
+ for expired tokens]]>
+ </doc>
+ </constructor>
+ <method name="createIdentifier" return="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTokenExpiryTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dtId" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns expiry time of a token given its identifier.
+
+ @param dtId DelegationTokenIdentifier of a token
+ @return Expiry time of the token
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="loadSecretManagerState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Load SecretManager state from fsimage.
+
+ @param in input stream to read fsimage
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="saveSecretManagerState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Store the current state of the SecretManager for persistence
+
+ @param out Output stream for writing into fsimage.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="addPersistedDelegationToken"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="identifier" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
+ <param name="expiryTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method is intended to be used only while reading edit logs.
+
+ @param identifier DelegationTokenIdentifier read from the edit logs or
+ fsimage
+
+ @param expiryTime token expiry time
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="updatePersistedMasterKey"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.security.token.delegation.DelegationKey"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a MasterKey to the list of keys.
+
+ @param key DelegationKey
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="updatePersistedTokenRenewal"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="identifier" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
+ <param name="expiryTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update the token cache with renewal record in edit logs.
+
+ @param identifier DelegationTokenIdentifier of the renewed token
+ @param expiryTime
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="updatePersistedTokenCancellation"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="identifier" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update the token cache with the cancel record in edit logs
+
+ @param identifier DelegationTokenIdentifier of the canceled token
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getNumberOfKeys" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of delegation keys currently stored.
+ @return number of delegation keys]]>
+ </doc>
+ </method>
+ <method name="logUpdateMasterKey"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.security.token.delegation.DelegationKey"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call namesystem to update editlogs for new master key.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A HDFS specific delegation token secret manager.
+ The secret manager is responsible for generating and accepting the password
+ for each token.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager -->
+ <!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector -->
+ <class name="DelegationTokenSelector" extends="org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DelegationTokenSelector"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A delegation token that is specialized for HDFS]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+ <!-- start class org.apache.hadoop.hdfs.server.balancer.Balancer -->
+ <class name="Balancer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Run a balancer
+ @param args]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main method of Balancer
+ @param args arguments to a Balancer
+ @exception any exception occurs during datanode balancing]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return this balancer's configuration]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[set this balancer's configuration]]>
+ </doc>
+ </method>
+ <field name="MAX_NUM_CONCURRENT_MOVES" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The maximum number of concurrent blocks moves for
+ balancing purpose at a datanode]]>
+ </doc>
+ </field>
+ <field name="SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ALREADY_RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NO_MOVE_BLOCK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NO_MOVE_PROGRESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IO_EXCEPTION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ILLEGAL_ARGS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>The balancer is a tool that balances disk space usage on an HDFS cluster
+ when some datanodes become full or when new empty nodes join the cluster.
+ The tool is deployed as an application program that can be run by the
+ cluster administrator on a live HDFS cluster while applications
+ adding and deleting files.
+
+ <p>SYNOPSIS
+ <pre>
+ To start:
+ bin/start-balancer.sh [-threshold <threshold>]
+ Example: bin/ start-balancer.sh
+ start the balancer with a default threshold of 10%
+ bin/ start-balancer.sh -threshold 5
+ start the balancer with a threshold of 5%
+ To stop:
+ bin/ stop-balancer.sh
+ </pre>
+
+ <p>DESCRIPTION
+ <p>The threshold parameter is a fraction in the range of (0%, 100%) with a
+ default value of 10%. The threshold sets a target for whether the cluster
+ is balanced. A cluster is balanced if for each datanode, the utilization
+ of the node (ratio of used space at the node to total capacity of the node)
+ differs from the utilization of the (ratio of used space in the cluster
+ to total capacity of the cluster) by no more than the threshold value.
+ The smaller the threshold, the more balanced a cluster will become.
+ It takes more time to run the balancer for small threshold values.
+ Also for a very small threshold the cluster may not be able to reach the
+ balanced state when applications write and delete files concurrently.
+
+ <p>The tool moves blocks from highly utilized datanodes to poorly
+ utilized datanodes iteratively. In each iteration a datanode moves or
+ receives no more than the lesser of 10G bytes or the threshold fraction
+ of its capacity. Each iteration runs no more than 20 minutes.
+ At the end of each iteration, the balancer obtains updated datanodes
+ information from the namenode.
+
+ <p>A system property that limits the balancer's use of bandwidth is
+ defined in the default configuration file:
+ <pre>
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>1048576</value>
+ <description> Specifies the maximum bandwidth that each datanode
+ can utilize for the balancing purpose in term of the number of bytes
+ per second. </description>
+ </property>
+ </pre>
+
+ <p>This property determines the maximum speed at which a block will be
+ moved from one datanode to another. The default value is 1MB/s. The higher
+ the bandwidth, the faster a cluster can reach the balanced state,
+ but with greater competition with application processes. If an
+ administrator changes the value of this property in the configuration
+ file, the change is observed when HDFS is next restarted.
+
+ <p>MONITERING BALANCER PROGRESS
+ <p>After the balancer is started, an output file name where the balancer
+ progress will be recorded is printed on the screen. The administrator
+ can monitor the running of the balancer by reading the output file.
+ The output shows the balancer's status iteration by iteration. In each
+ iteration it prints the starting time, the iteration number, the total
+ number of bytes that have been moved in the previous iterations,
+ the total number of bytes that are left to move in order for the cluster
+ to be balanced, and the number of bytes that are being moved in this
+ iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left
+ To Move" is decreasing.
+
+ <p>Running multiple instances of the balancer in an HDFS cluster is
+ prohibited by the tool.
+
+ <p>The balancer automatically exits when any of the following five
+ conditions is satisfied:
+ <ol>
+ <li>The cluster is balanced;
+ <li>No block can be moved;
+ <li>No block has been moved for five consecutive iterations;
+ <li>An IOException occurs while communicating with the namenode;
+ <li>Another balancer is running.
+ </ol>
+
+ <p>Upon exit, a balancer returns an exit code and prints one of the
+ following messages to the output file in corresponding to the above exit
+ reasons:
+ <ol>
+ <li>The cluster is balanced. Exiting
+ <li>No block can be moved. Exiting...
+ <li>No block has been moved for 3 iterations. Exiting...
+ <li>Received an IO exception: failure reason. Exiting...
+ <li>Another balancer is running. Exiting...
+ </ol>
+
+ <p>The administrator can interrupt the execution of the balancer at any
+ time by running the command "stop-balancer.sh" on the machine where the
+ balancer is running.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.balancer.Balancer -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+ <!-- start class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
+ <class name="GenerationStamp" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="GenerationStamp"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new instance, initialized to FIRST_VALID_STAMP.]]>
+ </doc>
+ </constructor>
+ <method name="getStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current generation stamp]]>
+ </doc>
+ </method>
+ <method name="setStamp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stamp" type="long"/>
+ <doc>
+ <![CDATA[Sets the current generation stamp]]>
+ </doc>
+ </method>
+ <method name="nextStamp" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[First increments the counter and then returns the stamp]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.hdfs.server.common.GenerationStamp"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="FIRST_VALID_STAMP" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The first valid generation stamp.]]>
+ </doc>
+ </field>
+ <field name="GRANDFATHER_GENERATION_STAMP" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Generation stamp of blocks that pre-date the introduction
+ of a generation stamp.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A GenerationStamp is a Hadoop FS primitive, identified by a long.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
+ <!-- start interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
+ <interface name="HdfsConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="READ_TIMEOUT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ_TIMEOUT_EXTENSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE_TIMEOUT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE_TIMEOUT_EXTENSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Some handy internal HDFS constants]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState -->
+ <class name="HdfsConstants.BlockUCState" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="COMPLETE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Block construction completed.<br>
+ The block has at least one {@link ReplicaState#FINALIZED} replica,
+ and is not going to be modified.]]>
+ </doc>
+ </field>
+ <field name="UNDER_CONSTRUCTION" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The block is under construction.<br>
+ It has been recently allocated for write or append.]]>
+ </doc>
+ </field>
+ <field name="UNDER_RECOVERY" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The block is under recovery.<br>
+ When a file lease expires its last block may not be {@link #COMPLETE}
+ and needs to go through a recovery procedure,
+ which synchronizes the existing replicas contents.]]>
+ </doc>
+ </field>
+ <field name="COMMITTED" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The block is committed.<br>
+ The client reported that all bytes are written to data-nodes
+ with the given generation stamp and block length, but no
+ {@link ReplicaState#FINALIZED}
+ replicas has yet been reported by data-nodes themselves.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[States, which a block can go through while it is under construction.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole -->
+ <class name="HdfsConstants.NamenodeRole" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="ACTIVE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BACKUP" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STANDBY" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Defines the NameNode role.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
+ <class name="HdfsConstants.NodeType" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NAME_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DATA_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Type of the node]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState -->
+ <class name="HdfsConstants.ReplicaState" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="int"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from in]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write to out]]>
+ </doc>
+ </method>
+ <field name="FINALIZED" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Replica is finalized. The state when replica is not modified.]]>
+ </doc>
+ </field>
+ <field name="RBW" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Replica is being written to.]]>
+ </doc>
+ </field>
+ <field name="RWR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Replica is waiting to be recovered.]]>
+ </doc>
+ </field>
+ <field name="RUR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Replica is under recovery.]]>
+ </doc>
+ </field>
+ <field name="TEMPORARY" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Temporary replica: created for replication and relocation only.]]>
+ </doc>
+ </field>
+ <doc>
+      <![CDATA[Block replica states, which a replica can go through while being constructed.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
+ <class name="HdfsConstants.StartupOption" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toNodeRole" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="FORMAT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="REGULAR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BACKUP" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UPGRADE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ROLLBACK" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FINALIZE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IMPORT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Startup options]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
+ <class name="InconsistentFSStateException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String, java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The exception is thrown when file system state is inconsistent
+ and is not recoverable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
+ <class name="IncorrectVersionException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IncorrectVersionException" type="int, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="IncorrectVersionException" type="int, java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The exception is thrown when external version does not match
+ current version of the application.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.JspHelper -->
+ <class name="JspHelper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="bestNode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="streamBlockInAscii"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="blockId" type="long"/>
+ <param name="accessToken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
+ <param name="genStamp" type="long"/>
+ <param name="blockSize" type="long"/>
+ <param name="offsetIntoBlock" type="long"/>
+ <param name="chunkSizeToView" type="long"/>
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableRow"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="columns" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableRow"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="columns" type="java.lang.String[]"/>
+ <param name="row" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableFooter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="sortNodeList"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodes" type="java.util.ArrayList"/>
+ <param name="field" type="java.lang.String"/>
+ <param name="order" type="java.lang.String"/>
+ </method>
+ <method name="printPathWithLinks"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.lang.String"/>
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="namenodeInfoPort" type="int"/>
+ <param name="tokenString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="printGotoForm"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="namenodeInfoPort" type="int"/>
+ <param name="tokenString" type="java.lang.String"/>
+ <param name="file" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createTitle"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="file" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="string2ChunkSizeToView" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="defaultValue" type="int"/>
+ <doc>
+ <![CDATA[Convert a String to chunk-size-to-view.]]>
+ </doc>
+ </method>
+ <method name="getVersionTable" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a table containing version information.]]>
+ </doc>
+ </method>
+ <method name="validatePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Validate filename.
+ @return null if the filename is invalid.
+ Otherwise, return the validated filename.]]>
+ </doc>
+ </method>
+ <method name="validateLong" return="java.lang.Long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Validate a long value.
+ @return null if the value is invalid.
+ Otherwise, return the validated Long object.]]>
+ </doc>
+ </method>
+ <method name="validateURL" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Validate a URL.
+ @return null if the value is invalid.
+ Otherwise, return the validated URL String.]]>
+ </doc>
+ </method>
+ <method name="getDefaultWebUser" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[If security is turned off, what is the default web user?
+ @param conf the configuration to look in
+ @return the remote user that was configured]]>
+ </doc>
+ </method>
+ <method name="getUGI" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get {@link UserGroupInformation} and possibly the delegation token out of
+ the request.
+ @param request the http request
+ @return a new user from the request
+ @throws AccessControlException if the request has no token]]>
+ </doc>
+ </method>
+ <field name="WEB_UGI_PROPERTY_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DELEGATION_PARAMETER_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SET_DELEGATION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.JspHelper -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.Storage -->
+ <class name="Storage" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create empty storage info of the specified type]]>
+ </doc>
+ </constructor>
+ <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, int, long"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, org.apache.hadoop.hdfs.server.common.StorageInfo"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="dirIterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return default iterator
+ This iterator returns all entries in storageDirs]]>
+ </doc>
+ </method>
+ <method name="dirIterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirType" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
+ <doc>
+ <![CDATA[Return iterator based on Storage Directory Type
+ This iterator selects entries in storageDirs of type dirType and returns
+ them via the Iterator]]>
+ </doc>
+ </method>
+ <method name="listStorageDirectories" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[generate storage list (debug line)]]>
+ </doc>
+ </method>
+ <method name="getNumStorageDirs" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStorageDir" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="idx" type="int"/>
+ </method>
+ <method name="addStorageDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ </method>
+ <method name="isConversionNeeded" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkVersionUpgradable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="oldVersion" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Checks if the upgrade from the given old version is supported. If
+ no upgrade is supported, it throws IncorrectVersionException.
+
+ @param oldVersion]]>
+ </doc>
+ </method>
+ <method name="getFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get common storage fields.
+ Should be overloaded if additional fields need to be retrieved.
+
+ @param props
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set common storage fields.
+ Should be overloaded if additional fields need to be set.
+
+ @param props
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="from" type="java.io.File"/>
+ <param name="to" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteDir"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write all data storage files.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unlockAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unlock all storage directories.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isLockSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check whether underlying file system supports file locking.
+
+ @return <code>true</code> if exclusive locks are supported or
+ <code>false</code> otherwise.
+ @throws IOException
+ @see StorageDirectory#lock()]]>
+ </doc>
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="storage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
+ </method>
+ <method name="corruptPreUpgradeStorage"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="rootDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCorruptedData"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.RandomAccessFile"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAST_PRE_UPGRADE_LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAST_UPGRADABLE_LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAST_UPGRADABLE_HADOOP_VERSION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="PRE_GENERATIONSTAMP_LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PRE_RBW_LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STORAGE_FILE_VERSION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="STORAGE_DIR_CURRENT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="storageDirs" type="java.util.List"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Storage information file.
+ <p>
+ Local storage information is stored in a separate file VERSION.
+ It contains type of the node,
+ the storage layout version, the namespace id, and
+ the fs state creation time.
+ <p>
+ Local storage can reside in multiple directories.
+ Each directory should contain the same VERSION file as the others.
+ During startup Hadoop servers (name-node and data-nodes) read their local
+ storage information from them.
+ <p>
+ The servers hold a lock for each storage directory while they run so that
+ other nodes are not able to start up sharing the same storage.
+ The locks are released when the servers stop (normally or abnormally).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.Storage -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
+ <class name="Storage.StorageDirectory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Storage.StorageDirectory" type="java.io.File"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Storage.StorageDirectory" type="java.io.File, org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRoot" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get root directory of this storage]]>
+ </doc>
+ </method>
+ <method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get storage directory type]]>
+ </doc>
+ </method>
+ <method name="read"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read version file.
+
+ @throws IOException if file cannot be read or contains inconsistent data]]>
+ </doc>
+ </method>
+ <method name="read"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="from" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write version file.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="to" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clearDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clear and re-create storage directory.
+ <p>
+ Removes contents of the current directory and creates an empty directory.
+
+ This does not fully format storage directory.
+ It cannot write the version file since it should be written last after
+ all other storage type dependent files are written.
+ Derived storage is responsible for setting specific storage values and
+ writing the version file to disk.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCurrentDir" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Directory {@code current} contains latest files defining
+ the file system meta-data.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="getVersionFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[File {@code VERSION} contains the following fields:
+ <ol>
+ <li>node type</li>
+ <li>layout version</li>
+ <li>namespaceID</li>
+ <li>fs state creation time</li>
+ <li>other fields specific for this node type</li>
+ </ol>
+ The version file is always written last during storage directory updates.
+ The existence of the version file indicates that all other files have
+ been successfully written in the storage directory, the storage is valid
+ and does not need to be recovered.
+
+ @return the version file path]]>
+ </doc>
+ </method>
+ <method name="getPreviousVersionFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[File {@code VERSION} from the {@code previous} directory.
+
+ @return the previous version file path]]>
+ </doc>
+ </method>
+ <method name="getPreviousDir" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Directory {@code previous} contains the previous file system state,
+ which the system can be rolled back to.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="getPreviousTmp" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@code previous.tmp} is a transient directory, which holds
+ current file system state while the new state is saved into the new
+ {@code current} during upgrade.
+ If the saving succeeds {@code previous.tmp} will be moved to
+ {@code previous}, otherwise it will be renamed back to
+ {@code current} by the recovery procedure during startup.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="getRemovedTmp" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@code removed.tmp} is a transient directory, which holds
+ current file system state while the previous state is moved into
+ {@code current} during rollback.
+ If the moving succeeds {@code removed.tmp} will be removed,
+ otherwise it will be renamed back to
+ {@code current} by the recovery procedure during startup.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="getFinalizedTmp" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@code finalized.tmp} is a transient directory, which holds
+ the {@code previous} file system state while it is being removed
+ in response to the finalize request.
+ Finalize operation will remove {@code finalized.tmp} when completed,
+ otherwise the removal will resume upon the system startup.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="getLastCheckpointTmp" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@code lastcheckpoint.tmp} is a transient directory, which holds
+ current file system state while the new state is saved into the new
+ {@code current} during regular namespace updates.
+ If the saving succeeds {@code lastcheckpoint.tmp} will be moved to
+ {@code previous.checkpoint}, otherwise it will be renamed back to
+ {@code current} by the recovery procedure during startup.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="getPreviousCheckpoint" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@code previous.checkpoint} is a directory, which holds the previous
+ (before the last save) state of the storage directory.
+ The directory is created as a reference only, it does not play role
+ in state recovery procedures, and is recycled automatically,
+ but it may be useful for manual recovery of a stale state of the system.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="analyzeStorage" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="startOpt" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check consistency of the storage directory
+
+ @param startOpt a startup option.
+
+ @return state {@link StorageState} of the storage directory
+ @throws InconsistentFSStateException if directory state is not
+ consistent and cannot be recovered.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="doRecover"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="curState" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete or recover storage state from previously failed transition.
+
+ @param curState specifies what/how the state should be recovered
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Lock storage to provide exclusive access.
+
+ <p> Locking is not supported by all file systems.
+ E.g., NFS does not consistently support exclusive locks.
+
+ <p> If locking is supported we guarantee exculsive access to the
+ storage directory. Otherwise, no guarantee is given.
+
+ @throws IOException if locking fails]]>
+ </doc>
+ </method>
+ <method name="unlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unlock storage.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[One of the storage directories.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
+ <!-- start interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
+ <interface name="Storage.StorageDirType" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isOfType" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
+ </method>
+ <doc>
+ <![CDATA[An interface to denote storage directory type
+ Implementations can define a type for storage directory by implementing
+ this interface.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
+ <class name="Storage.StorageState" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NON_EXISTENT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NOT_FORMATTED" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECOVER_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_FINALIZE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECOVER_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECOVER_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NORMAL" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.StorageInfo -->
+ <class name="StorageInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="StorageInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="StorageInfo" type="int, int, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="StorageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLayoutVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Layout version of the storage data.]]>
+ </doc>
+ </method>
+ <method name="getNamespaceID" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Namespace id of the file system.<p>
+ Assigned to the file system at formatting and never changes after that.
+ Shared by all file system components.]]>
+ </doc>
+ </method>
+ <method name="getCTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creation time of the file system state.<p>
+ Modified during upgrades.]]>
+ </doc>
+ </method>
+ <method name="setStorageInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="from" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="layoutVersion" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="namespaceID" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="cTime" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Common class for storage information.
+
+ TODO namespaceID should be long and computed as hash(address + port)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.StorageInfo -->
+ <!-- start interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
+ <interface name="Upgradeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the layout version of the upgrade object.
+ @return layout version]]>
+ </doc>
+ </method>
+ <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of the software component, which this object is upgrading.
+ @return type]]>
+ </doc>
+ </method>
+ <method name="getDescription" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Description of the upgrade object for displaying.
+ @return description]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Upgrade status determines a percentage of the work done out of the total
+ amount required by the upgrade.
+
+ 100% means that the upgrade is completed.
+ Any value < 100 means it is not complete.
+
+ The return value should provide at least 2 values, e.g. 0 and 100.
+ @return integer value in the range [0, 100].]]>
+ </doc>
+ </method>
+ <method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Prepare for the upgrade.
+ E.g. initialize upgrade data structures and set status to 0.
+
+ Returns an upgrade command that is used for broadcasting to other cluster
+ components.
+ E.g. name-node informs data-nodes that they must perform a distributed upgrade.
+
+ @return an UpgradeCommand for broadcasting.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete upgrade.
+ E.g. cleanup upgrade data structures or write metadata to disk.
+
+ Returns an upgrade command that is used for broadcasting to other cluster
+ components.
+ E.g. data-nodes inform the name-node that they completed the upgrade
+ while other data-nodes are still upgrading.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status report for the upgrade.
+
+ @param details true if upgradeStatus details need to be included,
+ false otherwise
+ @return {@link UpgradeStatusReport}
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Common interface for distributed upgrade objects.
+
+ Each upgrade object corresponds to a layout version,
+ which is the latest version that should be upgraded using this object.
+ That is all components whose layout version is greater or equal to the
+ one returned by {@link #getVersion()} must be upgraded with this object.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
+ <class name="UpgradeManager" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UpgradeManager"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBroadcastCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUpgradeState" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUpgradeVersion" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setUpgradeState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uState" type="boolean"/>
+ <param name="uVersion" type="int"/>
+ </method>
+ <method name="getDistributedUpgrades" return="java.util.SortedSet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initializeUpgrade" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isUpgradeCompleted" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="startUpgrade" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeUpgrade"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="currentUpgrades" type="java.util.SortedSet"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="upgradeState" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="upgradeVersion" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="broadcastCommand" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Generic upgrade manager.
+
+ {@link #broadcastCommand} is the command that should be]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
+ <class name="UpgradeObject" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
+ <constructor name="UpgradeObject"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDescription" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="status" type="short"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Abstract upgrade object.
+
+ Contains default implementation of common methods of {@link Upgradeable}
+ interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
+ <class name="UpgradeObjectCollection" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UpgradeObjectCollection"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getDistributedUpgrades" return="java.util.SortedSet"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="versionFrom" type="int"/>
+ <param name="type" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Collection of upgrade objects.
+
+ Upgrade objects should be registered here before they can be used.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
+ <class name="UpgradeStatusReport" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="UpgradeStatusReport"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UpgradeStatusReport" type="int, short, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the layout version of the currently running upgrade.
+ @return layout version]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get upgrade upgradeStatus as a percentage of the total upgrade done.
+
+ @see Upgradeable#getUpgradeStatus()]]>
+ </doc>
+ </method>
+ <method name="isFinalized" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is current upgrade finalized.
+ @return true if finalized or false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getStatusText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <doc>
+ <![CDATA[Get upgradeStatus data as a text for reporting.
+ Should be overloaded for a particular upgrade specific upgradeStatus data.
+
+ @param details true if upgradeStatus details need to be included,
+ false otherwise
+ @return text]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Print basic upgradeStatus details.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="version" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="upgradeStatus" type="short"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="finalized" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Base upgrade upgradeStatus class.
+ Overload this class if specific status fields need to be reported.
+
+ Describes status of current upgrade.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.Util -->
+ <class name="Util" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Util"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="now" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Current system time.
+ @return current time in msec.]]>
+ </doc>
+ </method>
+ <method name="stringAsURI" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Interprets the passed string as a URI. In case of error it
+ assumes the specified string is a file.
+
+ @param s the string to interpret
+ @return the resulting URI
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="fileAsURI" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Converts the passed File to a URI.
+
+ @param f the file to convert
+ @return the resulting URI
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stringCollectionAsURIs" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.Collection"/>
+ <doc>
+ <![CDATA[Converts a collection of strings into a collection of URIs.
+ @param names collection of strings to convert to URIs
+ @return collection of URIs]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.Util -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.DataNode -->
+ <class name="DataNode" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="java.lang.Runnable"/>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use {@link NetUtils#createSocketAddr(String)} instead.]]>
+ </doc>
+ </method>
+ <method name="newSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates either NIO or regular depending on socketWriteTimeout.]]>
+ </doc>
+ </method>
+ <method name="getDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the DataNode object]]>
+ </doc>
+ </method>
+ <method name="createInterDataNodeProtocolProxy" return="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanodeid" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNameNodeAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSelfAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDatanodeRegistration" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return DatanodeRegistration]]>
+ </doc>
+ </method>
+ <method name="getNamenode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the namenode's identifier]]>
+ </doc>
+ </method>
+ <method name="setNewStorageID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dnReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down this instance of the datanode.
+ Returns only after shutdown is complete.
+ This method can only be called by the offerService thread.
+ Otherwise, deadlock might occur.]]>
+ </doc>
+ </method>
+ <method name="checkDiskError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Exception"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if there is no space in disk
+ @param e that caused this checkDiskError call]]>
+ </doc>
+ </method>
+ <method name="checkDiskError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if there is a disk failure and if so, handle the error]]>
+ </doc>
+ </method>
+ <method name="offerService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Main loop for the DataNode. Runs until shutdown,
+ forever calling remote NameNode functions.]]>
+ </doc>
+ </method>
+ <method name="notifyNamenodeReceivedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="delHint" type="java.lang.String"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[No matter what kind of exception we get, keep retrying to offerService().
+ That's the loop that connects to the NameNode and provides basic DataNode
+ functionality.
+
+ Only stop when "shouldRun" is turned off (which can only happen at shutdown).]]>
+ </doc>
+ </method>
+ <method name="runDatanodeDaemon"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dn" type="org.apache.hadoop.hdfs.server.datanode.DataNode"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start a single datanode daemon and wait for it to finish.
+ If this thread is specifically interrupted, it will stop waiting.]]>
+ </doc>
+ </method>
+ <method name="instantiateDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instantiate a single datanode object. This must be run by invoking
+ {@link DataNode#runDatanodeDaemon(DataNode)} subsequently.]]>
+ </doc>
+ </method>
+ <method name="createDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instantiate & Start a single datanode daemon and wait for it to finish.
+ If this thread is specifically interrupted, it will stop waiting.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="scheduleBlockReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delay" type="long"/>
+ <doc>
+ <![CDATA[This methods arranges for the data node to send the block report at the next heartbeat.]]>
+ </doc>
+ </method>
+ <method name="getFSDataset" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method is used for testing.
+ Examples are adding and deleting blocks directly.
+ The most common usage will be when the data node's storage is similated.
+
+ @return the fsdataset that stores the blocks]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="recoverBlocks" return="org.apache.hadoop.util.Daemon"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="java.util.Collection"/>
+ </method>
+ <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="recoveryId" type="long"/>
+ <param name="newLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update replica with the new generation stamp and length.]]>
+ </doc>
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getReplicaVisibleLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DN_CLIENTTRACE_FORMAT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="namenode" type="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="data" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="dnRegistration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="EMPTY_DEL_HINT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockScanner" type="org.apache.hadoop.hdfs.server.datanode.DataBlockScanner"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockScannerThread" type="org.apache.hadoop.util.Daemon"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ipcServer" type="org.apache.hadoop.ipc.Server"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PKT_HEADER_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Header size for a packet]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[DataNode is a class (and program) that stores a set of
+ blocks for a DFS deployment. A single deployment can
+ have one or many DataNodes. Each DataNode communicates
+ regularly with a single NameNode. It also communicates
+ with client code and other DataNodes from time to time.
+
+ DataNodes store a series of named blocks. The DataNode
+ allows client code to read these blocks, or to write new
+ block data. The DataNode may also, in response to instructions
+ from its NameNode, delete blocks or copy blocks to/from other
+ DataNodes.
+
+ The DataNode maintains just one critical table:
+ block-> stream of bytes (of BLOCK_SIZE or less)
+
+ This info is stored on a local disk. The DataNode
+ reports the table's contents to the NameNode upon startup
+ and every so often afterwards.
+
+ DataNodes spend their lives in an endless loop of asking
+ the NameNode for something to do. A NameNode cannot connect
+ to a DataNode directly; a NameNode simply returns values from
+ functions invoked by a DataNode.
+
+ DataNodes maintain an open server socket so that client code
+ or other DataNodes can read/write data. The host/port for
+ this server is reported to the NameNode, which then sends that
+ information to clients or other DataNodes that might be interested.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.DataNode -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper -->
+ <class name="DatanodeJspHelper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DatanodeJspHelper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
+ <class name="DataStorage" extends="org.apache.hadoop.hdfs.server.common.Storage"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataStorage" type="org.apache.hadoop.hdfs.server.common.StorageInfo, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getStorageID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isConversionNeeded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="corruptPreUpgradeStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="rootDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Data storage information file.
+ <p>
+ @see Storage]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.DirectoryScanner -->
+ <class name="DirectoryScanner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Periodically scans the data directories for block and block metadata files.
+ Reconciles the differences with block information maintained in
+ {@link FSDataset}]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.DirectoryScanner -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
+ <class name="FSDataset" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"/>
+ <constructor name="FSDataset" type="org.apache.hadoop.hdfs.server.datanode.DataStorage, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[An FSDataset has a directory where it loads its data files.]]>
+ </doc>
+ </constructor>
+ <method name="getMetaFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="findBlockFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <doc>
+ <![CDATA[Return the block file for the given ID]]>
+ </doc>
+ </method>
+ <method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blkid" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="metaFileExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMetaDataLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total space used by dfs datanode]]>
+ </doc>
+ </method>
+ <method name="hasEnoughResource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true - if there are still valid volumes on the DataNode.]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return total capacity, used and unused]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return how many bytes can still be stored in the FSDataset]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Find the block's on-disk length]]>
+ </doc>
+ </method>
+ <method name="getBlockFile" return="java.io.File"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get File name for a given block.]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="seekOffset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blkOffset" type="long"/>
+ <param name="ckoff" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns handles to the block file and its metadata file]]>
+ </doc>
+ </method>
+ <method name="unlinkBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="numLinks" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a copy of the block if this block is linked to an existing
+ snapshot. This ensures that modifying this block does not modify
+ data in any existing snapshots.
+ @param block Block
+ @param numLinks Unlink if the number of links exceed this value
+ @throws IOException
+ @return - true if the specified block was unlinked or the block
+ is not in any snapshot.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="expectedBlockLen" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="recoverAppend" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="expectedBlockLen" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="recoverClose"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="expectedBlockLen" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="recoverRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="minBytesRcvd" type="long"/>
+ <param name="maxBytesRcvd" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createTemporary" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="adjustCrcChannelPosition"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="streams" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
+ <param name="checksumSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets the offset in the meta file so that the
+ last checksum will be overwritten.]]>
+ </doc>
+ </method>
+ <method name="finalizeBlock"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete the block write!]]>
+ </doc>
+ </method>
+ <method name="unfinalizeBlock"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Remove the temporary block file (if any)]]>
+ </doc>
+ </method>
+ <method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Generates a block report from the in-memory block map.]]>
+ </doc>
+ </method>
+ <method name="isValidBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <doc>
+ <![CDATA[Check whether the given block is a valid one.
+ valid means finalized]]>
+ </doc>
+ </method>
+ <method name="invalidate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[We're informed that a block is no longer valid. We
+ could lazily garbage-collect the block, but why bother?
+ just get rid of it.]]>
+ </doc>
+ </method>
+ <method name="getFile" return="java.io.File"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <doc>
+ <![CDATA[Turn the block identifier into a filename; ignore generation stamp!!!]]>
+ </doc>
+ </method>
+ <method name="checkDataDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ <doc>
+ <![CDATA[check if a data directory is healthy
+ if some volumes failed - make sure to remove all the blocks that belong
+ to these volumes
+ @throws DiskErrorException]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStorageInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="checkAndUpdate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <param name="diskFile" type="java.io.File"/>
+ <param name="diskMetaFile" type="java.io.File"/>
+ <param name="vol" type="org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume"/>
+ <doc>
+ <![CDATA[Reconcile the difference between blocks on the disk and blocks in
+ volumeMap
+
+ Check the given block for inconsistencies. Look at the
+ current state of the block and reconcile the differences as follows:
+ <ul>
+ <li>If the block file is missing, delete the block from volumeMap</li>
+ <li>If the block file exists and the block is missing in volumeMap,
+ add the block to volumeMap <li>
+ <li>If generation stamp does not match, then update the block with right
+ generation stamp</li>
+ <li>If the block length in memory does not match the actual block file length
+ then mark the block as corrupt and update the block length in memory</li>
+ <li>If the file in {@link ReplicaInfo} does not match the file on
+ the disk, update {@link ReplicaInfo} with the correct file</li>
+ </ul>
+
+ @param blockId Block that differs
+ @param diskFile Block file on the disk
+ @param diskMetaFile Metadata file from on the disk
+ @param vol Volume of the block file]]>
+ </doc>
+ </method>
+ <method name="getReplica" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #fetchReplicaInfo(long)} instead.">
+ <param name="blockId" type="long"/>
+ <doc>
+ <![CDATA[@deprecated use {@link #fetchReplicaInfo(long)} instead.]]>
+ </doc>
+ </method>
+ <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="recoveryId" type="long"/>
+ <param name="newlength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReplicaVisibleLength" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="METADATA_EXTENSION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="METADATA_VERSION" type="short"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[FSDataset manages a set of data blocks. Each block
+ has a unique name and an extent on disk.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
+ <!-- start interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
+ <interface name="FSDatasetInterface" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean"/>
+ <method name="getMetaDataLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the length of the metadata file of the specified block
+ @param b - the block for which the metadata length is desired
+ @return the length of the metadata file for the specified block.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns metaData of block b as an input stream (and its length)
+ @param b - the block
+ @return the metadata input stream;
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="metaFileExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Does the meta file exist for this block?
+ @param b - the block
+ @return true if the metafile for the specified block exists
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the specified block's on-disk length (excluding metadata)
+ @param b
+ @return the specified block's on-disk length (excluding metadata)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getReplica" return="org.apache.hadoop.hdfs.server.datanode.Replica"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <doc>
+ <![CDATA[Get reference to the replica meta info in the replicasMap.
+ To be called from methods that are synchronized on {@link FSDataset}
+ @param blockId
+ @return replica from the replicas map]]>
+ </doc>
+ </method>
+ <method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blkid" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return the generation stamp stored with the block.]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream to read the contents of the specified block
+ @param b
+ @return an input stream to read the contents of the specified block
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="seekOffset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream at specified offset of the specified block
+ @param b
+ @param seekOffset
+ @return an input stream to read the contents of the specified block,
+ starting at the offset
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blkoff" type="long"/>
+ <param name="ckoff" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream at specified offset of the specified block
+ The block is still in the tmp directory and is not finalized
+ @param b
+ @param blkoff
+ @param ckoff
+ @return an input stream to read the contents of the specified block,
+ starting at the offset
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createTemporary" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a temporary replica and returns the meta information of the replica
+
+ @param b block
+ @return the meta info of the replica which is being written to
+ @throws IOException if an error occurs]]>
+ </doc>
+ </method>
+ <method name="createRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a RBW replica and returns the meta info of the replica
+
+ @param b block
+ @return the meta info of the replica which is being written to
+ @throws IOException if an error occurs]]>
+ </doc>
+ </method>
+ <method name="recoverRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="minBytesRcvd" type="long"/>
+ <param name="maxBytesRcvd" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recovers a RBW replica and returns the meta info of the replica
+
+ @param b block
+ @param newGS the new generation stamp for the replica
+ @param minBytesRcvd the minimum number of bytes that the replica could have
+ @param maxBytesRcvd the maximum number of bytes that the replica could have
+ @return the meta info of the replica which is being written to
+ @throws IOException if an error occurs]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="expectedBlockLen" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to a finalized replica and returns the meta info of the replica
+
+ @param b block
+ @param newGS the new generation stamp for the replica
+ @param expectedBlockLen the number of bytes the replica is expected to have
+ @return the meta info of the replica which is being written to
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="recoverAppend" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="expectedBlockLen" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recover a failed append to a finalized replica
+ and returns the meta info of the replica
+
+ @param b block
+ @param newGS the new generation stamp for the replica
+ @param expectedBlockLen the number of bytes the replica is expected to have
+ @return the meta info of the replica which is being written to
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="recoverClose"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="expectedBlockLen" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recover a failed pipeline close
+ It bumps the replica's generation stamp and finalizes it if it is an RBW replica
+
+ @param b block
+ @param newGS the new generation stamp for the replica
+ @param expectedBlockLen the number of bytes the replica is expected to have
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalizeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalizes the block previously opened for writing using writeToBlock.
+ The block size is what is in the parameter b and it must match the amount
+ of data written
+ @param b
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unfinalizeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unfinalizes the block previously opened for writing using writeToBlock.
+ The temporary file associated with this block is deleted.
+ @param b
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the block report - the full list of blocks stored
+ @return - the block report - the full list of blocks stored]]>
+ </doc>
+ </method>
+ <method name="isValidBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <doc>
+ <![CDATA[Is the block valid?
+ @param b
+ @return - true if the specified block is valid]]>
+ </doc>
+ </method>
+ <method name="invalidate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Invalidates the specified blocks
+ @param invalidBlks - the blocks to be invalidated
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkDataDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ <doc>
+ <![CDATA[Check if all the data directories are healthy
+ @throws DiskErrorException]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stringifies the name of the storage]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shutdown the FSDataset]]>
+ </doc>
+ </method>
+ <method name="adjustCrcChannelPosition"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="stream" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
+ <param name="checksumSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets the file pointer of the checksum stream so that the last checksum
+ will be overwritten
+ @param b block
+ @param stream The stream for the data file and checksum file
+ @param checksumSize number of bytes each checksum has
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="hasEnoughResource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[checks how many valid storage volumes are there in the DataNode
+ @return true if more than the minimum number of valid volumes are left in the FSDataSet
+ </doc>
+ </method>
+ <method name="getReplicaVisibleLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get visible length of the specified replica.]]>
+ </doc>
+ </method>
+ <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize a replica recovery.
+
+ @return actual state of the replica on this data-node or
+ null if data-node does not have the replica.]]>
+ </doc>
+ </method>
+ <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="recoveryId" type="long"/>
+ <param name="newLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update replica's generation stamp and length and finalize it.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is an interface for the underlying storage that stores blocks for
+ a data node.
+ Examples are the FSDataset (which stores blocks on dirs) and
+ SimulatedFSDataset (which simulates data).]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
+ <class name="FSDatasetInterface.BlockInputStreams" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class contains the input streams for the data and checksum
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
+ <class name="FSDatasetInterface.BlockWriteStreams" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This class contains the output streams for the data and checksum
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
+ <class name="FSDatasetInterface.MetaDataInputStream" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides the input stream and length of the metadata
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
+ <!-- start interface org.apache.hadoop.hdfs.server.datanode.Replica -->
+ <interface name="Replica" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getBlockId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get block ID]]>
+ </doc>
+ </method>
+ <method name="getGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get generation stamp]]>
+ </doc>
+ </method>
+ <method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the replica state
+ @return the replica state]]>
+ </doc>
+ </method>
+ <method name="getNumBytes" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of bytes received
+ @return the number of bytes that have been received]]>
+ </doc>
+ </method>
+ <method name="getBytesOnDisk" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of bytes that have been written to disk
+ @return the number of bytes that have been written to disk]]>
+ </doc>
+ </method>
+ <method name="getVisibleLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of bytes that are visible to readers
+ @return the number of bytes that are visible to readers]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This represents block replicas which are stored in the DataNode.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.datanode.Replica -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo -->
+ <class name="ReplicaInfo" extends="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.datanode.Replica"/>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class is used by datanodes to maintain meta data of its replicas.
+ It provides a general interface for meta information of a replica.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException -->
+ <class name="ReplicaNotFoundException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReplicaNotFoundException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ReplicaNotFoundException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Exception indicating that DataNode does not have a replica
+ that matches the target block.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
+ <class name="UpgradeObjectDatanode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="UpgradeObjectDatanode"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDatanode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpgrade"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Specifies how the upgrade is performed.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete upgrade and return a status complete command for broadcasting.
+
+ Data-nodes finish upgrade at different times.
+ The data-node needs to re-confirm with the name-node that the upgrade
+ is complete while other nodes are still upgrading.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for data-node upgrade objects.
+ Data-node upgrades are run in separate threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
+ <class name="DataNodeActivityMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataNodeActivityMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is the JMX MBean for reporting the DataNode Activity.
+ The MBean is registered using the name
+ "hadoop:service=DataNode,name=DataNodeActivity-<hostname>-<portNumber>"
+
+ Many of the activity metrics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the metrics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most metrics contexts do.
+ The default Null metrics context however does NOT. So if you aren't
+ using any other metrics context then you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ dfs.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically
+
+
+
+ Impl details: We use a dynamic mbean that gets the list of the metrics
+ from the metrics registry passed as an argument to the constructor]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
+ <class name="DataNodeMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="DataNodeMetrics" type="org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bytesWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bytesRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksReplicated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksRemoved" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksVerified" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockVerificationFailures" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readsFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readsFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writesFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writesFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writeBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockChecksumOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="copyBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="replaceBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="heartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockReports" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various DataNode statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #blocksRead}.inc()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
+ <!-- start interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
+ <interface name="FSDatasetMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the total space (in bytes) used by dfs datanode
+ @return the total space used by dfs datanode
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns total capacity (in bytes) of storage (used and unused)
+ @return total capacity of storage (used and unused)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the amount of free storage space (in bytes)
+ @return The amount of free storage space
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getStorageInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the storage id of the underlying storage]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This Interface defines the methods to get the status of a the FSDataset of
+ a data node.
+ It is also used for publishing via JMX (hence we follow the JMX naming
+ convention.)
+ * Note we have not used the MetricsDynamicMBeanBase to implement this
+ because the interface for the FSDatasetMBean is stable and should
+ be published as an interface.
+
+ <p>
+ Data Node runtime statistic info is report in another MBean
+ @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeStatisticsMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.BackupNode -->
+ <class name="BackupNode" extends="org.apache.hadoop.hdfs.server.namenode.NameNode"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRpcServerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setRpcServerAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getHttpServerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setHttpServerAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="loadNamesystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="size" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="register" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startCheckpoint" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endCheckpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="journal"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nnReg" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="jAction" type="int"/>
+ <param name="length" type="int"/>
+ <param name="args" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[BackupNode.
+ <p>
+ Backup node can play two roles.
+ <ol>
+ <li>{@link NamenodeRole#CHECKPOINT} node periodically creates checkpoints,
+ that is downloads image and edits from the active node, merges them, and
+ uploads the new image back to the active.</li>
+ <li>{@link NamenodeRole#BACKUP} node keeps its namespace in sync with the
+ active node, and periodically creates checkpoints by simply saving the
+ namespace image to local disk(s).</li>
+ </ol>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.BackupNode -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.BackupStorage -->
+ <class name="BackupStorage" extends="org.apache.hadoop.hdfs.server.namenode.FSImage"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="isConversionNeeded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.BackupStorage -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockManager -->
+ <class name="BlockManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="processReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <param name="report" type="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The given node is reporting all its blocks. Use this info to
+ update the (machine-->blocklist) and (block-->machinelist) tables.]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_INITIAL_MAP_CAPACITY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_MAP_LOAD_FACTOR" type="float"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_MAX_CORRUPT_FILES_RETURNED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Keeps information related to the blocks stored in the Hadoop cluster.
+ This class is a helper class for {@link FSNamesystem} and requires several
+ methods to be called with lock held on {@link FSNamesystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockManager -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy -->
+ <class name="BlockPlacementPolicy" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockPlacementPolicy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="verifyBlockPlacement" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="lBlk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
+ <param name="minRacks" type="int"/>
+ <doc>
+ <![CDATA[Verify that the block is replicated on at least minRacks different racks
+ if there is more than minRacks rack in the system.
+
+ @param srcPath the full pathname of the file to be verified
+ @param lBlk block with locations
+ @param minRacks number of racks the block should be replicated to
+ @return the difference between the required and the actual number of racks
+ the block is replicated to.]]>
+ </doc>
+ </method>
+ <method name="chooseReplicaToDelete" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcInode" type="org.apache.hadoop.hdfs.server.namenode.FSInodeInfo"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="replicationFactor" type="short"/>
+ <param name="existingReplicas" type="java.util.Collection"/>
+ <param name="moreExistingReplicas" type="java.util.Collection"/>
+ <doc>
+ <![CDATA[Decide whether deleting the specified replica of the block still makes
+ the block conform to the configured block placement policy.
+
+ @param srcInode The inode of the file to which the block-to-be-deleted belongs
+ @param block The block to be deleted
+ @param replicationFactor The required number of replicas for this block
+ @param existingReplicas The replica locations of this block that are present
+ on at least two unique racks.
+ @param moreExistingReplicas Replica locations of this block that are not
+ listed in the previous parameter.
+ @return the replica that is the best candidate for deletion]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="stats" type="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
+ <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
+ <doc>
+ <![CDATA[Used to setup a BlockPlacementPolicy object. This should be defined by
+ all implementations of a BlockPlacementPolicy.
+
+ @param conf the configuration object
+ @param stats retrieve cluster status from here
+ @param clusterMap cluster topology]]>
+ </doc>
+ </method>
+ <method name="getInstance" return="org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="stats" type="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
+ <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
+ <doc>
+ <![CDATA[Get an instance of the configured Block Placement Policy based on the
+ value of the configuration paramater dfs.block.replicator.classname.
+
+ @param conf the configuration to be used
+ @param stats an object thatis used to retrieve the load on the cluster
+ @param clusterMap the network topology of the cluster
+ @return an instance of BlockPlacementPolicy]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface is used for choosing the desired number of targets
+ for placing block replicas.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy.NotEnoughReplicasException -->
+ <class name="BlockPlacementPolicy.NotEnoughReplicasException" extends="java.lang.Exception"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy.NotEnoughReplicasException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault -->
+ <class name="BlockPlacementPolicyDefault" extends="org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="stats" type="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
+ <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="chooseTarget" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="numOfReplicas" type="int"/>
+ <param name="writer" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <param name="chosenNodes" type="java.util.List"/>
+ <param name="blocksize" type="long"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="chooseTarget" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="numOfReplicas" type="int"/>
+ <param name="writer" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <param name="chosenNodes" type="java.util.List"/>
+ <param name="excludedNodes" type="java.util.HashMap"/>
+ <param name="blocksize" type="long"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="chooseTarget" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcInode" type="org.apache.hadoop.hdfs.server.namenode.FSInodeInfo"/>
+ <param name="numOfReplicas" type="int"/>
+ <param name="writer" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <param name="chosenNodes" type="java.util.List"/>
+ <param name="blocksize" type="long"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="verifyBlockPlacement" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="lBlk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
+ <param name="minRacks" type="int"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="chooseReplicaToDelete" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.FSInodeInfo"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="replicationFactor" type="short"/>
+ <param name="first" type="java.util.Collection"/>
+ <param name="second" type="java.util.Collection"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The class is responsible for choosing the desired number of targets
+ for placing block replicas.
+ The replica placement strategy is that if the writer is on a datanode,
+ the 1st replica is placed on the local machine,
+ otherwise a random datanode. The 2nd replica is placed on a datanode
+ that is on a different rack. The 3rd replica is placed on a datanode
+ which is on a different node of the rack as the second replica.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
+ <class name="CheckpointSignature" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="CheckpointSignature"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A unique signature intended to identify checkpoint transactions.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.ContentSummaryServlet -->
+ <class name="ContentSummaryServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ContentSummaryServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Servlets for file checksum]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.ContentSummaryServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
+ <class name="CorruptReplicasMap" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CorruptReplicasMap"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addToCorruptReplicasMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="dn" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <doc>
+ <![CDATA[Mark the block belonging to datanode as corrupt.
+
+ @param blk Block to be added to CorruptReplicasMap
+ @param dn DatanodeDescriptor which holds the corrupt replica]]>
+ </doc>
+ </method>
+ <method name="numCorruptReplicas" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Stores information about all corrupt blocks in the File System.
+ A Block is considered corrupt only if all of its replicas are
+ corrupt. While reporting replicas of a Block, we hide any corrupt
+ copies. These copies are removed once Block is found to have
+ expected number of good replicas.
+ Mapping: Block -> TreeSet<DatanodeDescriptor>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
+ <class name="DatanodeDescriptor" extends="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DatanodeDescriptor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+ @param nodeID id of the data node]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network
+ @param hostName it could be different from host specified for DatanodeID]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, long, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param capacity capacity of the data node
+ @param dfsUsed space used by the data node
+ @param remaining remaing capacity of the data node
+ @param xceiverCount # of data transfers at the data node]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String, long, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network
+ @param capacity capacity of the data node, including space used by non-dfs
+ @param dfsUsed the used space by dfs datanode
+ @param remaining remaining capacity of the data node
+ @param xceiverCount # of data transfers at the data node]]>
+ </doc>
+ </constructor>
+ <method name="numBlocks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlocksScheduled" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Approximate number of blocks currently scheduled to be written
+ to this datanode.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <field name="isAlive" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="needKeyUpdate" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeDescriptor tracks stats on a given DataNode,
+ such as available storage capacity, last update time, etc.,
+ and maintains a set of blocks stored on the datanode.
+
+ This data structure is a data structure that is internal
+ to the namenode. It is *not* sent over-the-wire to the Client
+ or the Datnodes. Neither is it stored persistently in the
+ fsImage.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
+ <class name="DatanodeDescriptor.BlockTargetPair" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="block" type="org.apache.hadoop.hdfs.protocol.Block"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="targets" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Block and targets pair]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.DelegationTokenServlet -->
+ <class name="DelegationTokenServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DelegationTokenServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="resp" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="PATH_SPEC" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Serve delegation tokens over http for use in hftp.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.DelegationTokenServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
+ <class name="FileChecksumServlets" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileChecksumServlets"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Servlets for file checksum]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
+ <class name="FileChecksumServlets.GetServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileChecksumServlets.GetServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Get FileChecksum]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
+ <class name="FileChecksumServlets.RedirectServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileChecksumServlets.RedirectServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Redirect file checksum queries to an appropriate datanode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
+ <class name="FileDataServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileDataServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="parent" type="java.lang.String"/>
+ <param name="i" type="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"/>
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="nnproxy" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="URISyntaxException" type="java.net.URISyntaxException"/>
+ <doc>
+ <![CDATA[Create a redirection URI]]>
+ </doc>
+ </method>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Service a GET request as described below.
+ Request:
+ {@code
+ GET http://<nn>:<port>/data[/<path>] HTTP/1.1
+ }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Redirect queries about the hosted filesystem to an appropriate datanode.
+ @see org.apache.hadoop.hdfs.HftpFileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
+ <class name="FsckServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FsckServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Handle fsck request]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class is used in Namesystem's web server to do fsck on namenode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
+ <!-- start interface org.apache.hadoop.hdfs.server.namenode.FSClusterStats -->
+ <interface name="FSClusterStats" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTotalLoad" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[an indication of the total load of the cluster.
+
+ @return a count of the total number of block transfers and block
+ writes that are currently occuring on the cluster.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface is used for retrieving the load related statistics of
+ the cluster.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.namenode.FSClusterStats -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
+ <class name="FSEditLog" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="logSync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sync all modifications done by this thread.
+
+ The internal concurrency design of this class is as follows:
+ - Log items are written synchronized into an in-memory buffer,
+ and each assigned a transaction ID.
+ - When a thread (client) would like to sync all of its edits, logSync()
+ uses a ThreadLocal transaction ID to determine what edit number must
+ be synced to.
+ - The isSyncRunning volatile boolean tracks whether a sync is currently
+ under progress.
+
+ The data is double-buffered within each edit log implementation so that
+ in-memory writing can occur in parallel with the on-disk writing.
+
+ Each sync occurs in three steps:
+ 1. synchronized, it swaps the double buffer and sets the isSyncRunning
+ flag.
+ 2. unsynchronized, it flushes the data to storage
+ 3. synchronized, it resets the flag and notifies anyone waiting on the
+ sync.
+
+ The lack of synchronization on step 2 allows other threads to continue
+ to write into the memory buffer while the sync is in progress.
+ Because this step is unsynchronized, actions that need to avoid
+ concurrency with sync() should be synchronized and also call
+ waitForSyncToFinish() before assuming they are running alone.]]>
+ </doc>
+ </method>
+ <method name="logOpenFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction"/>
+ <doc>
+ <![CDATA[Add open lease record to edit log.
+ Records the block locations of the last block.]]>
+ </doc>
+ </method>
+ <method name="logCloseFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFile"/>
+ <doc>
+ <![CDATA[Add close lease record to edit log.]]>
+ </doc>
+ </method>
+ <method name="logMkDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
+ <doc>
+ <![CDATA[Add create directory record to edit log]]>
+ </doc>
+ </method>
+ <method name="setBufferCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ </method>
+ <method name="getOutputStreamIterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="streamType" type="org.apache.hadoop.hdfs.server.namenode.JournalStream.JournalType"/>
+ <doc>
+ <![CDATA[Get stream iterator for the specified type.]]>
+ </doc>
+ </method>
+ <field name="OP_INVALID" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[FSEditLog maintains a log of the namespace modifications.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FSImage -->
+ <class name="FSImage" extends="org.apache.hadoop.hdfs.server.common.Storage"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSImage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getFSNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="setRestoreFailedStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="boolean"/>
+ </method>
+ <method name="getRestoreFailedStorage" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write last checkpoint time and version file into the storage directory.
+
+ The version file should always be written last.
+ Missing or corrupted version file indicates that
+ the checkpoint is not valid.
+
+ @param sd storage directory
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getEditLog" return="org.apache.hadoop.hdfs.server.namenode.FSEditLog"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isConversionNeeded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="saveCurrent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Save current image and empty journal into {@code current} directory.]]>
+ </doc>
+ </method>
+ <method name="moveCurrent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move {@code current} to {@code lastcheckpoint.tmp} and
+ recreate empty {@code current}.
+ {@code current} is moved only if it is well formatted,
+ that is contains VERSION file.
+
+ @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getLastCheckpointTmp()
+ @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getPreviousCheckpoint()]]>
+ </doc>
+ </method>
+ <method name="moveLastCheckpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move {@code lastcheckpoint.tmp} to {@code previous.checkpoint}
+
+ @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getPreviousCheckpoint()
+ @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getLastCheckpointTmp()]]>
+ </doc>
+ </method>
+ <method name="format"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFsEditName" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="corruptPreUpgradeStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="rootDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="namesystem" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="checkpointTime" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="editLog" type="org.apache.hadoop.hdfs.server.namenode.FSEditLog"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="removedStorageDirs" type="java.util.List"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[list of failed (and thus removed) storages]]>
+ </doc>
+ </field>
+ <field name="ckptState" type="org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates"
+ transient="false" volatile="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Can fs-image be rolled?]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[FSImage handles checkpointing and logging of the namespace edits.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FSImage -->
+ <!-- start interface org.apache.hadoop.hdfs.server.namenode.FSInodeInfo -->
+ <interface name="FSInodeInfo" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getFullPathName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a string representation of an inode
+
+ @return the full pathname (from root) that this inode represents]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface is used used the pluggable block placement policy
+ to expose a few characteristics of an Inode.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.namenode.FSInodeInfo -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
+ <class name="FSNamesystem" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean"/>
+ <implements name="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
+ <method name="getNamespaceDirs" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getStorageDirs" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="propertyName" type="java.lang.String"/>
+ </method>
+ <method name="getNamespaceEditsDirs" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getUpgradePermission" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the default path permission when upgrading from releases with no
+ permissions (<=0.15) to releases with permissions (>=0.16)]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Close down this file system manager.
+ Causes heartbeat and lease daemons to stop; waits briefly for
+ them to finish, but a short timeout returns control back to caller.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Set permissions for an existing file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="group" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Set owner for an existing file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="concat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="srcs" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Moves all the blocks from srcs and appends them to trg
+ To avoid rollbacks we will verify validitity of ALL of the args
+ before we start actual move.
+ @param target
+ @param srcs
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[stores the modification and access time for this inode.
+ The access time is precise upto an hour. The transaction, if needed, is
+ written to the edits log but is not flushed.]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="link" type="java.lang.String"/>
+ <param name="dirPerms" type="org.apache.hadoop.fs.permission.PermissionStatus"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create a symbolic link.]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ The NameNode sets new replication and schedules either replication of
+ under-replicated data blocks or removal of the excessive block copies
+ if the blocks are over-replicated.
+
+ @see ClientProtocol#setReplication(String, short)
+ @param src file name
+ @param replication new replication
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="getAdditionalBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="previous" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="excludedNodes" type="java.util.HashMap"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[The client would like to obtain an additional block for the indicated
+ filename (which is being written-to). Return an array that consists
+ of the block, plus a set of machines. The first on this list should
+ be where the client writes data. Subsequent items in the list must
+ be provided in the connection to the first datanode.
+
+ Make sure the previous blocks have been reported by datanodes and
+ are replicated. Will return an empty 2-elt array if we want the
+ client to "try again later".]]>
+ </doc>
+ </method>
+ <method name="abandonBlock" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[The client would like to let go of the given block]]>
+ </doc>
+ </method>
+ <method name="completeFile" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem.CompleteFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <param name="last" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="markBlockAsCorrupt"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="dn" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the block belonging to datanode as corrupt
+ @param blk Block to be marked as corrupt
+ @param dn Datanode which holds the corrupt replica]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Remove the indicated filename from namespace. If the filename
+ is a directory (non empty) and recursive is set to false then throw exception.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permissions" type="org.apache.hadoop.fs.permission.PermissionStatus"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create all the necessary directories]]>
+ </doc>
+ </method>
+ <method name="getListing" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="startAfter" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Get a partial listing of the indicated directory
+
+ @param src the directory name
+ @param startAfter the name to start after
+ @return a partial listing starting after startAfter]]>
+ </doc>
+ </method>
+ <method name="registerDatanode"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Register Datanode.
+ <p>
+ The purpose of registration is to identify whether the new datanode
+ serves a new data storage, and will report new data block copies,
+ which the namenode was not aware of; or the datanode is a replacement
+ node for the data storage that was previously served by a different
+ or the same (in terms of host:port) datanode.
+ The data storages are distinguished by their storageIDs. When a new
+ data storage is reported the namenode issues a new unique storageID.
+ <p>
+ Finally, the namenode returns its namespaceID as the registrationID
+ for the datanodes.
+ namespaceID is a persistent attribute of the name space.
+ The registrationID is checked every time the datanode is communicating
+ with the namenode.
+ Datanodes with inappropriate registrationID are rejected.
+ If the namenode stops, and then restarts it can restore its
+ namespaceID and will continue serving the datanodes that has previously
+ registered with the namenode without restarting the whole cluster.
+
+ @see org.apache.hadoop.hdfs.server.datanode.DataNode#register()]]>
+ </doc>
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get registrationID for datanodes based on the namespaceID.
+
+ @see #registerDatanode(DatanodeRegistration)
+ @see FSImage#newNamespaceID()
+ @return registration ID]]>
+ </doc>
+ </method>
+ <method name="computeDatanodeWork" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Compute block replication and block invalidation work
+ that can be scheduled on data-nodes.
+ The datanode will be informed of this work at the next heartbeat.
+
+ @return number of blocks scheduled for replication or removal.]]>
+ </doc>
+ </method>
+ <method name="setNodeReplicationLimit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="limit" type="int"/>
+ </method>
+ <method name="removeDatanode"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[remove a datanode descriptor
+ @param nodeID datanode ID]]>
+ </doc>
+ </method>
+ <method name="processReport"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <param name="newReport" type="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The given node is reporting all its blocks. Use this info to
+ update the (machine-->blocklist) and (block-->machinelist) tables.]]>
+ </doc>
+ </method>
+ <method name="blockReceived"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="delHint" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The given node is reporting that it received a certain block.]]>
+ </doc>
+ </method>
+ <method name="getMissingBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCapacityTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total raw bytes including non-dfs used space.]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total used space by data nodes]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsedPercent" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total used space by data nodes as percentage of total capacity]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsedNonDFS" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total used space by data nodes for non DFS purposes such
+ as storing temporary files on the local file system]]>
+ </doc>
+ </method>
+ <method name="getCapacityRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total non-used raw bytes.]]>
+ </doc>
+ </method>
+ <method name="getCapacityRemainingPercent" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total remaining space by data nodes as percentage of total capacity]]>
+ </doc>
+ </method>
+ <method name="getTotalLoad" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total number of connections.]]>
+ </doc>
+ </method>
+ <method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ </method>
+ <method name="DFSNodesStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="live" type="java.util.ArrayList"/>
+ <param name="dead" type="java.util.ArrayList"/>
+ </method>
+ <method name="stopDecommission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stop decommissioning the specified datanodes.]]>
+ </doc>
+ </method>
+ <method name="getDataNodeInfo" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getStartTime" return="java.util.Date"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rereads the config to get hosts and exclude list file names.
+ Rereads the files to update the hosts and exclude lists. It
+ checks if any of the hosts have changed states:
+ 1. Added to hosts --> no further work needed here.
+ 2. Removed from hosts --> mark AdminState as decommissioned.
+ 3. Added to exclude --> start decommission.
+ 4. Removed from exclude --> stop decommission.]]>
+ </doc>
+ </method>
+ <method name="getDatanode" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get data node by storage ID.
+
+ @param nodeID
+ @return DatanodeDescriptor or null if the node is not found.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlocksTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of blocks in the system.]]>
+ </doc>
+ </method>
+ <method name="getFilesTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPendingReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUnderReplicatedBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCorruptReplicaBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns number of blocks with corrupt replicas]]>
+ </doc>
+ </method>
+ <method name="getScheduledReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPendingDeletionBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExcessBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlockCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFSState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFSNamesystemMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get FSNamesystemMetrics]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[shutdown FSNamesystem]]>
+ </doc>
+ </method>
+ <method name="getNumLiveDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of live data nodes
+ @return Number of live data nodes]]>
+ </doc>
+ </method>
+ <method name="getNumDeadDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of dead data nodes
+ @return Number of dead data nodes]]>
+ </doc>
+ </method>
+ <method name="setGenerationStamp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stamp" type="long"/>
+ <doc>
+ <![CDATA[Sets the generation stamp for this filesystem]]>
+ </doc>
+ </method>
+ <method name="getGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the generation stamp for this filesystem]]>
+ </doc>
+ </method>
+ <method name="numCorruptReplicas" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ </method>
+ <method name="getDecommissioningNodes" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDelegationTokenSecretManager" return="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="renewer" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param renewer
+ @return Token<DelegationTokenIdentifier>
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="renewDelegationToken" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param token
+ @return New expiryTime of the token
+ @throws InvalidToken
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cancelDelegationToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param token
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logUpdateMasterKey"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.security.token.delegation.DelegationKey"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Log the updateMasterKey operation to edit logs
+
+ @param key new delegation key.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="auditLog" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Logger for audit events, noting successful FSNamesystem operations. Emits
+ to FSNamesystem.audit at INFO. Each event causes a set of tab-separated
+ <code>key=value</code> pairs to be written for the following properties:
+ <code>
+ ugi=&lt;ugi in RPC&gt;
+ ip=&lt;remote IP&gt;
+ cmd=&lt;command&gt;
+ src=&lt;src path&gt;
+ dst=&lt;dst path (optional)&gt;
+ perm=&lt;permissions (optional)&gt;
+ </code>]]>
+ </doc>
+ </field>
+ <field name="dir" type="org.apache.hadoop.hdfs.server.namenode.FSDirectory"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="leaseManager" type="org.apache.hadoop.hdfs.server.namenode.LeaseManager"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="lmthread" type="org.apache.hadoop.util.Daemon"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="replthread" type="org.apache.hadoop.util.Daemon"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[FSNamesystem does the actual bookkeeping work for the
+ DataNode.
+
+ It tracks several important tables.
+
+ 1) valid fsname --> blocklist (kept on disk, logged)
+ 2) Set of all valid blocks (inverted #1)
+ 3) block --> machinelist (kept in memory, rebuilt dynamically from reports)
+ 4) machine --> blocklist (inverted #2)
+ 5) LRU cache of updated-heartbeat machines]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
+ <class name="GetImageServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GetImageServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in Namesystem's jetty to retrieve a file.
+ Typically used by the Secondary NameNode to retrieve image and
+ edit file for periodic checkpointing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeSymlink -->
+ <class name="INodeSymlink" extends="org.apache.hadoop.hdfs.server.namenode.INode"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="isLink" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLinkValue" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSymlink" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An INode representing a symbolic link.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeSymlink -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
+ <class name="LeaseExpiredException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LeaseExpiredException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The lease that was being used to create this file has expired.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
+ <class name="LeaseManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getLeaseByPath" return="org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <doc>
+ <![CDATA[@return the lease containing src]]>
+ </doc>
+ </method>
+ <method name="countLease" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the number of leases currently in the system]]>
+ </doc>
+ </method>
+ <method name="setLeasePeriod"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="softLimit" type="long"/>
+ <param name="hardLimit" type="long"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[LeaseManager does the lease housekeeping for writing on files.
+ This class also provides useful static methods for lease recovery.
+
+ Lease Recovery Algorithm
+ 1) Namenode retrieves lease information
+ 2) For each file f in the lease, consider the last block b of f
+ 2.1) Get the datanodes which contains b
+ 2.2) Assign one of the datanodes as the primary datanode p
+
+ 2.3) p obtains a new generation stamp form the namenode
+ 2.4) p get the block info from each datanode
+ 2.5) p computes the minimum block length
+ 2.6) p updates the datanodes, which have a valid generation stamp,
+ with the new generation stamp and the minimum block length
+ 2.7) p acknowledges the namenode the update results
+
+ 2.8) Namenode updates the BlockInfo
+ 2.9) Namenode removes f from the lease
+ and removes the lease once all files have been removed
+ 2.10) Namenode commit changes to edit log]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
+ <class name="ListPathsServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ListPathsServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="buildRoot" return="java.util.Map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="doc" type="org.znerd.xmlenc.XMLOutputter"/>
+ <doc>
+ <![CDATA[Build a map from the query string, setting values and defaults.]]>
+ </doc>
+ </method>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Service a GET request as described below.
+ Request:
+ {@code
+ GET http://<nn>:<port>/listPaths[/<path>][<?option>[&option]*] HTTP/1.1
+ }
+
+ Where <i>option</i> (default) in:
+ recursive (&quot;no&quot;)
+ filter (&quot;.*&quot;)
+ exclude (&quot;\..*\.crc&quot;)
+
+ Response: A flat list of files/directories in the following format:
+ {@code
+ <listing path="..." recursive="(yes|no)" filter="..."
+ time="yyyy-MM-dd hh:mm:ss UTC" version="...">
+ <directory path="..." modified="yyyy-MM-dd hh:mm:ss"/>
+ <file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ" accesstime="yyyy-MM-dd'T'hh:mm:ssZ"
+ blocksize="..."
+ replication="..." size="..."/>
+ </listing>
+ }]]>
+ </doc>
+ </method>
+ <field name="df" type="java.lang.ThreadLocal"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Obtain meta-information about a filesystem.
+ @see org.apache.hadoop.hdfs.HftpFileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.NameNode -->
+ <class name="NameNode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols"/>
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start NameNode.
+ <p>
+ The name-node can be started with one of the following startup options:
+ <ul>
+ <li>{@link StartupOption#REGULAR REGULAR} - normal name node startup</li>
+ <li>{@link StartupOption#FORMAT FORMAT} - format name node</li>
+ <li>{@link StartupOption#BACKUP BACKUP} - start backup node</li>
+ <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li>
+ <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
+ upgrade and create a snapshot of the current file system state</li>
+ <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the
+ cluster back to the previous state</li>
+ <li>{@link StartupOption#FINALIZE FINALIZE} - finalize
+ previous upgrade</li>
+ <li>{@link StartupOption#IMPORT IMPORT} - import checkpoint</li>
+ </ul>
+ The option is passed via configuration field:
+ <tt>dfs.namenode.startup</tt>
+
+ The conf will be modified to reflect the actual ports on which
+ the NameNode is up and running if the user passes the port as
+ <code>zero</code> in the conf.
+
+ @param conf confirguration
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="format"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Format a new filesystem. Destroys any filesystem that may already
+ exist at this location.]]>
+ </doc>
+ </method>
+ <method name="getNameNodeMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="address" type="java.lang.String"/>
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="namenode" type="java.net.InetSocketAddress"/>
+ </method>
+ <method name="getHostPortString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <doc>
+ <![CDATA[Compose a "host:port" string from the address.]]>
+ </doc>
+ </method>
+ <method name="getRole" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRpcServerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setRpcServerAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getHttpServerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setHttpServerAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="loadNamesystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize name-node.
+
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Wait for service to finish.
+ (Normally, it runs forever.)]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop all NameNode threads and wait for all to finish.]]>
+ </doc>
+ </method>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="size" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAccessKeys" return="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="errorReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="errorCode" type="int"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="register" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startCheckpoint" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endCheckpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="journalSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="journal"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="jAction" type="int"/>
+ <param name="length" type="int"/>
+ <param name="args" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="renewer" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="renewDelegationToken" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cancelDelegationToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="offset" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="flag" type="org.apache.hadoop.io.EnumSetWritable"/>
+ <param name="createParent" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permissions" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="previous" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="excludedNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="abandonBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client needs to give up on the block.]]>
+ </doc>
+ </method>
+ <method name="complete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="last" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client has detected an error on the specified located blocks
+ and is reporting them to the server. For now, the namenode will
+ mark the block as corrupt. In the future we might
+ check the blocks are actually corrupt.]]>
+ </doc>
+ </method>
+ <method name="updateBlockForPipeline" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="updatePipeline"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clientName" type="java.lang.String"/>
+ <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="commitBlockSynchronization"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newgenerationstamp" type="long"/>
+ <param name="newlength" type="long"/>
+ <param name="closeFile" type="boolean"/>
+ <param name="deleteblock" type="boolean"/>
+ <param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getPreferredBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="concat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trg" type="java.lang.String"/>
+ <param name="src" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="renewLease"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getListing" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="startAfter" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file.
+ @param src The string representation of the path to the file
+ @return object containing information regarding the file
+ or null if file not found]]>
+ </doc>
+ </method>
+ <method name="getFileLinkInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file. If the path refers to a
+ symlink then the FileStatus of the symlink is returned.
+ @param src The string representation of the path to the file
+ @return object containing information regarding the file
+ or null if file not found]]>
+ </doc>
+ </method>
+ <method name="getStats" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="isInSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the cluster currently in safe mode?]]>
+ </doc>
+ </method>
+ <method name="restoreFailedStorage" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <doc>
+ <![CDATA[@throws AccessControlException
+ @inheritDoc]]>
+ </doc>
+ </method>
+ <method name="saveNamespace"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refresh the list of datanodes that the namenode should allow to
+ connect. Re-reads conf by creating new HdfsConfiguration object and
+ uses the files list in the configuration to update the list.]]>
+ </doc>
+ </method>
+ <method name="getEditLogSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the size of the current edit log.]]>
+ </doc>
+ </method>
+ <method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Roll the edit log.]]>
+ </doc>
+ </method>
+ <method name="rollFsImage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Roll the image]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps namenode state into specified file]]>
+ </doc>
+ </method>
+ <method name="getCorruptFiles" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setQuota"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="namespaceQuota" type="long"/>
+ <param name="diskspaceQuota" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="fsync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="link" type="java.lang.String"/>
+ <param name="dirPerms" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getLinkTarget" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="registerDatanode" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="capacity" type="long"/>
+ <param name="dfsUsed" type="long"/>
+ <param name="remaining" type="long"/>
+ <param name="xmitsInProgress" type="int"/>
+ <param name="xceiverCount" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Data node notify the name node that it is alive
+ Return an array of block-oriented commands for the datanode to execute.
+ This will be either a transfer or a delete operation.]]>
+ </doc>
+ </method>
+ <method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="blocks" type="long[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockReceived"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <param name="delHints" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="errorReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="errorCode" type="int"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="verifyRequest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify request.
+
+ Verifies correctness of the datanode version, registration ID, and
+ if the datanode does not need to be shutdown.
+
+ @param nodeReg data node registration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="verifyVersion"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="version" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify version.
+
+ @param version
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFsImageName" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the name of the fsImage file]]>
+ </doc>
+ </method>
+ <method name="getFSImage" return="org.apache.hadoop.hdfs.server.namenode.FSImage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFsImageNameCheckpoint" return="java.io.File[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the name of the fsImage file uploaded by periodic
+ checkpointing]]>
+ </doc>
+ </method>
+ <method name="getNameNodeAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the address on which the NameNodes is listening to.
+ @return the address on which the NameNodes is listening to.]]>
+ </doc>
+ </method>
+ <method name="getHttpAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the address of the NameNodes http server,
+ which is used to access the name-node web UI.
+
+ @return the http address.]]>
+ </doc>
+ </method>
+ <method name="refreshServiceAcl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="refreshUserToGroupsMappings"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createNameNode" return="org.apache.hadoop.hdfs.server.namenode.NameNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="DEFAULT_PORT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="stateChangeLog" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="namesystem" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="role" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="server" type="org.apache.hadoop.ipc.Server"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[RPC server]]>
+ </doc>
+ </field>
+ <field name="rpcAddress" type="java.net.InetSocketAddress"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[RPC server address]]>
+ </doc>
+ </field>
+ <field name="httpServer" type="org.apache.hadoop.http.HttpServer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[httpServer]]>
+ </doc>
+ </field>
+ <field name="httpAddress" type="java.net.InetSocketAddress"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[HTTP server address]]>
+ </doc>
+ </field>
+ <field name="stopRequested" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[only used for testing purposes]]>
+ </doc>
+ </field>
+ <field name="nodeRegistration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Registration information of this name-node]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[NameNode serves as both directory namespace manager and
+ "inode table" for the Hadoop DFS. There is a single NameNode
+ running in any DFS deployment. (Well, except when there
+ is a second backup/failover NameNode.)
+
+ The NameNode controls two critical tables:
+ 1) filename->blocksequence (namespace)
+ 2) block->machinelist ("inodes")
+
+ The first table is stored on disk and is very precious.
+ The second table is rebuilt every time the NameNode comes
+ up.
+
+ 'NameNode' refers to both this class as well as the 'NameNode server'.
+ The 'FSNamesystem' class actually performs most of the filesystem
+ management. The majority of the 'NameNode' class itself is concerned
+ with exposing the IPC interface and the http server to the outside world,
+ plus some configuration management.
+
+ NameNode implements the ClientProtocol interface, which allows
+ clients to ask for DFS services. ClientProtocol is not
+ designed for direct use by authors of DFS client code. End-users
+ should instead use the org.apache.nutch.hadoop.fs.FileSystem class.
+
+ NameNode also implements the DatanodeProtocol interface, used by
+ DataNode programs that actually store DFS data blocks. These
+ methods are invoked repeatedly and automatically by all the
+ DataNodes in a DFS deployment.
+
+ NameNode also implements the NamenodeProtocol interface, used by
+ secondary namenodes or rebalancing processes to get partial namenode's
+ state, for example partial blocksMap etc.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.NameNode -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
+ <class name="NamenodeFsck" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="fsck"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check files on DFS, starting from the indicated path.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CORRUPT_STATUS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HEALTHY_STATUS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NONEXISTENT_STATUS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILURE_STATUS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FIXING_NONE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Don't attempt any fixing.]]>
+ </doc>
+ </field>
+ <field name="FIXING_MOVE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Move corrupted files to /lost+found.]]>
+ </doc>
+ </field>
+ <field name="FIXING_DELETE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Delete corrupted files.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
+ sub-optimal conditions.
+ <p>The tool scans all files and directories, starting from an indicated
+ root path. The following abnormal conditions are detected and handled:</p>
+ <ul>
+ <li>files with blocks that are completely missing from all datanodes.<br/>
+ In this case the tool can perform one of the following actions:
+ <ul>
+ <li>none ({@link #FIXING_NONE})</li>
+ <li>move corrupted files to /lost+found directory on DFS
+ ({@link #FIXING_MOVE}). Remaining data blocks are saved as
+ block chains, representing the longest consecutive series of valid blocks.</li>
+ <li>delete corrupted files ({@link #FIXING_DELETE})</li>
+ </ul>
+ </li>
+ <li>detect files with under-replicated or over-replicated blocks</li>
+ </ul>
+ Additionally, the tool collects detailed overall DFS statistics, and
+ optionally can print detailed statistics on block locations and replication
+ factors of each file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
+ <class name="NotReplicatedYetException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NotReplicatedYetException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The file has not finished being written to enough datanodes yet.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
+ <class name="SafeModeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SafeModeException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SafeModeException" type="java.lang.String, org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+      <![CDATA[This exception is thrown when the name node is in safe mode.
+ Clients cannot modify the namespace until safe mode is off.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
+ <class name="SecondaryNameNode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="SecondaryNameNode" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a connection to the primary namenode.]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Shut down this instance of the secondary namenode.
+ Returns only after shutdown is complete.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods.
+ @param argv Command line parameters.
+ @exception Exception if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The Secondary NameNode is a helper to the primary NameNode.
+ The Secondary is responsible for supporting periodic checkpoints
+ of the HDFS metadata. The current design allows only one Secondary
+ NameNode per HDFS cluster.
+
+ The Secondary NameNode is a daemon that periodically wakes
+ up (determined by the schedule specified in the configuration),
+ triggers a periodic checkpoint and then goes back to sleep.
+ The Secondary NameNode uses the ClientProtocol to talk to the
+ primary NameNode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
+ <class name="StreamFile" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StreamFile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getDFSClient" return="org.apache.hadoop.hdfs.DFSClient"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[getting a client for connecting to dfs]]>
+ </doc>
+ </method>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException -->
+ <class name="UnsupportedActionException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnsupportedActionException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when an operation is not supported.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
+ <class name="UpgradeObjectNamenode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UpgradeObjectNamenode"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="command" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Process an upgrade command.
+ RPC has only one very generic command for all upgrade related inter
+ component communications.
+ The actual command recognition and execution should be handled here.
+ The reply is sent back also as an UpgradeCommand.
+
+ @param command
+ @return the reply command which is analyzed on the client side.]]>
+ </doc>
+ </method>
+ <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forceProceed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Base class for name-node upgrade objects.
+ Data-node upgrades are run in separate threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+ <!-- start interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
+ <interface name="FSNamesystemMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getFSState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The state of the file system: Safemode or Operational
+ @return the state]]>
+ </doc>
+ </method>
+ <method name="getBlocksTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of allocated blocks in the system
+ @return - number of allocated blocks]]>
+ </doc>
+ </method>
+ <method name="getCapacityTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total storage capacity
+ @return - total capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacityRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Free (unused) storage capacity
+ @return - free capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Used storage capacity
+ @return - used capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getFilesTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total number of files and directories
+ @return - num of files and directories]]>
+ </doc>
+ </method>
+ <method name="getPendingReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Blocks pending to be replicated
+ @return - num of blocks to be replicated]]>
+ </doc>
+ </method>
+ <method name="getUnderReplicatedBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Blocks under replicated
+ @return - num of blocks under replicated]]>
+ </doc>
+ </method>
+ <method name="getScheduledReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Blocks scheduled for replication
+ @return - num of blocks scheduled for replication]]>
+ </doc>
+ </method>
+ <method name="getTotalLoad" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total Load on the FSNamesystem
+ @return - total load of FSNamesystem]]>
+ </doc>
+ </method>
+ <method name="getNumLiveDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Live data nodes
+ @return number of live data nodes]]>
+ </doc>
+ </method>
+ <method name="getNumDeadDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of dead data nodes
+ @return number of dead data nodes]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[This interface defines the methods to get the status of the FSNamesystem of
+ a name node.
+ It is also used for publishing via JMX (hence we follow the JMX naming
+ convention.)
+
+ Note we have not used the MetricsDynamicMBeanBase to implement this
+ because the interface for the NameNodeStateMBean is stable and should
+ be published as an interface.
+
+ <p>
+ Name Node runtime activity statistics are reported in another MBean
+ @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
+ <class name="FSNamesystemMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="FSNamesystemMetrics" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.
+ We set the metrics value within this function before pushing it out.
+ FSNamesystem updates its own local variables which are
+ light weight compared to Metrics counters.
+
+ Some of the metrics are explicitly cast to int. Some metrics collectors
+ do not handle long values. It is safe to cast to int for now as all these
+ values fit in an int.
+ Metrics related to DFS capacity are stored in bytes which do not fit in
+ int, so they are rounded to GB]]>
+ </doc>
+ </method>
+ <field name="numExpiredHeartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various FSNamesystem status metrics
+ and publishing them through the metrics interfaces.
+ The FSNamesystem creates and registers the JMX MBean.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #filesTotal}.set()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean -->
+ <class name="NameNodeActivityMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NameNodeActivityMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is the JMX MBean for reporting the NameNode Activity.
+ The MBean is register using the name
+ "hadoop:service=NameNode,name=NameNodeActivity"
+
+ Many of the activity metrics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the metrics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most metrics contexts do.
+ The default Null metrics context however does NOT. So if you aren't
+ using any other metrics context then you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ dfs.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically
+
+
+
+ Impl details: We use a dynamic mbean that gets the list of the metrics
+ from the metrics registry passed as an argument to the constructor]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
+ <class name="NameNodeMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="NameNodeMetrics" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numCreateFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesCreated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesAppended" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numGetBlockLocations" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesRenamed" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numGetListingOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numDeleteFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesDeleted" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFileInfoOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numAddBlockOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numcreateSymlinkOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numgetLinkTargetOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="transactions" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="syncs" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="transactionsBatchedInSync" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockReport" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="safeModeTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="fsImageLoadTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numBlocksCorrupted" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesInGetListingOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various NameNode activity statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #syncs}.inc()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.protocol">
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
+ <class name="BlockCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BlockCommand" type="int, java.util.List"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create BlockCommand for transferring blocks to another datanode
+ @param blocktargetlist blocks to be transferred]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockCommand" type="int, org.apache.hadoop.hdfs.protocol.Block[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create BlockCommand for the given action
+ @param blocks blocks related to the action]]>
+ </doc>
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.protocol.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTargets" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[][]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A BlockCommand is an instruction to a datanode
+ regarding some blocks under its control. It tells
+ the DataNode to either invalidate a set of indicated
+ blocks, or to copy a set of indicated blocks to
+ another DataNode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand -->
+ <class name="BlockRecoveryCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockRecoveryCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create empty BlockRecoveryCommand.]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockRecoveryCommand" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create BlockRecoveryCommand with
+ the specified capacity for recovering blocks.]]>
+ </doc>
+ </constructor>
+ <method name="getRecoveringBlocks" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the list of recovering blocks.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
+ <doc>
+ <![CDATA[Add recovering block to the command.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[BlockRecoveryCommand is an instruction to a data-node to recover
+ the specified blocks.
+
+ The data-node that receives this command treats itself as a primary
+ data-node in the recover process.
+
+ Block recovery is identified by a recoveryId, which is also the new
+ generation stamp, which the block will have after the recovery succeeds.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock -->
+ <class name="BlockRecoveryCommand.RecoveringBlock" extends="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockRecoveryCommand.RecoveringBlock"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create empty RecoveringBlock.]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockRecoveryCommand.RecoveringBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create RecoveringBlock.]]>
+ </doc>
+ </constructor>
+ <method name="getNewGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the new generation stamp of the block,
+ which also plays role of the recovery id.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This is a block with locations from which it should be recovered
+ and the new generation stamp, which the block will have after
+ successful recovery.
+
+ The new generation stamp of the block, also plays role of the recovery id.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
+ <class name="BlocksWithLocations" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlocksWithLocations" type="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with one parameter]]>
+ </doc>
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[getter]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[serialization method]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[deserialization method]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class to implement an array of BlockLocations
+ It provides efficient customized serialization/deserialization methods
+ instead of using the default array (de)serialization provided by RPC]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
+ <class name="BlocksWithLocations.BlockWithLocations" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlocksWithLocations.BlockWithLocations"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="BlocksWithLocations.BlockWithLocations" type="org.apache.hadoop.hdfs.protocol.Block, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[constructor]]>
+ </doc>
+ </constructor>
+ <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the block]]>
+ </doc>
+ </method>
+ <method name="getDatanodes" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the block's locations]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[deserialization method]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[serialization method]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class to keep track of a block and its locations]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.CheckpointCommand -->
+ <class name="CheckpointCommand" extends="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CheckpointCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CheckpointCommand" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature, boolean, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSignature" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Checkpoint signature is used to ensure
+ that nodes are talking about the same checkpoint.]]>
+ </doc>
+ </method>
+ <method name="isImageObsolete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Indicates whether the current backup image is obsolete, and therefore
+ needs to be discarded.
+
+ @return true if current image should be discarded.]]>
+ </doc>
+ </method>
+ <method name="needToReturnImage" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Indicates whether the new checkpoint image needs to be transferred
+ back to the name-node after the checkpoint is done.
+
+ @return true if the checkpoint should be returned back.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Checkpoint command.
+ <p>
+ Returned to the backup node by the name-node as a reply to the
+ {@link NamenodeProtocol#startCheckpoint(NamenodeRegistration)}
+ request.<br>
+ Contains:
+ <ul>
+ <li>{@link CheckpointSignature} identifying the particular checkpoint</li>
+ <li>indicator whether the backup image should be discarded before starting
+ the checkpoint</li>
+ <li>indicator whether the image should be transferred back to the name-node
+ upon completion of the checkpoint.</li>
+ </ul>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.CheckpointCommand -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
+ <class name="DatanodeCommand" extends="org.apache.hadoop.hdfs.server.protocol.ServerCommand"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DatanodeCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <field name="REGISTER" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FINALIZE" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Base class for data-node command.
+ Issued by the name-node to notify data-nodes what should be done.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
+ <interface name="DatanodeProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="registerDatanode" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Register Datanode.
+
+ @see org.apache.hadoop.hdfs.server.datanode.DataNode#dnRegistration
+ @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
+
+ @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains
+ new storageID if the datanode did not have one and
+ registration ID for further communication.]]>
+ </doc>
+ </method>
+ <method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="capacity" type="long"/>
+ <param name="dfsUsed" type="long"/>
+ <param name="remaining" type="long"/>
+ <param name="xmitsInProgress" type="int"/>
+ <param name="xceiverCount" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[sendHeartbeat() tells the NameNode that the DataNode is still
+ alive and well. Includes some status info, too.
+ It also gives the NameNode a chance to return
+ an array of "DatanodeCommand" objects.
+ A DatanodeCommand tells the DataNode to invalidate local block(s),
+ or to copy them to other DataNodes, etc.]]>
+ </doc>
+ </method>
+ <method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="blocks" type="long[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[blockReport() tells the NameNode about all the locally-stored blocks.
+ The NameNode returns an array of Blocks that have become obsolete
+ and should be deleted. This function is meant to upload *all*
+ the locally-stored blocks. It's invoked upon startup and then
+ infrequently afterwards.
+ @param registration
+ @param blocks - the block list as an array of longs.
+ Each block is represented as 2 longs.
+ This is done instead of Block[] to reduce memory used by block reports.
+
+ @return - the next command for DN to process.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="blockReceived"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <param name="delHints" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[blockReceived() allows the DataNode to tell the NameNode about
+ recently-received block data, with a hint for the preferred replica
+ to be deleted when there are any excess blocks.
+ For example, whenever client code
+ writes a new Block here, or another DataNode copies a Block to
+ this DataNode, it will call blockReceived().]]>
+ </doc>
+ </method>
+ <method name="errorReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="errorCode" type="int"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[errorReport() tells the NameNode about something that has gone
+ awry. Useful for debugging.]]>
+ </doc>
+ </method>
+ <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This is a very general way to send a command to the name-node during
+ distributed upgrade process.
+
+ The generosity is because the variety of upgrade commands is unpredictable.
+ The reply from the name-node is also received in the form of an upgrade
+ command.
+
+ @return a reply in the form of an upgrade command]]>
+ </doc>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}]]>
+ </doc>
+ </method>
+ <method name="commitBlockSynchronization"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newgenerationstamp" type="long"/>
+ <param name="newlength" type="long"/>
+ <param name="closeFile" type="boolean"/>
+ <param name="deleteblock" type="boolean"/>
+ <param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Commit block synchronization in lease recovery]]>
+ </doc>
+ </method>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[24: register() renamed registerDatanode()]]>
+ </doc>
+ </field>
+ <field name="NOTIFY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DISK_ERROR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INVALID_BLOCK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FATAL_DISK_ERROR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_UNKNOWN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Determines actions that data node should perform
+ when receiving a datanode command.]]>
+ </doc>
+ </field>
+ <field name="DNA_TRANSFER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_INVALIDATE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_SHUTDOWN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_REGISTER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_FINALIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_RECOVERBLOCK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_ACCESSKEYUPDATE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Protocol that a DFS datanode uses to communicate with the NameNode.
+ It's used to upload current load information and block reports.
+
+ The only way a NameNode can communicate with a DataNode is by
+ returning values from these functions.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
+ <class name="DatanodeRegistration" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"/>
+ <constructor name="DatanodeRegistration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeRegistration" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create DatanodeRegistration]]>
+ </doc>
+ </constructor>
+ <method name="setInfoPort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="infoPort" type="int"/>
+ </method>
+ <method name="setIpcPort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ipcPort" type="int"/>
+ </method>
+ <method name="setStorageInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="storage" type="org.apache.hadoop.hdfs.server.datanode.DataStorage"/>
+ </method>
+ <method name="setName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="to" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="storageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="exportedKeys" type="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeRegistration class contains all information the name-node needs
+ to identify and verify a data-node when it contacts the name-node.
+ This information is sent by data-node with each communication request.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
+ <class name="DisallowedDatanodeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DisallowedDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when a datanode tries to register or communicate
+ with the namenode when it does not appear on the list of included nodes,
+ or has been specifically excluded.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
+ <interface name="InterDatanodeProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize a replica recovery.
+
+ @return actual state of the replica on this data-node or
+ null if data-node does not have the replica.]]>
+ </doc>
+ </method>
+ <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="recoveryId" type="long"/>
+ <param name="newLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update replica with the new generation stamp and length.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[5: getBlockMetaDataInfo(), updateBlock() removed.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An inter-datanode protocol for updating generation stamp]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand -->
+ <class name="KeyUpdateCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="KeyUpdateCommand" type="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExportedKeys" return="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.NamenodeCommand -->
+ <class name="NamenodeCommand" extends="org.apache.hadoop.hdfs.server.protocol.ServerCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NamenodeCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NamenodeCommand" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Base class for name-node command.
+ Issued by the name-node to notify other name-nodes what should be done.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.NamenodeCommand -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
+ <interface name="NamenodeProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="size" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a list of blocks belonging to <code>datanode</code>
+ whose total size equals <code>size</code>.
+
+ @see org.apache.hadoop.hdfs.server.balancer.Balancer
+ @param datanode a data node
+ @param size requested size
+ @return a list of blocks & their locations
+ @throws RemoteException if size is less than or equal to 0 or
+ datanode does not exist]]>
+ </doc>
+ </method>
+ <method name="getAccessKeys" return="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the current access keys
+
+ @return ExportedAccessKeys containing current access keys
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getEditLogSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the size of the current edit log (in bytes).
+ @return The number of bytes in the current edit log.
+ @throws IOException
+ @deprecated
+ See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}]]>
+ </doc>
+ </method>
+ <method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the current edit log and opens a new one. The
+ call fails if the file system is in SafeMode.
+ @throws IOException
+ @return a unique token to identify this transaction.
+ @deprecated
+ See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}]]>
+ </doc>
+ </method>
+ <method name="rollFsImage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rolls the fsImage log. It removes the old fsImage, copies the
+ new image to fsImage, removes the old edits and renames edits.new
+ to edits. The call fails if any of the four files are missing.
+ @throws IOException
+ @deprecated
+ See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}]]>
+ </doc>
+ </method>
+ <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request name-node version and storage information.
+
+ @return {@link NamespaceInfo} identifying versions and storage information
+ of the name-node
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="errorReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="errorCode" type="int"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report to the active name-node an error occurred on a subordinate node.
+ Depending on the error code the active node may decide to unregister the
+ reporting node.
+
+ @param registration requesting node.
+ @param errorCode indicates the error
+ @param msg free text description of the error
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="register" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Register a subordinate name-node like backup node.
+
+ @return {@link NamenodeRegistration} of the node,
+ which this node has just registered with.]]>
+ </doc>
+ </method>
+ <method name="startCheckpoint" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A request to the active name-node to start a checkpoint.
+ The name-node should decide whether to admit it or reject.
+ The name-node also decides what should be done with the backup node
+ image before and after the checkpoint.
+
+ @see CheckpointCommand
+ @see NamenodeCommand
+ @see #ACT_SHUTDOWN
+
+ @param registration the requesting node
+ @return {@link CheckpointCommand} if checkpoint is allowed.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="endCheckpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A request to the active name-node to finalize
+ previously started checkpoint.
+
+ @param registration the requesting node
+ @param sig {@code CheckpointSignature} which identifies the checkpoint.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="journalSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the size of the active name-node journal (edit log) in bytes.
+
+ @param registration the requesting node
+ @return The number of bytes in the journal.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="journal"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="jAction" type="int"/>
+ <param name="length" type="int"/>
+ <param name="records" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Journal edit records.
+ This message is sent by the active name-node to the backup node
+ via {@code EditLogBackupOutputStream} in order to synchronize meta-data
+ changes with the backup namespace image.
+
+ @param registration active node registration
+ @param jAction journal action
+ @param length length of the byte array
+ @param records byte array containing serialized journal records
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compared to the previous version the following changes have been introduced:
+ (Only the latest change is reflected.
+ The log of historical changes can be retrieved from the svn).
+
+ 4: new method added: getAccessKeys()]]>
+ </doc>
+ </field>
+ <field name="NOTIFY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FATAL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JA_IS_ALIVE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JA_JOURNAL" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JA_JSPOOL_START" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JA_CHECKPOINT_TIME" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ACT_UNKNOWN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ACT_SHUTDOWN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ACT_CHECKPOINT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Protocol that a secondary NameNode uses to communicate with the NameNode.
+ It's used to get part of the name node state]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols -->
+ <interface name="NamenodeProtocols" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol"/>
+ <implements name="org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol"/>
+ <implements name="org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol"/>
+ <doc>
+ <![CDATA[The full set of RPC methods implemented by the Namenode.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration -->
+ <class name="NamenodeRegistration" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"/>
+ <constructor name="NamenodeRegistration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NamenodeRegistration" type="java.lang.String, java.lang.String, org.apache.hadoop.hdfs.server.common.StorageInfo, org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRole" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get name-node role.]]>
+ </doc>
+ </method>
+ <method name="isRole" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"/>
+ </method>
+ <method name="getCheckpointTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the age of the image.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Information sent by a subordinate name-node to the active name-node
+ during the registration process.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
+ <class name="NamespaceInfo" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NamespaceInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NamespaceInfo" type="int, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDistributedUpgradeVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[NamespaceInfo is returned by the name-node in reply
+ to a data-node handshake.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.NodeRegistration -->
+ <interface name="NodeRegistration" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get address of the server node.
+ @return hostname:portNumber]]>
+ </doc>
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get registration ID of the server node.]]>
+ </doc>
+ </method>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get layout version of the server node.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Generic class specifying information, which need to be sent to the name-node
+ during the registration process.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.NodeRegistration -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo -->
+ <class name="ReplicaRecoveryInfo" extends="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReplicaRecoveryInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ReplicaRecoveryInfo" type="long, long, long, org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getOriginalReplicaState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Replica recovery information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.ServerCommand -->
+ <class name="ServerCommand" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ServerCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Unknown server command constructor.
+ Creates a command with action 0.
+
+ @see NamenodeProtocol#ACT_UNKNOWN
+ @see DatanodeProtocol#DNA_UNKNOWN]]>
+ </doc>
+ </constructor>
+ <constructor name="ServerCommand" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a command for the specified action.
+ Actions are protocol specific.
+
+ @see DatanodeProtocol
+ @see NamenodeProtocol
+ @param action]]>
+ </doc>
+ </constructor>
+ <method name="getAction" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get server command action.
+ @return action code.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Base class for a server command.
+ Issued by the name-node to notify other servers what should be done.
+ Commands are defined by actions defined in respective protocols.
+
+ @see DatanodeProtocol
+ @see NamenodeProtocol]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.ServerCommand -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
+ <class name="UpgradeCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UpgradeCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UpgradeCommand" type="int, int, short"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCurrentStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="UC_ACTION_REPORT_STATUS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UC_ACTION_START_UPGRADE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This as a generic distributed upgrade command.
+
+ During the upgrade cluster components send upgrade commands to each other
+ in order to obtain or share information with them.
+ It is supposed that each upgrade defines specific upgrade command by
+ deriving them from this class.
+ The upgrade command contains version of the upgrade, which is verified
+ on the receiving side and current status of the upgrade.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+ <!-- start class org.apache.hadoop.hdfs.tools.DelegationTokenFetcher -->
+ <class name="DelegationTokenFetcher" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DelegationTokenFetcher" type="org.apache.hadoop.hdfs.DistributedFileSystem, java.io.DataOutputStream, org.apache.hadoop.security.UserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Command-line interface]]>
+ </doc>
+ </method>
+ <method name="go"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Fetch a DelegationToken from the current Namenode and store it in the
+ specified file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.DelegationTokenFetcher -->
+ <!-- start class org.apache.hadoop.hdfs.tools.DFSAdmin -->
+ <class name="DFSAdmin" extends="org.apache.hadoop.fs.FsShell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSAdmin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a DFSAdmin object.]]>
+ </doc>
+ </constructor>
+ <constructor name="DFSAdmin" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a DFSAdmin object.]]>
+ </doc>
+ </constructor>
+ <method name="report"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gives a report on how the FileSystem is doing.
+ @exception IOException if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Safe mode maintenance command.
+ Usage: java DFSAdmin -safemode [enter | leave | get]
+ @param argv List of of command line parameters.
+ @param idx The index of the command that is being processed.
+ @exception IOException if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <method name="saveNamespace" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to save the namespace.
+ Usage: java DFSAdmin -saveNamespace
+ @exception IOException
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
+ </doc>
+ </method>
+ <method name="restoreFaileStorage" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to enable/disable/check restoring of failed storage replicas in the namenode.
+ Usage: java DFSAdmin -restoreFailedStorage true|false|check
+ @exception IOException
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)]]>
+ </doc>
+ </method>
+ <method name="refreshNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to reread the hosts and excluded hosts
+ file.
+ Usage: java DFSAdmin -refreshNodes
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to finalize previously performed upgrade.
+ Usage: java DFSAdmin -finalizeUpgrade
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="upgradeProgress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to request current distributed upgrade status,
+ a detailed status, or to force the upgrade to proceed.
+
+ Usage: java DFSAdmin -upgradeProgress [status | details | force]
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="metaSave" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps DFS data structures into specified file.
+ Usage: java DFSAdmin -metasave filename
+ @param argv List of of command line parameters.
+ @param idx The index of the command that is being processed.
+ @exception IOException if an error accoured wile accessing
+ the file or path.]]>
+ </doc>
+ </method>
+ <method name="printTopology" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Display each rack and the nodes assigned to that rack, as determined
+ by the NameNode, in a hierarchical manner. The nodes and racks are
+ sorted alphabetically.
+
+ @throws IOException If an error while getting datanode report]]>
+ </doc>
+ </method>
+ <method name="refreshServiceAcl" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refresh the authorization policy on the {@link NameNode}.
+ @return exitcode 0 on success, non-zero on failure
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="refreshUserToGroupsMappings" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refresh the user-to-groups mappings on the {@link NameNode}.
+ @return exitcode 0 on success, non-zero on failure
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[@param argv The parameters passed to this program.
+ @exception Exception if the filesystem does not exist.
+ @return 0 on success, non zero on error.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods.
+ @param argv Command line parameters.
+ @exception Exception if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class provides some DFS administrative access.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.DFSAdmin -->
+ <!-- start class org.apache.hadoop.hdfs.tools.DFSck -->
+ <class name="DFSck" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="DFSck" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filesystem checker.
+ @param conf current Configuration]]>
+ </doc>
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
+ sub-optimal conditions.
+ <p>The tool scans all files and directories, starting from an indicated
+ root path. The following abnormal conditions are detected and handled:</p>
+ <ul>
+ <li>files with blocks that are completely missing from all datanodes.<br/>
+ In this case the tool can perform one of the following actions:
+ <ul>
+ <li>none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})</li>
+ <li>move corrupted files to /lost+found directory on DFS
+ ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as a
+ block chains, representing longest consecutive series of valid blocks.</li>
+ <li>delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})</li>
+ </ul>
+ </li>
+ <li>detect files with under-replicated or over-replicated blocks</li>
+ </ul>
+ Additionally, the tool collects a detailed overall DFS statistics, and
+ optionally can print detailed statistics on block locations and replication
+ factors of each file.
+ The tool also provides and option to filter open files during the scan.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.DFSck -->
+ <!-- start class org.apache.hadoop.hdfs.tools.HDFSConcat -->
+ <class name="HDFSConcat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HDFSConcat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.HDFSConcat -->
+ <!-- start class org.apache.hadoop.hdfs.tools.JMXGet -->
+ <class name="JMXGet" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JMXGet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="service" type="java.lang.String"/>
+ </method>
+ <method name="setPort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="port" type="java.lang.String"/>
+ </method>
+ <method name="setServer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="server" type="java.lang.String"/>
+ </method>
+ <method name="setLocalVMUrl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="url" type="java.lang.String"/>
+ </method>
+ <method name="printAllValues"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[print all attributes' values]]>
+ </doc>
+ </method>
+ <method name="getValue" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.String"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[get single value by key]]>
+ </doc>
+ </method>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[initializes MBeanServer
+ @throws Exception]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[main
+
+ @param args]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[tool to get data from NameNode or DataNode using MBeans currently the
+ following MBeans are available (under hadoop domain):
+ hadoop:service=NameNode,name=FSNamesystemState (static)
+ hadoop:service=NameNode,name=NameNodeActivity (dynamic)
+ hadoop:service=NameNode,name=RpcActivityForPort9000 (dynamic)
+ hadoop:service=DataNode,name=RpcActivityForPort50020 (dynamic)
+ hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId663800459
+ (static)
+ hadoop:service=DataNode,name=DataNodeActivity-UndefinedStorageId-520845215
+ (dynamic)
+
+
+ implementation note: all logging is sent to System.err (since it is a command
+ line tool)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.JMXGet -->
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+ <!-- start class org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer -->
+ <class name="OfflineImageViewer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OfflineImageViewer" type="java.lang.String, org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="go"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Process image file.]]>
+ </doc>
+ </method>
+ <method name="buildOptions" return="org.apache.commons.cli.Options"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Build command-line options and descriptions]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Entry point to command-line-driven operation. User may specify
+ options and start fsimage viewer from the command line. Program
+ will process image file and exit cleanly or, if an error is
+ encountered, inform user and exit.
+
+ @param args Command line options
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[OfflineImageViewer to dump the contents of a Hadoop image file to XML
+ or the console. Main entry point into utility, either via the
+ command line or programmatically.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer -->
+</package>
+
+</api>
diff --git a/aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.22.0.xml b/aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.22.0.xml
new file mode 100644
index 0000000..cb0d4f0
--- /dev/null
+++ b/aarch64/share/hadoop/hdfs/jdiff/hadoop-hdfs_0.22.0.xml
@@ -0,0 +1,18589 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Sun Dec 04 01:00:08 UTC 2011 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="hadoop-hdfs 0.22.0"
+ jdversion="1.0.9">
+
+<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /x1/jenkins/jenkins-slave/workspace/Hadoop-22-Build/common/hdfs/build/ivy/lib/Hadoop-Hdfs/jdiff/jdiff-1.0.9.jar:/x1/jenkins/jenkins-slave/workspace/Hadoop-22-Build/common/hdfs/build/ivy/lib/Hadoop-Hdfs/jdiff/xerces-1.4.4.jar -classpath /x1/jenkins/jenkins-slave/workspace/Hadoop-22-Build/common/hdfs/build/classes:/x1/jenkins/jenkins-slave/workspace/Hadoop-22-Build/common/hdfs/conf:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-common/jars/hadoop-common-0.22.0-SNAPSHOT.jar:/home/jenkins/.ivy2/cache/commons-cli/commons-cli/jars/commons-cli-1.2.jar:/home/jenkins/.ivy2/cache/xmlenc/xmlenc/jars/xmlenc-0.52.jar:/home/jenkins/.ivy2/cache/commons-codec/commons-codec/jars/commons-codec-1.4.jar:/home/jenkins/.ivy2/cache/commons-logging/commons-logging/jars/commons-logging-1.1.1.jar:/home/jenkins/.ivy2/cache/org.slf4j/slf4j-api/jars/slf4j-api-1.6.1.jar:/home/jenkins/.ivy2/cache/org.slf4j/slf4j-log4j12/jars/slf4j-log4j12-1.6.1.jar:/home/jenkins/.ivy2/cache/log4j/log4j/bundles/log4j-1.2.16.jar:/home/jenkins/.ivy2/cache/org.mortbay.jetty/jetty/jars/jetty-6.1.26.jar:/home/jenkins/.ivy2/cache/org.mortbay.jetty/jetty-util/jars/jetty-util-6.1.26.jar:/home/jenkins/.ivy2/cache/org.mortbay.jetty/servlet-api/jars/servlet-api-2.5-20081211.jar:/home/jenkins/.ivy2/cache/tomcat/jasper-runtime/jars/jasper-runtime-5.5.12.jar:/home/jenkins/.ivy2/cache/tomcat/jasper-compiler/jars/jasper-compiler-5.5.12.jar:/home/jenkins/.ivy2/cache/org.mortbay.jetty/jsp-2.1-jetty/jars/jsp-2.1-jetty-6.1.26.jar:/home/jenkins/.ivy2/cache/org.eclipse.jdt/core/jars/core-3.1.1.jar:/home/jenkins/.ivy2/cache/org.mortbay.jetty/jsp-api-2.1-glassfish/jars/jsp-api-2.1-glassfish-2.1.v20091210.jar:/home/jenkins/.ivy2/cache/org.mortbay.jetty/jsp-2.1-glassfish/jars/jsp-2.1-glassfish-2.1.v20091210.jar:/home/jenkins/.ivy2/cache/org.eclipse.jdt.core.compiler/ecj/jars/ecj-3.5.1.jar:/home/jenkins/.ivy2/cache/commons-el/commons-el/jars/commons-el-1.0.jar:/home/jenkins/
.ivy2/cache/net.java.dev.jets3t/jets3t/jars/jets3t-0.7.1.jar:/home/jenkins/.ivy2/cache/commons-httpclient/commons-httpclient/jars/commons-httpclient-3.1.jar:/home/jenkins/.ivy2/cache/commons-net/commons-net/jars/commons-net-1.4.1.jar:/home/jenkins/.ivy2/cache/oro/oro/jars/oro-2.0.8.jar:/home/jenkins/.ivy2/cache/net.sf.kosmosfs/kfs/jars/kfs-0.3.jar:/home/jenkins/.ivy2/cache/junit/junit/jars/junit-4.8.1.jar:/home/jenkins/.ivy2/cache/hsqldb/hsqldb/jars/hsqldb-1.8.0.10.jar:/home/jenkins/.ivy2/cache/org.apache.avro/avro/jars/avro-1.5.3.jar:/home/jenkins/.ivy2/cache/org.codehaus.jackson/jackson-mapper-asl/jars/jackson-mapper-asl-1.7.3.jar:/home/jenkins/.ivy2/cache/org.codehaus.jackson/jackson-core-asl/jars/jackson-core-asl-1.7.3.jar:/home/jenkins/.ivy2/cache/com.thoughtworks.paranamer/paranamer/jars/paranamer-2.3.jar:/home/jenkins/.ivy2/cache/org.xerial.snappy/snappy-java/bundles/snappy-java-1.0.3.2.jar:/home/jenkins/.ivy2/cache/org.apache.avro/avro-ipc/jars/avro-ipc-1.5.3.jar:/home/jenkins/.ivy2/cache/commons-daemon/commons-daemon/jars/commons-daemon-1.0.1.jar:/home/jenkins/.ivy2/cache/org.apache.avro/avro-compiler/jars/avro-compiler-1.5.3.jar:/home/jenkins/.ivy2/cache/commons-lang/commons-lang/jars/commons-lang-2.5.jar:/home/jenkins/.ivy2/cache/org.apache.velocity/velocity/jars/velocity-1.6.4.jar:/home/jenkins/.ivy2/cache/commons-collections/commons-collections/jars/commons-collections-3.2.1.jar:/home/jenkins/.ivy2/cache/com.thoughtworks.paranamer/paranamer-ant/jars/paranamer-ant-2.3.jar:/home/jenkins/.ivy2/cache/com.thoughtworks.paranamer/paranamer-generator/jars/paranamer-generator-2.3.jar:/home/jenkins/.ivy2/cache/com.thoughtworks.qdox/qdox/jars/qdox-1.12.jar:/home/jenkins/.ivy2/cache/asm/asm/jars/asm-3.3.jar:/home/jenkins/.ivy2/cache/org.apache.ant/ant/jars/ant-1.7.1.jar:/home/jenkins/.ivy2/cache/org.apache.ant/ant-launcher/jars/ant-launcher-1.7.1.jar:/home/jenkins/.ivy2/cache/org.aspectj/aspectjrt/jars/aspectjrt-1.6.5.jar:/home/jenkins/.ivy2/cache/org.aspectj/aspec
tjtools/jars/aspectjtools-1.6.5.jar:/home/jenkins/.ivy2/cache/org.mockito/mockito-all/jars/mockito-all-1.8.2.jar:/home/jenkins/.ivy2/cache/com.google.guava/guava/jars/guava-r09.jar:/home/jenkins/.ivy2/cache/jdiff/jdiff/jars/jdiff-1.0.9.jar:/home/jenkins/.ivy2/cache/xerces/xerces/jars/xerces-1.4.4.jar:/home/jenkins/tools/ant/latest/lib/ant-launcher.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/xercesImpl.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-resolver.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-bcel.jar:/home/jenkins/tools/ant/latest/lib/ant-jsch.jar:/home/jenkins/tools/ant/latest/lib/ant-jmf.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-oro.jar:/home/jenkins/tools/ant/latest/lib/ant-netrexx.jar:/home/jenkins/tools/ant/latest/lib/ant-testutil.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-xalan2.jar:/home/jenkins/tools/ant/latest/lib/ant-javamail.jar:/home/jenkins/tools/ant/latest/lib/ant.jar:/home/jenkins/tools/ant/latest/lib/ant-junit.jar:/home/jenkins/tools/ant/latest/lib/ant-swing.jar:/home/jenkins/tools/ant/latest/lib/ant-commons-net.jar:/home/jenkins/tools/ant/latest/lib/ant-jdepend.jar:/home/jenkins/tools/ant/latest/lib/ant-junit4.jar:/home/jenkins/tools/ant/latest/lib/ant-commons-logging.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-bsf.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-log4j.jar:/home/jenkins/tools/ant/latest/lib/ant-jai.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-regexp.jar:/home/jenkins/tools/ant/latest/lib/ant-antlr.jar:/tmp/jdk1.6.0_29/lib/tools.jar -sourcepath /x1/jenkins/jenkins-slave/workspace/Hadoop-22-Build/common/hdfs/src/java -apidir /x1/jenkins/jenkins-slave/workspace/Hadoop-22-Build/common/hdfs/lib/jdiff -apiname hadoop-hdfs 0.22.0 -->
+<package name="org.apache.hadoop.fs">
+ <!-- start class org.apache.hadoop.fs.Hdfs -->
+ <class name="Hdfs" extends="org.apache.hadoop.fs.AbstractFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getUriDefaultPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInternal" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="createFlag" type="java.util.EnumSet"/>
+ <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="bytesPerChecksum" type="int"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getFileLinkStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getFsStatus" return="org.apache.hadoop.fs.FsStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listLocatedStatus" return="org.apache.hadoop.fs.RemoteIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatusIterator" return="org.apache.hadoop.fs.RemoteIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="mkdir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="renameInternal"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="renameInternal"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="supportsSymlinks" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="org.apache.hadoop.fs.Path"/>
+ <param name="link" type="org.apache.hadoop.fs.Path"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="getLinkTarget" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Hdfs -->
+</package>
+<package name="org.apache.hadoop.hdfs">
+ <!-- start class org.apache.hadoop.hdfs.BlockMissingException -->
+ <class name="BlockMissingException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockMissingException" type="java.lang.String, java.lang.String, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[An exception that indicates that file was corrupted.
+ @param filename name of corrupted file
+ @param description a description of the corruption details
+ @param offset offset in the file at which the corruption was encountered]]>
+ </doc>
+ </constructor>
+ <method name="getFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the corrupted file.
+ @return name of corrupted file]]>
+ </doc>
+ </method>
+ <method name="getOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the offset at which this file is corrupted
+ @return offset of corrupted file]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This exception is thrown when a read encounters a block that has no locations
+ associated with it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.BlockMissingException -->
+ <!-- start class org.apache.hadoop.hdfs.BlockReader -->
+ <class name="BlockReader" extends="org.apache.hadoop.fs.FSInputChecker"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChunkPosition" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ </method>
+ <method name="readChunk" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksumBuf" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="newBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <param name="file" type="java.lang.String"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <param name="startOffset" type="long"/>
+ <param name="len" type="long"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="newBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <param name="file" type="java.lang.String"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <param name="startOffset" type="long"/>
+ <param name="len" type="long"/>
+ <param name="bufferSize" type="int"/>
+ <param name="verifyChecksum" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Java Doc required]]>
+ </doc>
+ </method>
+ <method name="newBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <param name="file" type="java.lang.String"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <param name="startOffset" type="long"/>
+ <param name="len" type="long"/>
+ <param name="bufferSize" type="int"/>
+ <param name="verifyChecksum" type="boolean"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new BlockReader specifically to satisfy a read.
+ This method also sends the OP_READ_BLOCK request.
+
+ @param sock An established Socket to the DN. The BlockReader will not close it normally
+ @param file File location
+ @param block The block object
+ @param blockToken The block token for security
+ @param startOffset The read offset, relative to block head
+ @param len The number of bytes to read
+ @param bufferSize The IO buffer size (not the client buffer size)
+ @param verifyChecksum Whether to verify checksum
+ @param clientName Client name
+ @return New BlockReader instance, or null on error.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readAll" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Similar to readFully(), but only reads as much data as is available,
+ and allows use of the protected readFully().]]>
+ </doc>
+ </method>
+ <method name="takeSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Take the socket used to talk to the DN.]]>
+ </doc>
+ </method>
+ <method name="hasSentStatusCode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Whether the BlockReader has reached the end of its input stream
+ and successfully sent a status code back to the datanode.]]>
+ </doc>
+ </method>
+ <method name="getFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.net.InetSocketAddress"/>
+ <param name="blockId" type="long"/>
+ </method>
+ <doc>
+ <![CDATA[This is a wrapper around connection to datanode
+ and understands checksum, offset etc.
+
+ Terminology:
+ <dl>
+ <dt>block</dt>
+ <dd>The hdfs block, typically large (~64MB).
+ </dd>
+ <dt>chunk</dt>
+ <dd>A block is divided into chunks, each comes with a checksum.
+ We want transfers to be chunk-aligned, to be able to
+ verify checksums.
+ </dd>
+ <dt>packet</dt>
+ <dd>A grouping of chunks used for transport. It contains a
+ header, followed by checksum data, followed by real data.
+ </dd>
+ </dl>
+ Please see DataNode for the RPC specification.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.BlockReader -->
+ <!-- start class org.apache.hadoop.hdfs.DeprecatedUTF8 -->
+ <class name="DeprecatedUTF8" extends="org.apache.hadoop.io.UTF8"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DeprecatedUTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DeprecatedUTF8" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a given string.]]>
+ </doc>
+ </constructor>
+ <constructor name="DeprecatedUTF8" type="org.apache.hadoop.hdfs.DeprecatedUTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a given string.]]>
+ </doc>
+ </constructor>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A simple wrapper around {@link org.apache.hadoop.io.UTF8}.
+ This class should be used only when it is absolutely necessary
+ to use {@link org.apache.hadoop.io.UTF8}. The only difference is that
+ using this class does not require "@SuppressWarnings" annotation to avoid
+ javac warning. Instead the deprecation is implied in the class name.
+
+ This should be treated as package private class to HDFS.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DeprecatedUTF8 -->
+ <!-- start class org.apache.hadoop.hdfs.DFSClient -->
+ <class name="DFSClient" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="java.io.Closeable"/>
+ <constructor name="DFSClient" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="Deprecated at 0.21">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as this(NameNode.getAddress(conf), conf);
+ @see #DFSClient(InetSocketAddress, Configuration)
+ @deprecated Deprecated at 0.21]]>
+ </doc>
+ </constructor>
+ <constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as this(nameNodeAddr, conf, null);
+ @see #DFSClient(InetSocketAddress, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)]]>
+ </doc>
+ </constructor>
+ <constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as this(nameNodeAddr, null, conf, stats);
+ @see #DFSClient(InetSocketAddress, ClientProtocol, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)]]>
+ </doc>
+ </constructor>
+ <method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The locking hierarchy is to first acquire lock on DFSClient object, followed by
+ lock on leasechecker, followed by lock on an individual DFSOutputStream.]]>
+ </doc>
+ </method>
+ <method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nameNodeAddr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file system, abandoning all of the leases and files being
+ created and close connections to the namenode.]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default block size for this cluster
+ @return the default block size in bytes]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@see ClientProtocol#getPreferredBlockSize(String)]]>
+ </doc>
+ </method>
+ <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get server default values for a number of configuration params.
+ @see ClientProtocol#getServerDefaults()]]>
+ </doc>
+ </method>
+ <method name="stringifyToken" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A test method for printing out tokens
+ @param token
+ @return Stringified version of the token]]>
+ </doc>
+ </method>
+ <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="renewer" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@see ClientProtocol#getDelegationToken(Text)]]>
+ </doc>
+ </method>
+ <method name="renewDelegationToken" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@see ClientProtocol#renewDelegationToken(Token)]]>
+ </doc>
+ </method>
+ <method name="cancelDelegationToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@see ClientProtocol#cancelDelegationToken(Token)]]>
+ </doc>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report corrupt blocks that were discovered by the client.
+ @see ClientProtocol#reportBadBlocks(LocatedBlock[])]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="start" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Get block location info about file
+
+ getBlockLocations() returns a list of hostnames that store
+ data for a specific file region. It returns a set of hostnames
+ for every block within the indicated region.
+
+ This function is very useful when writing code that considers
+ data-placement when performing operations. For example, the
+ MapReduce system tries to schedule tasks on the same machines
+ as the data-block the task processes.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.hdfs.DFSInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.hdfs.DFSInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #open(String, int, boolean)} instead.">
+ <param name="src" type="java.lang.String"/>
+ <param name="buffersize" type="int"/>
+ <param name="verifyChecksum" type="boolean"/>
+ <param name="stats" type="org.apache.hadoop.fs.FileSystem.Statistics"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create an input stream that obtains a nodelist from the
+ namenode, and then reads from all the right places. Creates
+ inner subclass of InputStream that does the right out-of-band
+ work.
+ @deprecated Use {@link #open(String, int, boolean)} instead.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.hdfs.DFSInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="buffersize" type="int"/>
+ <param name="verifyChecksum" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create an input stream that obtains a nodelist from the
+ namenode, and then reads from all the right places. Creates
+ inner subclass of InputStream that does the right out-of-band
+ work.]]>
+ </doc>
+ </method>
+ <method name="getNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the namenode associated with this DFSClient object
+ @return the namenode associated with this DFSClient object]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #create(String, boolean, short, long, Progressable)} with
+ default <code>replication</code> and <code>blockSize</code> and null <code>
+ progress</code>.]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #create(String, boolean, short, long, Progressable)} with
+ default <code>replication</code> and <code>blockSize</code>.]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #create(String, boolean, short, long, Progressable)} with
+ null <code>progress</code>.]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #create(String, boolean, short, long, Progressable, int)}
+ with default bufferSize.]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #create(String, FsPermission, EnumSet, short, long,
+ Progressable, int)} with default <code>permission</code>
+ {@link FsPermission#getDefault()}.
+
+ @param src File name
+ @param overwrite overwrite an existing file if true
+ @param replication replication factor for the file
+ @param blockSize maximum block size
+ @param progress interface for reporting client progress
+ @param buffersize underlying buffersize
+
+ @return output stream]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="flag" type="java.util.EnumSet"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #create(String, FsPermission, EnumSet, boolean, short,
+ long, Progressable, int)} with <code>createParent</code> set to true.]]>
+ </doc>
+ </method>
+ <method name="create" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="flag" type="java.util.EnumSet"/>
+ <param name="createParent" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new dfs file with the specified block replication
+ with write-progress reporting and return an output stream for writing
+ into the file.
+
+ @param src File name
+ @param permission The permission of the directory being created.
+ If null, use default permission {@link FsPermission#getDefault()}
+ @param flag indicates create a new file or create/overwrite an
+ existing file or append to an existing file
+ @param createParent create missing parent directory if true
+ @param replication block replication
+ @param blockSize maximum block size
+ @param progress interface for reporting client progress
+ @param buffersize underlying buffer size
+
+ @return output stream
+
+ @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable,
+ boolean, short, long) for detailed description of exceptions thrown]]>
+ </doc>
+ </method>
+ <method name="primitiveCreate" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="absPermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="flag" type="java.util.EnumSet"/>
+ <param name="createParent" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="buffersize" type="int"/>
+ <param name="bytesPerChecksum" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Same as {@link #create(String, FsPermission, EnumSet, short, long,
+ Progressable, int)} except that the permission
+ is absolute (i.e. it has already been masked with umask).]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="link" type="java.lang.String"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a symbolic link.
+
+ @see ClientProtocol#createSymlink(String, String,FsPermission, boolean)]]>
+ </doc>
+ </method>
+ <method name="getLinkTarget" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Resolve the *first* symlink, if any, in the path.
+
+ @see ClientProtocol#getLinkTarget(String)]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+ @param src file name
+ @param replication
+
+ @see ClientProtocol#setReplication(String, short)]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #rename(String, String, Options.Rename...)} instead.">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename file or directory.
+ @see ClientProtocol#rename(String, String)
+ @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.]]>
+ </doc>
+ </method>
+ <method name="concat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trg" type="java.lang.String"/>
+ <param name="srcs" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move blocks from src to trg and delete src
+ See {@link ClientProtocol#concat(String, String [])}.]]>
+ </doc>
+ </method>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename file or directory.
+ @see ClientProtocol#rename(String, String, Options.Rename...)]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete file or directory.
+ See {@link ClientProtocol#delete(String)}.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[delete file or directory.
+ delete contents of the directory if non empty and recursive
+ set to true
+
+ @see ClientProtocol#delete(String, boolean)]]>
+ </doc>
+ </method>
+ <method name="exists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implemented using getFileInfo(src)]]>
+ </doc>
+ </method>
+ <method name="listPaths" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="startAfter" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a partial listing of the indicated directory
+ No block locations need to be fetched]]>
+ </doc>
+ </method>
+ <method name="listPaths" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="startAfter" type="byte[]"/>
+ <param name="needLocation" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a partial listing of the indicated directory
+
+ Recommend to use HdfsFileStatus.EMPTY_NAME as startAfter
+ if the application wants to fetch a listing starting from
+ the first entry in the directory
+
+ @see ClientProtocol#getListing(String, byte[], boolean)]]>
+ </doc>
+ </method>
+ <method name="getFileInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file or directory.
+ @param src The string representation of the path to the file
+ @return object containing information regarding the file
+ or null if file not found
+
+ @see ClientProtocol#getFileInfo(String) for description of exceptions]]>
+ </doc>
+ </method>
+ <method name="getFileLinkInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file or directory. If src
+ refers to a symlink then the FileStatus of the link is returned.
+ @param src path to a file or directory.
+
+ For description of exceptions thrown
+ @see ClientProtocol#getFileLinkInfo(String)]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the checksum of a file.
+ @param src The file path
+ @return The checksum
+ @see DistributedFileSystem#getFileChecksum(Path)]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="namenode" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
+ <param name="socketFactory" type="javax.net.SocketFactory"/>
+ <param name="socketTimeout" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the checksum of a file.
+ @param src The file path
+ @return The checksum]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permissions to a file or directory.
+ @param src path name.
+ @param permission
+
+ @see ClientProtocol#setPermission(String, FsPermission)]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set file or directory owner.
+ @param src path name.
+ @param username user id.
+ @param groupname user group.
+
+ @see ClientProtocol#setOwner(String, String, String)]]>
+ </doc>
+ </method>
+ <method name="getDiskStatus" return="org.apache.hadoop.fs.FsStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@see ClientProtocol#getStats()]]>
+ </doc>
+ </method>
+ <method name="getMissingBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with no good replicas left. Normally should be
+ zero.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUnderReplicatedBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with one or more replicas missing.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCorruptBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with at least one replica marked corrupt.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+
+ @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refresh the hosts and exclude files. (Rereads them.)
+ See {@link ClientProtocol#refreshNodes()}
+ for more details.
+
+ @see ClientProtocol#refreshNodes()]]>
+ </doc>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps DFS data structures into specified file.
+
+ @see ClientProtocol#metaSave(String)]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@see ClientProtocol#finalizeUpgrade()]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction)]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a directory (or hierarchy of directories) with the given
+ name and permission.
+
+ @param src The path of the directory being created
+ @param permission The permission of the directory being created.
+ If permission == null, use {@link FsPermission#getDefault()}.
+ @param createParent create missing parent directory if true
+
+ @return True if the operation succeeds.
+
+ @see ClientProtocol#mkdirs(String, FsPermission, boolean)]]>
+ </doc>
+ </method>
+ <method name="primitiveMkdir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="absPermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same {{@link #mkdirs(String, FsPermission, boolean)} except
+ that the permissions has already been masked against umask.]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[set the modification and access time of a file
+
+ @see ClientProtocol#setTimes(String, long, long)]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SERVER_DEFAULTS_VALIDITY_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_BLOCK_ACQUIRE_FAILURES" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DFSClient can connect to a Hadoop Filesystem and
+ perform basic file tasks. It uses the ClientProtocol
+ to communicate with a NameNode daemon, and connects
+ directly to DataNodes to read/write block data.
+
+ Hadoop DFS users should obtain an instance of
+ DistributedFileSystem, which uses DFSClient to handle
+ filesystem tasks.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSClient -->
+ <!-- start class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
+ <class name="DFSClient.DFSDataInputStream" extends="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSClient.DFSDataInputStream" type="org.apache.hadoop.hdfs.DFSInputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getCurrentDatanode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the datanode from which the stream is currently reading.]]>
+ </doc>
+ </method>
+ <method name="getCurrentBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the block containing the target position.]]>
+ </doc>
+ </method>
+ <method name="getVisibleLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return The visible length of the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The Hdfs implementation of {@link FSDataInputStream}]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
+ <!-- start class org.apache.hadoop.hdfs.DFSConfigKeys -->
+ <class name="DFSConfigKeys" extends="org.apache.hadoop.fs.CommonConfigurationKeys"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSConfigKeys"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <field name="DFS_BLOCK_SIZE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_SIZE_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_REPLICATION_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_REPLICATION_DEFAULT" type="short"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_STREAM_BUFFER_SIZE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_STREAM_BUFFER_SIZE_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BYTES_PER_CHECKSUM_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BYTES_PER_CHECKSUM_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_WRITE_PACKET_SIZE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_BACKUP_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HTTP_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_MAX_OBJECTS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_MAX_OBJECTS_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SAFEMODE_EXTENSION_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SAFEMODE_EXTENSION_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT" type="float"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_CHECKPOINT_PERIOD_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_CHECKPOINT_SIZE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_UPGRADE_PERMISSION_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_HTTPS_NEED_AUTH_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_CACHED_CONN_RETRY_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_ACCESSTIME_PRECISION_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_MIN_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_MIN_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_PERMISSIONS_ENABLED_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_PERMISSIONS_ENABLED_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_PERMISSIONS_SUPERUSERGROUP_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_ADMIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_NAME_DIR_RESTORE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_LIST_LIMIT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_LIST_LIMIT_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DATA_DIR_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HTTPS_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_NAME_DIR_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_EDITS_DIR_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_READ_PREFETCH_SIZE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_RETRY_WINDOW_BASE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_METRICS_SESSION_ID_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HOST_NAME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_STORAGEID_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HOSTS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HOSTS_EXCLUDE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_SOCKET_TIMEOUT_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_CHECKPOINT_DIR_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BALANCER_MOVEDWINWIDTH_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BALANCER_MOVEDWINWIDTH_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DATA_DIR_PERMISSION_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DNS_INTERFACE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DNS_INTERFACE_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DNS_NAMESERVER_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DNS_NAMESERVER_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DU_RESERVED_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_DU_RESERVED_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HANDLER_COUNT_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HANDLER_COUNT_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HTTP_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_MAX_RECEIVER_THREADS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_NUMBLOCKS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_NUMBLOCKS_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SCAN_PERIOD_HOURS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_TRANSFERTO_ALLOWED_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_HEARTBEAT_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_HEARTBEAT_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HANDLER_COUNT_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_HANDLER_COUNT_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_SUPPORT_APPEND_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_SUPPORT_APPEND_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_HTTPS_ENABLE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_HTTPS_ENABLE_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HTTPS_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_HTTPS_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_IPC_ADDRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_IPC_ADDRESS_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_REPLICATION_MAX_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_REPLICATION_MAX_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DF_INTERVAL_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DF_INTERVAL_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCKREPORT_INTERVAL_MSEC_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCKREPORT_INITIAL_DELAY_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_IMAGE_COMPRESS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_IMAGE_COMPRESS_DEFAULT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_IMAGE_COMPRESSION_CODEC_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_IMAGE_COMPRESSION_CODEC_DEFAULT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_IMAGE_TRANSFER_RATE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_IMAGE_TRANSFER_RATE_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_PLUGINS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_STARTUP_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_PLUGINS_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_WEB_UGI_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_STARTUP_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_KEYTAB_FILE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_DATANODE_USER_NAME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_KEYTAB_FILE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_USER_NAME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_SECONDARY_NAMENODE_USER_NAME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class contains constants for configuration keys used
+ in hdfs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSConfigKeys -->
+ <!-- start class org.apache.hadoop.hdfs.DFSInputStream -->
+ <class name="DFSInputStream" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getFileLength" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCurrentDatanode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the datanode from which the stream is currently reading.]]>
+ </doc>
+ </method>
+ <method name="getCurrentBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the block containing the target position.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close it down!]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the entire buffer.]]>
+ </doc>
+ </method>
+ <method name="getBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dnAddr" type="java.net.InetSocketAddress"/>
+ <param name="file" type="java.lang.String"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <param name="startOffset" type="long"/>
+ <param name="len" type="long"/>
+ <param name="bufferSize" type="int"/>
+ <param name="verifyChecksum" type="boolean"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Retrieve a BlockReader suitable for reading.
+ This method will reuse the cached connection to the DN if appropriate.
+ Otherwise, it will create a new connection.
+
+ @param dnAddr Address of the datanode
+ @param file File location
+ @param block The Block object
+ @param blockToken The access token for security
+ @param startOffset The read offset, relative to block head
+ @param len The number of bytes to read
+ @param bufferSize The IO buffer size (not the client buffer size)
+ @param verifyChecksum Whether to verify checksum
+ @param clientName Client name
+ @return New BlockReader instance]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read bytes starting from the specified position.
+
+ @param position start read from this position
+ @param buffer read buffer
+ @param offset offset into buffer
+ @param length number of bytes to read
+
+ @return actual number of bytes read]]>
+ </doc>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to a new arbitrary location]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to given position on a node other than the current node. If
+ a node other than the current node is found, then returns true.
+ If another node could not be found, then returns false.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the size of the remaining available bytes
+ if the size is less than or equal to {@link Integer#MAX_VALUE},
+ otherwise, return {@link Integer#MAX_VALUE}.]]>
+ </doc>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We definitely don't support marks]]>
+ </doc>
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readLimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[DFSInputStream provides bytes from a named file. It handles
+ negotiation of the namenode and various datanodes as necessary.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSInputStream -->
+ <!-- start class org.apache.hadoop.hdfs.DFSUtil -->
+ <class name="DFSUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isValidName" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Whether the pathname is valid. Currently prohibits relative paths,
+ and names which contain a ":" or "/"]]>
+ </doc>
+ </method>
+ <method name="bytes2String" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Converts a byte array to a string using UTF8 encoding.]]>
+ </doc>
+ </method>
+ <method name="string2Bytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Converts a string to a byte array using UTF8 encoding.]]>
+ </doc>
+ </method>
+ <method name="byteArray2String" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathComponents" type="byte[][]"/>
+ <doc>
+ <![CDATA[Given a list of path components returns a path as a UTF8 String]]>
+ </doc>
+ </method>
+ <method name="bytes2byteArray" return="byte[][]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="separator" type="byte"/>
+ <doc>
+ <![CDATA[Splits the array of bytes into array of arrays of bytes
+ on byte separator
+ @param bytes the array of bytes to split
+ @param separator the delimiting byte]]>
+ </doc>
+ </method>
+ <method name="bytes2byteArray" return="byte[][]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="len" type="int"/>
+ <param name="separator" type="byte"/>
+ <doc>
+ <![CDATA[Splits first len bytes in bytes to array of arrays of bytes
+ on byte separator
+ @param bytes the byte array to split
+ @param len the number of bytes to split
+ @param separator the delimiting byte]]>
+ </doc>
+ </method>
+ <method name="locatedBlocks2Locations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlocks"/>
+ <doc>
+ <![CDATA[Convert a LocatedBlocks to BlockLocations[]
+ @param blocks a LocatedBlocks
+ @return an array of BlockLocations]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSUtil -->
+ <!-- start class org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator -->
+ <class name="DFSUtil.ErrorSimulator" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSUtil.ErrorSimulator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initializeErrorSimulationEvent"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numberOfEvents" type="int"/>
+ </method>
+ <method name="getErrorSimulation" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <method name="setErrorSimulation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <method name="clearErrorSimulation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[Utility class to facilitate junit test error simulation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator -->
+ <!-- start class org.apache.hadoop.hdfs.DistributedFileSystem -->
+ <class name="DistributedFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Permit paths which explicitly specify the default port.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Normalize paths that explicitly specify the default port.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ </method>
+ <method name="recoverLease" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start the lease recovery of a file
+
+ @param f a file
+ @return true if the file is already closed
+ @throws IOException if an error occurs]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="primitiveCreate" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="flag" type="java.util.EnumSet"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="bytesPerChecksum" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createNonRecursive" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="flag" type="java.util.EnumSet"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as create(), except fails if parent directory doesn't already exist.]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="concat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trg" type="org.apache.hadoop.fs.Path"/>
+ <param name="psrcs" type="org.apache.hadoop.fs.Path[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[THIS IS DFS only operations, it is not part of FileSystem
+ move blocks from srcs to trg
+ and delete srcs afterwards
+ all blocks should be the same size
+ @param trg existing file to append to
+ @param psrcs list of files (same block size, same replication)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}
+ This rename operation is guaranteed to be atomic.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setQuota"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="namespaceQuota" type="long"/>
+ <param name="diskspaceQuota" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set a directory's quotas
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List all the entries of a directory
+
+ Note that this operation is not atomic for a large directory.
+ The entries of a directory may be fetched from NameNode multiple times.
+ It only guarantees that each name occurs once if a directory
+ undergoes changes between the calls.]]>
+ </doc>
+ </method>
+ <method name="listLocatedStatus" return="org.apache.hadoop.fs.RemoteIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a directory with given name and permission, only when
+ parent directory exists.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="primitiveMkdir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getClient" return="org.apache.hadoop.hdfs.DFSClient"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStatus" return="org.apache.hadoop.fs.FsStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDiskStatus" return="org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ instead">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the disk usage of the filesystem, including total capacity,
+ used space, and remaining space
+ @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ instead]]>
+ </doc>
+ </method>
+ <method name="getRawCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ instead">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw capacity of the filesystem, disregarding
+ replication.
+ @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ instead]]>
+ </doc>
+ </method>
+ <method name="getRawUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ instead">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw used space in the filesystem, disregarding
+ replication.
+ @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ instead]]>
+ </doc>
+ </method>
+ <method name="getMissingBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with no good replicas left. Normally should be
+ zero.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUnderReplicatedBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with one of more replica missing.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCorruptBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns count of blocks with at least one replica marked corrupt.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDataNodeStats" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return statistics for each datanode.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
+ FSConstants.SafeModeAction)]]>
+ </doc>
+ </method>
+ <method name="saveNamespace"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Save namespace image.
+
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
+ </doc>
+ </method>
+ <method name="restoreFailedStorage" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <doc>
+ <![CDATA[enable/disable/check restoreFaileStorage
+
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refreshes the list of hosts and excluded hosts from the configured
+ files.]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalize previously upgraded files system state.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[We need to find the blocks that didn't match. Likely only one
+ is corrupt but we will report both to the namenode. In the future,
+ we can consider figuring out exactly which block is corrupt.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the stat information about the file.
+ @throws FileNotFoundException if the file does not exist.]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc }]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc }]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc }]]>
+ </doc>
+ </method>
+ <method name="getDefaultPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="renewer" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getDelegationToken(String)}">
+ <param name="renewer" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a valid Delegation Token.
+
+ @param renewer Name of the designated renewer for the token
+ @return Token<DelegationTokenIdentifier>
+ @throws IOException
+ @deprecated use {@link #getDelegationToken(String)}]]>
+ </doc>
+ </method>
+ <method name="renewDelegationToken" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renew an existing delegation token.
+
+ @param token delegation token obtained earlier
+ @return the new expiration time
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cancelDelegationToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Cancel an existing delegation token.
+
+ @param token delegation token
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implementation of the abstract FileSystem for the DFS system.
+ This object is the way end-user code interacts with a Hadoop
+ DistributedFileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DistributedFileSystem -->
+ <!-- start class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
+ <class name="DistributedFileSystem.DiskStatus" extends="org.apache.hadoop.fs.FsStatus"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.fs.FsStatus} instead">
+ <constructor name="DistributedFileSystem.DiskStatus" type="org.apache.hadoop.fs.FsStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DistributedFileSystem.DiskStatus" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[@deprecated Use {@link org.apache.hadoop.fs.FsStatus} instead]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
+ <!-- start class org.apache.hadoop.hdfs.HdfsConfiguration -->
+ <class name="HdfsConfiguration" extends="org.apache.hadoop.conf.Configuration"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HdfsConfiguration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="HdfsConfiguration" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="HdfsConfiguration" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method is here so that when invoked, HdfsConfiguration is class-loaded if
+ it hasn't already been previously loaded. Upon loading the class, the static
+ initializer block above will be executed to add the deprecated keys and to add
+ the default resources. It is safe for this method to be called multiple times
+ as the static initializer block will only get invoked once.
+
+ This replaces the previously, dangerous practice of other classes calling
+ Configuration.addDefaultResource("hdfs-default.xml") directly without loading
+ HdfsConfiguration class first, thereby skipping the key deprecation]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Adds deprecated keys into the configuration.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HdfsConfiguration -->
+ <!-- start class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
+ <class name="HDFSPolicyProvider" extends="org.apache.hadoop.security.authorize.PolicyProvider"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HDFSPolicyProvider"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getServices" return="org.apache.hadoop.security.authorize.Service[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[{@link PolicyProvider} for HDFS protocols.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
+ <!-- start class org.apache.hadoop.hdfs.HftpFileSystem -->
+ <class name="HftpFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HftpFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getDateFormat" return="java.text.SimpleDateFormat"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCanonicalServiceName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="renewer" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="openConnection" return="java.net.HttpURLConnection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="query" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open an HTTP connection to the namenode to read file data and metadata.
+ @param path The path component of the URL
+ @param query The query component of the URL]]>
+ </doc>
+ </method>
+ <method name="updateQuery" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="query" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="nnAddr" type="java.net.InetSocketAddress"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ugi" type="org.apache.hadoop.security.UserGroupInformation"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ran" type="java.util.Random"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="HFTP_TIMEZONE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HFTP_DATE_FORMAT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HFTP_SERVICE_NAME_KEY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="df" type="java.lang.ThreadLocal"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of a protocol for accessing filesystems over HTTP.
+ The following implementation provides a limited, read-only interface
+ to a filesystem over HTTP.
+ @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
+ @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HftpFileSystem -->
+ <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem -->
+ <class name="HsftpFileSystem" extends="org.apache.hadoop.hdfs.HftpFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HsftpFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="openConnection" return="java.net.HttpURLConnection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="query" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An implementation of a protocol for accessing filesystems over HTTPS. The
+ following implementation provides a limited, read-only interface to a
+ filesystem over HTTPS.
+
+ @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
+ @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem -->
+ <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
+ <class name="HsftpFileSystem.DummyHostnameVerifier" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="javax.net.ssl.HostnameVerifier"/>
+ <constructor name="HsftpFileSystem.DummyHostnameVerifier"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="verify" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hostname" type="java.lang.String"/>
+ <param name="session" type="javax.net.ssl.SSLSession"/>
+ </method>
+ <doc>
+ <![CDATA[Dummy hostname verifier that is used to bypass hostname checking]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
+ <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem.DummyTrustManager -->
+ <class name="HsftpFileSystem.DummyTrustManager" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="javax.net.ssl.X509TrustManager"/>
+ <constructor name="HsftpFileSystem.DummyTrustManager"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="checkClientTrusted"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="chain" type="java.security.cert.X509Certificate[]"/>
+ <param name="authType" type="java.lang.String"/>
+ </method>
+ <method name="checkServerTrusted"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="chain" type="java.security.cert.X509Certificate[]"/>
+ <param name="authType" type="java.lang.String"/>
+ </method>
+ <method name="getAcceptedIssuers" return="java.security.cert.X509Certificate[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Dummy trustmanager that is used to trust all server certificates]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem.DummyTrustManager -->
+ <doc>
+ <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
+Google's <a href="http://labs.google.com/papers/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files
+have strictly one writer at any one time. Bytes are always appended
+to the end of the writer's stream. There is no notion of "record appends"
+or "mutations" that are then checked or reordered. Writers simply emit
+a byte stream. That byte stream is guaranteed to be stored in the
+order written.</p>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+ <!-- start class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
+ <class name="AlreadyBeingCreatedException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AlreadyBeingCreatedException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The exception that happens when you ask to create a file that already
+ is being created, but is not closed yet.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.Block -->
+ <class name="Block" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <constructor name="Block"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Block" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Block" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Block" type="org.apache.hadoop.hdfs.protocol.Block"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Block" type="java.io.File, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Find the blockid from the given filename]]>
+ </doc>
+ </constructor>
+ <method name="isBlockFilename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="java.io.File"/>
+ </method>
+ <method name="filename2id" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="isMetaFilename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metaFile" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get generation stamp from the name of the metafile name]]>
+ </doc>
+ </method>
+ <method name="getBlockId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metaFile" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the blockId from the name of the metafile name]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blkid" type="long"/>
+ <param name="len" type="long"/>
+ <param name="genStamp" type="long"/>
+ </method>
+ <method name="getBlockId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setBlockId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bid" type="long"/>
+ </method>
+ <method name="getBlockName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumBytes" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setNumBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="long"/>
+ </method>
+ <method name="getGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setGenerationStamp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stamp" type="long"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="BLOCK_FILE_PREFIX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="METADATA_EXTENSION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockFilePattern" type="java.util.regex.Pattern"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="metaFilePattern" type="java.util.regex.Pattern"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Block is a Hadoop FS primitive, identified by a
+ long.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.Block -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
+ <class name="BlockListAsLongs" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable"/>
+ <constructor name="BlockListAsLongs" type="java.util.List, java.util.List"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create block report from finalized and under construction lists of blocks.
+
+ @param finalized - list of finalized blocks
+ @param uc - list of under construction blocks]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockListAsLongs"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BlockListAsLongs" type="long[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+ @param iBlockList - BlockListALongs create from this long[] parameter]]>
+ </doc>
+ </constructor>
+ <method name="getBlockListAsLongs" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an iterator over blocks in the block report.]]>
+ </doc>
+ </method>
+ <method name="getBlockReportIterator" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns {@link BlockReportIterator}.]]>
+ </doc>
+ </method>
+ <method name="getNumberOfBlocks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of blocks
+ @return - the number of blocks]]>
+ </doc>
+ </method>
+ <method name="getBlockId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[The block-id of the indexTh block
+ @param index - the block whose block-id is desired
+ @return the block-id]]>
+ </doc>
+ </method>
+ <method name="getBlockLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[The block-len of the indexTh block
+ @param index - the block whose block-len is desired
+ @return - the block-len]]>
+ </doc>
+ </method>
+ <method name="getBlockGenStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[The generation stamp of the indexTh block
+ @param index - the block whose block-len is desired
+ @return - the generation stamp]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class provides an interface for accessing list of blocks that
+ has been implemented as long[].
+ This class is useful for block report. Rather than send block reports
+ as a Block[] we can send it as a long[].
+
+ The structure of the array is as follows:
+ 0: the length of the finalized replica list;
+ 1: the length of the under-construction replica list;
+ - followed by finalized replica list where each replica is represented by
+ 3 longs: one for the blockId, one for the block length, and one for
+ the generation stamp;
+ - followed by the invalid replica represented with three -1s;
+ - followed by the under-construction replica list where each replica is
+   represented by 4 longs: three for the block id, length, generation
+   stamp, and the fourth for the replica state.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator -->
+ <class name="BlockListAsLongs.BlockReportIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Iterator"/>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCurrentReplicaState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the state of the current replica.
+ The state corresponds to the replica returned
+ by the latest {@link #next()}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Iterates over blocks in the block report.
+ Avoids object allocation on each iteration.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator -->
+ <!-- start interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
+ <interface name="ClientDatanodeProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="getReplicaVisibleLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the visible length of a replica.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[6: recoverBlock() removed.]]>
+ </doc>
+ </field>
+ <doc>
+  <![CDATA[A client-datanode protocol for block recovery]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
+ <!-- start interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
+ <interface name="ClientProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="offset" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get locations of the blocks of the specified file within the specified range.
+ DataNode locations for each block are sorted by
+ the proximity to the client.
+ <p>
+ Return {@link LocatedBlocks} which contains
+ file length, blocks and their locations.
+ DataNode locations for each block are sorted by
+ the distance to the client's address.
+ <p>
+ The client will then have to contact
+ one of the indicated DataNodes to obtain the actual data.
+
+ @param src file name
+ @param offset range start offset
+ @param length range length
+
+ @return file length and array of blocks with their locations
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If file <code>src</code> does not exist
+ @throws UnresolvedLinkException If <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get server default values for a number of configuration params.
+ @return a set of server default configuration values
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="create"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="flag" type="org.apache.hadoop.io.EnumSetWritable"/>
+ <param name="createParent" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="AlreadyBeingCreatedException" type="org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException"/>
+ <exception name="DSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.DSQuotaExceededException"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="NSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.NSQuotaExceededException"/>
+ <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new file entry in the namespace.
+ <p>
+ This will create an empty file specified by the source path.
+ The path should reflect a full path originated at the root.
+ The name-node does not have a notion of "current" directory for a client.
+ <p>
+ Once created, the file is visible and available for read to other clients.
+ Although, other clients cannot {@link #delete(String, boolean)}, re-create or
+ {@link #rename(String, String)} it until the file is completed
+ or explicitly as a result of lease expiration.
+ <p>
+ Blocks have a maximum size. Clients that intend to create
+ multi-block files must also use
+ {@link #addBlock(String, String, Block, DatanodeInfo[])}
+
+ @param src path of the file being created.
+ @param masked masked permission.
+ @param clientName name of the current client.
+ @param flag indicates whether the file should be
+ overwritten if it already exists or create if it does not exist or append.
+ @param createParent create missing parent directory if true
+ @param replication block replication factor.
+ @param blockSize maximum block size.
+
+ @throws AccessControlException If access is denied
+ @throws AlreadyBeingCreatedException if <code>src</code> is already being created,
+           but is not closed yet.
+ @throws DSQuotaExceededException If file creation violates disk space
+ quota restriction
+ @throws FileAlreadyExistsException If file <code>src</code> already exists
+ @throws FileNotFoundException If parent of <code>src</code> does not exist
+ and <code>createParent</code> is false
+ @throws ParentNotDirectoryException If parent of <code>src</code> is not a
+ directory.
+ @throws NSQuotaExceededException If file creation violates name space
+ quota restriction
+ @throws SafeModeException create not allowed in safemode
+ @throws UnresolvedLinkException If <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred
+
+ RuntimeExceptions:
+ @throws InvalidPathException Path <code>src</code> is invalid]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="DSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.DSQuotaExceededException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to the end of the file.
+ @param src path of the file being created.
+ @param clientName name of the current client.
+ @return information about the last partial block if any.
+ @throws AccessControlException if permission to append file is
+ denied by the system. As usually on the client side the exception will
+ be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
+ Allows appending to an existing file if the server is
+ configured with the parameter dfs.support.append set to true, otherwise
+ throws an IOException.
+
+ @throws AccessControlException If permission to append to file is denied
+ @throws FileNotFoundException If file <code>src</code> is not found
+ @throws DSQuotaExceededException If append violates disk space quota
+ restriction
+ @throws SafeModeException append not allowed in safemode
+ @throws UnresolvedLinkException If <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred.
+
+ RuntimeExceptions:
+ @throws UnsupportedOperationException if append is not supported]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="DSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.DSQuotaExceededException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+ <p>
+ The NameNode sets replication to the new value and returns.
+ The actual block replication is not expected to be performed during
+ this method call. The blocks will be populated or removed in the
+ background as the result of the routine block maintenance procedures.
+
+ @param src file name
+ @param replication new replication
+
+ @return true if successful;
+ false if file does not exist or is a directory
+
+ @throws AccessControlException If access is denied
+ @throws DSQuotaExceededException If replication violates disk space
+ quota restriction
+ @throws FileNotFoundException If file <code>src</code> is not found
+ @throws SafeModeException not allowed in safemode
+ @throws UnresolvedLinkException if <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permissions for an existing file/directory.
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If file <code>src</code> is not found
+ @throws SafeModeException not allowed in safemode
+ @throws UnresolvedLinkException If <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set Owner of a path (i.e. a file or a directory).
+ The parameters username and groupname cannot both be null.
+ @param src
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If file <code>src</code> is not found
+ @throws SafeModeException not allowed in safemode
+ @throws UnresolvedLinkException If <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="abandonBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[The client can give up on a block by calling abandonBlock().
+ The client can then
+ either obtain a new block, or complete or abandon the file.
+ Any partial writes to the block will be discarded.
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException file <code>src</code> is not found
+ @throws UnresolvedLinkException If <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="previous" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="excludeNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="NotReplicatedYetException" type="org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A client that wants to write an additional block to the
+ indicated filename (which must currently be open for writing)
+ should call addBlock().
+
+ addBlock() allocates a new block and the set of datanodes the block
+ data should be replicated to.
+
+ addBlock() also commits the previous block by reporting
+ to the name-node the actual generation stamp and the length
+ of the block that the client has transmitted to data-nodes.
+
+ @param src the file being created
+ @param clientName the name of the client that adds the block
+ @param previous previous block
+ @param excludeNodes a list of nodes that should not be
+ allocated for the current block
+
+ @return LocatedBlock allocated block information.
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If file <code>src</code> is not found
+ @throws NotReplicatedYetException previous blocks of the file are not
+ replicated yet. Blocks cannot be added until replication
+ completes.
+ @throws SafeModeException create not allowed in safemode
+ @throws UnresolvedLinkException If <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="complete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="last" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client is done writing data to the given filename, and would
+ like to complete it.
+
+ The function returns whether the file has been closed successfully.
+ If the function returns false, the caller should try again.
+
+ close() also commits the last block of the file by reporting
+ to the name-node the actual generation stamp and the length
+ of the block that the client has transmitted to data-nodes.
+
+ A call to complete() will not return true until all the file's
+ blocks have been replicated the minimum number of times. Thus,
+ DataNode failures may cause a client to call complete() several
+ times before succeeding.
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If file <code>src</code> is not found
+ @throws SafeModeException create not allowed in safemode
+ @throws UnresolvedLinkException If <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client wants to report corrupted blocks (blocks with specified
+ locations on datanodes).
+ @param blocks Array of located blocks to report]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #rename(String, String, Options.Rename...)} instead.">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename an item in the file system namespace.
+ @param src existing file or directory name.
+ @param dst new name.
+ @return true if successful, or false if the old name does not exist
+ or if the new name already belongs to the namespace.
+
+ @throws IOException an I/O error occurred
+
+ @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.]]>
+ </doc>
+ </method>
+ <method name="concat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trg" type="java.lang.String"/>
+ <param name="srcs" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Moves blocks from srcs to trg and deletes srcs
+
+ @param trg existing file
+ @param srcs - list of existing files (same block size, same replication)
+ @throws IOException if some arguments are invalid
+ @throws UnresolvedLinkException if <code>trg</code> or <code>srcs</code>
+ contains a symlink]]>
+ </doc>
+ </method>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="DSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.DSQuotaExceededException"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="NSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.NSQuotaExceededException"/>
+ <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename src to dst.
+ <ul>
+ <li>Fails if src is a file and dst is a directory.
+ <li>Fails if src is a directory and dst is a file.
+ <li>Fails if the parent of dst does not exist or is a file.
+ </ul>
+ <p>
+ Without OVERWRITE option, rename fails if the dst already exists.
+ With OVERWRITE option, rename overwrites the dst, if it is a file
+ or an empty directory. Rename fails if dst is a non-empty directory.
+ <p>
+ This implementation of rename is atomic.
+ <p>
+ @param src existing file or directory name.
+ @param dst new name.
+ @param options Rename options
+
+ @throws AccessControlException If access is denied
+ @throws DSQuotaExceededException If rename violates disk space
+ quota restriction
+ @throws FileAlreadyExistsException If <code>dst</code> already exists and
+ <code>options</code> does not include the {@link Rename#OVERWRITE}
+ option.
+ @throws FileNotFoundException If <code>src</code> does not exist
+ @throws NSQuotaExceededException If rename violates namespace
+ quota restriction
+ @throws ParentNotDirectoryException If parent of <code>dst</code>
+ is not a directory
+ @throws SafeModeException rename not allowed in safemode
+ @throws UnresolvedLinkException If <code>src</code> or
+ <code>dst</code> contains a symlink
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #delete(String, boolean)} instead.">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Delete the given file or directory from the file system.
+ <p>
+ Any blocks belonging to the deleted files will be garbage-collected.
+
+ @param src existing name.
+ @return true only if the existing file or directory was actually removed
+ from the file system.
+ @throws UnresolvedLinkException if <code>src</code> contains a symlink.
+ @deprecated use {@link #delete(String, boolean)} instead.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete the given file or directory from the file system.
+ <p>
+ Same as delete, but provides a way to avoid accidentally
+ deleting non-empty directories programmatically.
+ @param src existing name
+ @param recursive if true deletes a non empty directory recursively,
+ else throws an exception.
+ @return true only if the existing file or directory was actually removed
+ from the file system.
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If file <code>src</code> is not found
+ @throws SafeModeException create not allowed in safemode
+ @throws UnresolvedLinkException If <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="NSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.NSQuotaExceededException"/>
+ <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a directory (or hierarchy of directories) with the given
+ name and permission.
+
+ @param src The path of the directory being created
+ @param masked The masked permission of the directory being created
+ @param createParent create missing parent directory if true
+
+ @return True if the operation success.
+
+ @throws AccessControlException If access is denied
+ @throws FileAlreadyExistsException If <code>src</code> already exists
+ @throws FileNotFoundException If parent of <code>src</code> does not exist
+ and <code>createParent</code> is false
+ @throws NSQuotaExceededException If file creation violates quota restriction
+ @throws ParentNotDirectoryException If parent of <code>src</code>
+ is not a directory
+ @throws SafeModeException create not allowed in safemode
+ @throws UnresolvedLinkException If <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred.
+
+ RunTimeExceptions:
+ @throws InvalidPathException If <code>src</code> is invalid]]>
+ </doc>
+ </method>
+ <method name="getListing" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="startAfter" type="byte[]"/>
+ <param name="needLocation" type="boolean"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a partial listing of the indicated directory
+
+ @param src the directory name
+ @param startAfter the name to start listing after encoded in java UTF8
+ @param needLocation if the FileStatus should contain block locations
+
+ @return a partial listing starting after startAfter
+
+ @throws AccessControlException permission denied
+ @throws FileNotFoundException file <code>src</code> is not found
+ @throws UnresolvedLinkException If <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="renewLease"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Client programs can cause stateful changes in the NameNode
+ that affect other clients. A client may obtain a file and
+ neither abandon nor complete it. A client might hold a series
+ of locks that prevent other clients from proceeding.
+ Clearly, it would be bad if a client held a bunch of locks
+ that it never gave up. This can happen easily if the client
+ dies unexpectedly.
+ <p>
+ So, the NameNode will revoke the locks and live file-creates
+ for clients that it thinks have died. A client tells the
+ NameNode that it is still alive by periodically calling
+ renewLease(). If a certain amount of time passes since
+ the last call to renewLease(), the NameNode assumes the
+ client has died.
+
+ @throws AccessControlException permission denied
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="recoverLease" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start lease recovery.
+ Lightweight NameNode operation to trigger lease recovery
+
+ @param src path of the file to start lease recovery
+ @param clientName name of the current client
+ @return true if the file is already closed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getStats" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a set of statistics about the filesystem.
+ Right now, the following values are returned.
+ <ul>
+ <li> [0] contains the total storage capacity of the system, in bytes.</li>
+ <li> [1] contains the total used space of the system, in bytes.</li>
+ <li> [2] contains the available storage of the system, in bytes.</li>
+ <li> [3] contains number of under replicated blocks in the system.</li>
+ <li> [4] contains number of blocks with a corrupt replica. </li>
+ <li> [5] contains number of blocks without any good replicas left. </li>
+ </ul>
+ Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
+ actual numbers to index into the array.]]>
+ </doc>
+ </method>
+ <method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a report on the system's current datanodes.
+ One DatanodeInfo object is returned for each DataNode.
+ Return live datanodes if type is LIVE; dead datanodes if type is DEAD;
+ otherwise all datanodes if type is ALL.]]>
+ </doc>
+ </method>
+ <method name="getPreferredBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Get the block size for the given file.
+ @param filename The name of the file
+ @return The number of bytes in each block
+ @throws IOException
+ @throws UnresolvedLinkException if the path contains a symlink.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+ <p>
+ Safe mode is a name node state when it
+ <ol><li>does not accept changes to name space (read-only), and</li>
+ <li>does not replicate or delete blocks.</li></ol>
+
+ <p>
+ Safe mode is entered automatically at name node startup.
+ Safe mode can also be entered manually using
+ {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}.
+ <p>
+ At startup the name node accepts data node reports collecting
+ information about block locations.
+ In order to leave safe mode it needs to collect a configurable
+ percentage called threshold of blocks, which satisfy the minimal
+ replication condition.
+ The minimal replication condition is that each block must have at least
+ <tt>dfs.namenode.replication.min</tt> replicas.
+ When the threshold is reached the name node extends safe mode
+ for a configurable amount of time
+ to let the remaining data nodes to check in before it
+ will start replicating missing blocks.
+ Then the name node leaves safe mode.
+ <p>
+ If safe mode is turned on manually using
+ {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
+ then the name node stays in safe mode until it is manually turned off
+ using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
+ Current state of the name node can be verified using
+ {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
+ <h4>Configuration parameters:</h4>
+ <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
+ <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
+ <tt>dfs.namenode.replication.min</tt> is the minimal replication parameter.
+
+ <h4>Special cases:</h4>
+ The name node does not enter safe mode at startup if the threshold is
+ set to 0 or if the name space is empty.<br>
+ If the threshold is set to 1 then all blocks need to have at least
+ minimal replication.<br>
+ If the threshold value is greater than 1 then the name node will not be
+ able to turn off safe mode automatically.<br>
+ Safe mode can always be turned off manually.
+
+ @param action <ul> <li>0 leave safe mode;</li>
+ <li>1 enter safe mode;</li>
+ <li>2 get safe mode state.</li></ul>
+ @return <ul><li>0 if the safe mode is OFF or</li>
+ <li>1 if the safe mode is ON.</li></ul>
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="saveNamespace"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Save namespace image.
+ <p>
+ Saves current namespace into storage directories and resets the edits log.
+ Requires superuser privilege and safe mode.
+
+ @throws AccessControlException if the superuser privilege is violated.
+ @throws IOException if image creation failed.]]>
+ </doc>
+ </method>
+ <method name="restoreFailedStorage" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <doc>
+ <![CDATA[Enable/Disable restore failed storage.
+ <p>
+ sets flag to enable restore of failed storage replicas
+
+ @throws AccessControlException if the superuser privilege is violated.]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Tells the namenode to reread the hosts and exclude files.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalize previous upgrade.
+ Remove file system state saved during the upgrade.
+ The upgrade will become irreversible.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report distributed upgrade progress or force current upgrade to proceed.
+
+ @param action {@link FSConstants.UpgradeAction} to perform
+ @return upgrade status information or null if no upgrades are in progress
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps namenode data structures into specified file. If the file
+ already exists, then append.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFileInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file or directory.
+ @param src The string representation of the path to the file
+
+ @return object containing information regarding the file
+ or null if file not found
+ @throws AccessControlException permission denied
+ @throws FileNotFoundException file <code>src</code> is not found
+ @throws UnresolvedLinkException if the path contains a symlink.
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="getFileLinkInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file or directory. If the path
+ refers to a symlink then the FileStatus of the symlink is returned.
+ @param src The string representation of the path to the file
+
+ @return object containing information regarding the file
+ or null if file not found
+
+ @throws AccessControlException permission denied
+ @throws UnresolvedLinkException if <code>src</code> contains a symlink
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get {@link ContentSummary} rooted at the specified directory.
+ @param path The string representation of the path
+
+ @throws AccessControlException permission denied
+ @throws FileNotFoundException file <code>path</code> is not found
+ @throws UnresolvedLinkException if <code>path</code> contains a symlink.
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="setQuota"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="namespaceQuota" type="long"/>
+ <param name="diskspaceQuota" type="long"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the quota for a directory.
+ @param path The string representation of the path to the directory
+ @param namespaceQuota Limit on the number of names in the tree rooted
+ at the directory
+ @param diskspaceQuota Limit on disk space occupied all the files under
+ this directory.
+ <br><br>
+
+ The quota can have three types of values : (1) 0 or more will set
+ the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies
+ the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET}
+ implies the quota will be reset. Any other value is a runtime error.
+
+ @throws AccessControlException permission denied
+ @throws FileNotFoundException file <code>path</code> is not found
+ @throws QuotaExceededException if the directory size
+ is greater than the given quota
+ @throws UnresolvedLinkException if the <code>path</code> contains a symlink.
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="fsync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="client" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write all metadata for this file into persistent storage.
+ The file must be currently open for writing.
+ @param src The string representation of the path
+ @param client The string representation of the client
+
+ @throws AccessControlException permission denied
+ @throws FileNotFoundException file <code>src</code> is not found
+ @throws UnresolvedLinkException if <code>src</code> contains a symlink.
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets the modification and access time of the file to the specified time.
+ @param src The string representation of the path
+ @param mtime The number of milliseconds since Jan 1, 1970.
+ Setting mtime to -1 means that modification time should not be set
+ by this call.
+ @param atime The number of milliseconds since Jan 1, 1970.
+ Setting atime to -1 means that access time should not be set
+ by this call.
+
+ @throws AccessControlException permission denied
+ @throws FileNotFoundException file <code>src</code> is not found
+ @throws UnresolvedLinkException if <code>src</code> contains a symlink.
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="link" type="java.lang.String"/>
+ <param name="dirPerm" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create symlink to a file or directory.
+ @param target The path of the destination that the
+ link points to.
+ @param link The path of the link being created.
+ @param dirPerm permissions to use when creating parent directories
+ @param createParent - if true then missing parent dirs are created
+ if false then parent must exist
+
+ @throws AccessControlException permission denied
+ @throws FileAlreadyExistsException If file <code>link</code> already exists
+ @throws FileNotFoundException If parent of <code>link</code> does not exist
+ and <code>createParent</code> is false
+ @throws ParentNotDirectoryException If parent of <code>link</code> is not a
+ directory.
+ @throws UnresolvedLinkException if <code>link</code> contains a symlink.
+ @throws IOException If an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="getLinkTarget" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the target of the given symlink. If there is an intermediate
+ symlink in the path (ie a symlink leading up to the final path component)
+ then the given path is returned with this symlink resolved.
+
+ @param path The path with a link that needs resolution.
+ @return The path after resolving the first symbolic link in the path.
+ @throws AccessControlException permission denied
+ @throws FileNotFoundException If <code>path</code> does not exist
+ @throws IOException If the given path does not refer to a symlink
+ or an I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="updateBlockForPipeline" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a new generation stamp together with an access token for
+ a block under construction
+
+ This method is called only when a client needs to recover a failed
+ pipeline or set up a pipeline for appending to a block.
+
+ @param block a block
+ @param clientName the name of the client
+ @return a located block with a new generation stamp and an access token
+ @throws IOException if any error occurs]]>
+ </doc>
+ </method>
+ <method name="updatePipeline"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clientName" type="java.lang.String"/>
+ <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update a pipeline for a block under construction
+
+ @param clientName the name of the client
+ @param oldBlock the old block
+ @param newBlock the new block containing new generation stamp and length
+ @param newNodes datanodes in the pipeline
+ @throws IOException if any error occurs]]>
+ </doc>
+ </method>
+ <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="renewer" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a valid Delegation Token.
+
+ @param renewer the designated renewer for the token
+ @return Token<DelegationTokenIdentifier>
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="renewDelegationToken" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renew an existing delegation token.
+
+ @param token delegation token obtained earlier
+ @return the new expiration time
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cancelDelegationToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Cancel an existing delegation token.
+
+ @param token delegation token
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compared to the previous version the following changes have been introduced:
+ (Only the latest change is reflected.
+ The log of historical changes can be retrieved from the svn).
+      65: recoverLease returns whether the file is closed or not]]>
+ </doc>
+ </field>
+ <field name="GET_STATS_CAPACITY_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_USED_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_REMAINING_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_UNDER_REPLICATED_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_CORRUPT_BLOCKS_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GET_STATS_MISSING_BLOCKS_IDX" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[ClientProtocol is used by user code via
+ {@link org.apache.hadoop.hdfs.DistributedFileSystem} class to communicate
+ with the NameNode. User code can manipulate the directory namespace,
+ as well as open/close file streams, etc.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeID -->
+ <class name="DatanodeID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Equivalent to DatanodeID("").]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Equivalent to DatanodeID(nodeName, "", -1, -1).]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeID copy constructor
+
+ @param from]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="java.lang.String, java.lang.String, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create DatanodeID
+ @param nodeName (hostname:portNumber)
+ @param storageID data storage ID
+ @param infoPort info server port
+ @param ipcPort ipc server port]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return hostname:portNumber.]]>
+ </doc>
+ </method>
+ <method name="getStorageID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return data storage ID.]]>
+ </doc>
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return infoPort (the port to which the HTTP server is bound)]]>
+ </doc>
+ </method>
+ <method name="getIpcPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return ipcPort (the port to which the IPC server is bound)]]>
+ </doc>
+ </method>
+ <method name="setStorageID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="storageID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[sets the data storage ID.]]>
+ </doc>
+ </method>
+ <method name="getHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return hostname only, with no :portNumber suffix.]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="to" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="updateRegInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <doc>
+ <![CDATA[Update fields when a new registration request comes in.
+ Note that this does not update storageID.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <doc>
+ <![CDATA[Comparable.
+ Basis of compare is the String name (host:portNumber) only.
+ @param that
+ @return as specified by Comparable.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="EMPTY_ARRAY" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="name" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="storageID" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="infoPort" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ipcPort" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeID is composed of the data node
+ name (hostname:portNumber) and the data storage ID,
+ which it currently represents.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeID -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
+ <class name="DatanodeInfo" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.Node"/>
+ <constructor name="DatanodeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw capacity.]]>
+ </doc>
+ </method>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The used space by the data node.]]>
+ </doc>
+ </method>
+ <method name="getNonDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The used space by the data node.]]>
+ </doc>
+ </method>
+ <method name="getDfsUsedPercent" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The used space by the data node as percentage of present capacity]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw free space.]]>
+ </doc>
+ </method>
+ <method name="getRemainingPercent" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The remaining space as percentage of configured capacity.]]>
+ </doc>
+ </method>
+ <method name="getLastUpdate" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The time when this information was accurate.]]>
+ </doc>
+ </method>
+ <method name="getXceiverCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[number of active connections]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="capacity" type="long"/>
+ <doc>
+ <![CDATA[Sets raw capacity.]]>
+ </doc>
+ </method>
+ <method name="setRemaining"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="remaining" type="long"/>
+ <doc>
+ <![CDATA[Sets raw free space.]]>
+ </doc>
+ </method>
+ <method name="setLastUpdate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lastUpdate" type="long"/>
+ <doc>
+ <![CDATA[Sets time when this information was accurate.]]>
+ </doc>
+ </method>
+ <method name="setXceiverCount"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="xceiverCount" type="int"/>
+ <doc>
+ <![CDATA[Sets number of active connections]]>
+ </doc>
+ </method>
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[rack name]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the rack name]]>
+ </doc>
+ </method>
+ <method name="getHostName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setHostName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ </method>
+ <method name="getDatanodeReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A formatted string for reporting the status of the DataNode.]]>
+ </doc>
+ </method>
+ <method name="dumpDatanode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A formatted string for printing the status of the DataNode.]]>
+ </doc>
+ </method>
+ <method name="startDecommission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Start decommissioning a node, regardless of its old state.]]>
+ </doc>
+ </method>
+ <method name="stopDecommission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Stop decommissioning a node, regardless of its old state.]]>
+ </doc>
+ </method>
+ <method name="isDecommissionInProgress" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the node is in the process of being decommissioned]]>
+ </doc>
+ </method>
+ <method name="isDecommissioned" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the node has been decommissioned.]]>
+ </doc>
+ </method>
+ <method name="setDecommissioned"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sets the admin state to indicate that decommission is complete.]]>
+ </doc>
+ </method>
+ <method name="setAdminState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="newState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"/>
+ <doc>
+ <![CDATA[Sets the admin state of this node.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="level" type="int"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a DatanodeInfo]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <field name="capacity" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dfsUsed" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="remaining" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="lastUpdate" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="xceiverCount" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="location" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="hostName" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[HostName as supplied by the datanode during registration as its
+ name. Namenode uses datanode IP address as the name.]]>
+ </doc>
+ </field>
+ <field name="adminState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeInfo represents the status of a DataNode.
+ This object is used for communication in the
+ Datanode Protocol and the Client Protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
+ <class name="DatanodeInfo.AdminStates" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NORMAL" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DECOMMISSION_INPROGRESS" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DECOMMISSIONED" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
+ <!-- start interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
+ <interface name="DataTransferProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="DATA_TRANSFER_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Version for data transfers between clients and datanodes.
+       This should change when the serialization of DatanodeInfo changes,
+       not just when the protocol changes. It is not very obvious.]]>
+ </doc>
+ </field>
+ <field name="OP_WRITE_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Op.WRITE_BLOCK instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Op.WRITE_BLOCK instead.]]>
+ </doc>
+ </field>
+ <field name="OP_READ_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Op.READ_BLOCK instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Op.READ_BLOCK instead.]]>
+ </doc>
+ </field>
+ <field name="OP_READ_METADATA" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="As of version 15, OP_READ_METADATA is no longer supported.">
+ <doc>
+ <![CDATA[@deprecated As of version 15, OP_READ_METADATA is no longer supported.]]>
+ </doc>
+ </field>
+ <field name="OP_REPLACE_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Op.REPLACE_BLOCK instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Op.REPLACE_BLOCK instead.]]>
+ </doc>
+ </field>
+ <field name="OP_COPY_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Op.COPY_BLOCK instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Op.COPY_BLOCK instead.]]>
+ </doc>
+ </field>
+ <field name="OP_BLOCK_CHECKSUM" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Op.BLOCK_CHECKSUM instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Op.BLOCK_CHECKSUM instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.SUCCESS instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.SUCCESS instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_ERROR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.ERROR instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_ERROR_CHECKSUM" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.ERROR_CHECKSUM instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_CHECKSUM instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_ERROR_INVALID" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.ERROR_INVALID instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_INVALID instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_ERROR_EXISTS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.ERROR_EXISTS instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_EXISTS instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_ERROR_ACCESS_TOKEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.ERROR_ACCESS_TOKEN instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_ACCESS_TOKEN instead.]]>
+ </doc>
+ </field>
+ <field name="OP_STATUS_CHECKSUM_OK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="Deprecated at 0.21. Use Status.CHECKSUM_OK instead.">
+ <doc>
+ <![CDATA[@deprecated Deprecated at 0.21. Use Status.CHECKSUM_OK instead.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Transfer data to/from datanode using a streaming protocol.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage -->
+ <class name="DataTransferProtocol.BlockConstructionStage" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getRecoveryStage" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the recovery stage of this stage]]>
+ </doc>
+ </method>
+ <field name="PIPELINE_SETUP_APPEND" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The enum constants are always listed as a regular stage followed by its
+       recovery stage.
+       Changing this order will make getRecoveryStage not work.]]>
+ </doc>
+ </field>
+ <field name="PIPELINE_SETUP_APPEND_RECOVERY" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DATA_STREAMING" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PIPELINE_SETUP_STREAMING_RECOVERY" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PIPELINE_CLOSE" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PIPELINE_CLOSE_RECOVERY" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PIPELINE_SETUP_CREATE" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op -->
+ <class name="DataTransferProtocol.Op" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from in]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write to out]]>
+ </doc>
+ </method>
+ <field name="WRITE_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ_METADATA" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="REPLACE_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COPY_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCK_CHECKSUM" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="code" type="byte"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The code for this operation.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Operation]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader -->
+ <class name="DataTransferProtocol.PacketHeader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="DataTransferProtocol.PacketHeader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DataTransferProtocol.PacketHeader" type="int, long, long, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getDataLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isLastPacketInBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSeqno" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getOffsetInBlock" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPacketLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="putInBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="java.nio.ByteBuffer"/>
+ <doc>
+ <![CDATA[Write the header into the buffer.
+ This requires that PKT_HEADER_LEN bytes are available.]]>
+ </doc>
+ </method>
+ <method name="sanityCheck" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lastSeqNo" type="long"/>
+ <doc>
+ <![CDATA[Perform a sanity check on the packet, returning true if it is sane.
+ @param lastSeqNo the previous sequence number received - we expect the current
+ sequence number to be larger by 1.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="PKT_HEADER_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Header size for a packet]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Header data for each packet that goes through the read/write pipelines.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck -->
+ <class name="DataTransferProtocol.PipelineAck" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="DataTransferProtocol.PipelineAck"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DataTransferProtocol.PipelineAck" type="long, org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+ @param seqno sequence number
+ @param replies an array of replies]]>
+ </doc>
+ </constructor>
+ <method name="getSeqno" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the sequence number
+ @return the sequence number]]>
+ </doc>
+ </method>
+ <method name="getNumOfReplies" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of replies
+ @return the number of replies]]>
+ </doc>
+ </method>
+ <method name="getReply" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[get the ith reply
+ @return the ith reply]]>
+ </doc>
+ </method>
+ <method name="isSuccess" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check whether this ack contains an error status
+ @return true if all statuses are SUCCESS]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writable interface]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="UNKOWN_SEQNO" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[reply]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Receiver -->
+ <class name="DataTransferProtocol.Receiver" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataTransferProtocol.Receiver"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readOp" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read an Op. It also checks protocol version.]]>
+ </doc>
+ </method>
+ <method name="processOp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="op" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"/>
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Process op by the corresponding method.]]>
+ </doc>
+ </method>
+ <method name="opReadBlock"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="offset" type="long"/>
+ <param name="length" type="long"/>
+ <param name="client" type="java.lang.String"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Abstract OP_READ_BLOCK method. Read a block.]]>
+ </doc>
+ </method>
+ <method name="opWriteBlock"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="pipelineSize" type="int"/>
+ <param name="stage" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"/>
+ <param name="newGs" type="long"/>
+ <param name="minBytesRcvd" type="long"/>
+ <param name="maxBytesRcvd" type="long"/>
+ <param name="client" type="java.lang.String"/>
+ <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Abstract OP_WRITE_BLOCK method.
+ Write a block.]]>
+ </doc>
+ </method>
+ <method name="opReplaceBlock"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="sourceId" type="java.lang.String"/>
+ <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Abstract OP_REPLACE_BLOCK method.
+ It is used for balancing purposes; it is sent to a destination]]>
+ </doc>
+ </method>
+ <method name="opCopyBlock"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Abstract OP_COPY_BLOCK method. It is used for balancing purposes; it is sent to
+ a proxy source.]]>
+ </doc>
+ </method>
+ <method name="opBlockChecksum"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Abstract OP_BLOCK_CHECKSUM method.
+ Get the checksum of a block]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Receiver]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Receiver -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Sender -->
+ <class name="DataTransferProtocol.Sender" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataTransferProtocol.Sender"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="op"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="op" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize an operation.]]>
+ </doc>
+ </method>
+ <method name="opReadBlock"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blockOffset" type="long"/>
+ <param name="blockLen" type="long"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Send OP_READ_BLOCK]]>
+ </doc>
+ </method>
+ <method name="opWriteBlock"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="pipelineSize" type="int"/>
+ <param name="stage" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"/>
+ <param name="newGs" type="long"/>
+ <param name="minBytesRcvd" type="long"/>
+ <param name="maxBytesRcvd" type="long"/>
+ <param name="client" type="java.lang.String"/>
+ <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Send OP_WRITE_BLOCK]]>
+ </doc>
+ </method>
+ <method name="opReplaceBlock"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="storageId" type="java.lang.String"/>
+ <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Send OP_REPLACE_BLOCK]]>
+ </doc>
+ </method>
+ <method name="opCopyBlock"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Send OP_COPY_BLOCK]]>
+ </doc>
+ </method>
+ <method name="opBlockChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Send OP_BLOCK_CHECKSUM]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sender]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Sender -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status -->
+ <class name="DataTransferProtocol.Status" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from in]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write to out]]>
+ </doc>
+ </method>
+ <method name="writeOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write to out]]>
+ </doc>
+ </method>
+ <field name="SUCCESS" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ERROR" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ERROR_CHECKSUM" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ERROR_INVALID" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ERROR_EXISTS" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ERROR_ACCESS_TOKEN" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKSUM_OK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Status]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DirectoryListing -->
+ <class name="DirectoryListing" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="DirectoryListing"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DirectoryListing" type="org.apache.hadoop.hdfs.protocol.HdfsFileStatus[], int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[constructor
+ @param partialListing a partial listing of a directory
+ @param remainingEntries number of entries that are left to be listed]]>
+ </doc>
+ </constructor>
+ <method name="getPartialListing" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the partial listing of file status
+ @return the partial listing of file status]]>
+ </doc>
+ </method>
+ <method name="getRemainingEntries" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of remaining entries that are left to be listed
+ @return the number of remaining entries that are left to be listed]]>
+ </doc>
+ </method>
+ <method name="hasMore" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if there are more entries that are left to be listed
+ @return true if there are more entries that are left to be listed;
+ return false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getLastName" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the last name in this list
+ @return the last name in the list if it is not empty; otherwise return null]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class defines a partial listing of a directory to support
+ iterative directory listing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DirectoryListing -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.DSQuotaExceededException -->
+ <class name="DSQuotaExceededException" extends="org.apache.hadoop.hdfs.protocol.QuotaExceededException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DSQuotaExceededException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DSQuotaExceededException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DSQuotaExceededException" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="serialVersionUID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.DSQuotaExceededException -->
+ <!-- start interface org.apache.hadoop.hdfs.protocol.FSConstants -->
+ <interface name="FSConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="MIN_BLOCKS_FOR_WRITE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCK_INVALIDATE_CHUNK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="QUOTA_DONT_SET" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="QUOTA_RESET" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HEARTBEAT_INTERVAL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCKREPORT_INTERVAL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCKREPORT_INITIAL_DELAY" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_SOFTLIMIT_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_HARDLIMIT_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_RECOVER_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_PATH_LENGTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_PATH_DEPTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SMALL_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BLOCK_SIZE" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BYTES_PER_CHECKSUM" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_WRITE_PACKET_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_REPLICATION_FACTOR" type="short"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_FILE_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_DATA_SOCKET_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SIZE_OF_INTEGER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HDFS_URI_SCHEME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[URI Scheme for hdfs://namenode/ URIs.]]>
+ </doc>
+ </field>
+ <field name="LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Please see {@link LayoutVersion} on adding new layout version.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Some handy constants]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.protocol.FSConstants -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
+ <class name="FSConstants.DatanodeReportType" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="ALL" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LIVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEAD" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
+ <class name="FSConstants.SafeModeAction" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="SAFEMODE_LEAVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SAFEMODE_ENTER" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SAFEMODE_GET" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
+ <class name="FSConstants.UpgradeAction" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="GET_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DETAILED_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FORCE_PROCEED" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Distributed upgrade actions:
+
+ 1. Get upgrade status.
+ 2. Get detailed upgrade status.
+ 3. Proceed with the upgrade if it is stuck, no matter what the status is.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.HdfsFileStatus -->
+ <class name="HdfsFileStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="HdfsFileStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="HdfsFileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, byte[], byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+ @param length the number of bytes the file has
+ @param isdir if the path is a directory
+ @param block_replication the replication factor
+ @param blocksize the block size
+ @param modification_time modification time
+ @param access_time access time
+ @param permission permission
+ @param owner the owner of the path
+ @param group the group of the path
+ @param path the local name in java UTF8 encoding the same as that in-memory]]>
+ </doc>
+ </constructor>
+ <method name="getLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the length of this file, in bytes.
+ @return the length of this file, in bytes.]]>
+ </doc>
+ </method>
+ <method name="isDir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this a directory?
+ @return true if this is a directory]]>
+ </doc>
+ </method>
+ <method name="isSymlink" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this a symbolic link?
+ @return true if this is a symbolic link]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the block size of the file.
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the replication factor of a file.
+ @return the replication factor of a file.]]>
+ </doc>
+ </method>
+ <method name="getModificationTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the modification time of the file.
+ @return the modification time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getAccessTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the access time of the file.
+ @return the access time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get FsPermission associated with the file.
+ @return permssion]]>
+ </doc>
+ </method>
+ <method name="getOwner" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the owner of the file.
+ @return owner of the file]]>
+ </doc>
+ </method>
+ <method name="getGroup" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the group associated with the file.
+ @return group for the file.]]>
+ </doc>
+ </method>
+ <method name="isEmptyLocalName" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if the local name is empty
+ @return true if the name is empty]]>
+ </doc>
+ </method>
+ <method name="getLocalName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the string representation of the local name
+ @return the local name in string]]>
+ </doc>
+ </method>
+ <method name="getLocalNameInBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Java UTF8 representation of the local name
+ @return the local name in java UTF8]]>
+ </doc>
+ </method>
+ <method name="getFullName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the string representation of the full path name
+ @param parent the parent path
+ @return the full path in string]]>
+ </doc>
+ </method>
+ <method name="getFullPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Get the full path
+ @param parent the parent path
+ @return the full path]]>
+ </doc>
+ </method>
+ <method name="getSymlink" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the string representation of the symlink.
+ @return the symlink as a string.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="EMPTY_NAME" type="byte[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Interface that represents the over the wire information for a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.HdfsFileStatus -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus -->
+ <class name="HdfsLocatedFileStatus" extends="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HdfsLocatedFileStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="HdfsLocatedFileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, byte[], byte[], org.apache.hadoop.hdfs.protocol.LocatedBlocks"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param length size
+ @param isdir if this is directory
+ @param block_replication the file's replication factor
+ @param blocksize the file's block size
+ @param modification_time most recent modification time
+ @param access_time most recent access time
+ @param permission permission
+ @param owner owner
+ @param group group
+ @param symlink symbolic link
+ @param path local path name in java UTF8 format
+ @param locations block locations]]>
+ </doc>
+ </constructor>
+ <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Interface that represents the over the wire information
+ including block locations for a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.LayoutVersion -->
+ <class name="LayoutVersion" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LayoutVersion"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets formatted string that describes {@link LayoutVersion} information.]]>
+ </doc>
+ </method>
+ <method name="supports" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"/>
+ <param name="lv" type="int"/>
+ <doc>
+ <![CDATA[Returns true if a given feature is supported in the given layout version
+ @param f Feature
+ @param lv LayoutVersion
+ @return true if {@code f} is supported in layout version {@code lv}]]>
+ </doc>
+ </method>
+ <method name="getCurrentLayoutVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current layout version]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class tracks changes in the layout version of HDFS.
+
+ Layout version is changed for following reasons:
+ <ol>
+ <li>The layout of how namenode or datanode stores information
+ on disk changes.</li>
+ <li>A new operation code is added to the editlog.</li>
+ <li>Modification such as format of a record, content of a record
+ in editlog or fsimage.</li>
+ </ol>
+ <br>
+ <b>How to update layout version:<br></b>
+ When a change requires new layout version, please add an entry into
+ {@link Feature} with a short enum name, new layout version and description
+ of the change. Please see {@link Feature} for further details.
+ <br>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.LayoutVersion -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature -->
+ <class name="LayoutVersion.Feature" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NAMESPACE_QUOTA" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FILE_ACCESS_TIME" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DISKSPACE_QUOTA" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STICKY_BIT" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="APPEND_RBW_DIR" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ATOMIC_RENAME" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CONCAT" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SYMLINKS" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DELEGATION_TOKEN" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FSIMAGE_COMPRESSION" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FSIMAGE_CHECKSUM" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="REMOVE_REL13_DISK_LAYOUT_SUPPORT" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UNUSED_28" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UNUSED_29" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UNUSED_30" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RESERVED_REL20_203" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RESERVED_REL20_204" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RESERVED_REL22" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Enums for features that change the layout version.
+ <br><br>
+ To add a new layout version:
+ <ul>
+ <li>Define a new enum constant with a short enum name, the new layout version
+ and description of the added feature.</li>
+ <li>When adding a layout version with an ancestor that is not same as
+ its immediate predecessor, use the constructor where a spacific ancestor
+ can be passed.
+ </li>
+ </ul>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
+ <class name="LocatedBlock" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="LocatedBlock"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBlockToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setBlockToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ </method>
+ <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLocations" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStartOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isCorrupt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read LocatedBlock from in.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A LocatedBlock is a pair of Block, DatanodeInfo[]
+ objects. It tells where to find a Block.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
+ <class name="LocatedBlocks" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="LocatedBlocks"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocatedBlocks" type="long, boolean, java.util.List, org.apache.hadoop.hdfs.protocol.LocatedBlock, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[public Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getLocatedBlocks" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get located blocks.]]>
+ </doc>
+ </method>
+ <method name="getLastLocatedBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the last located block.]]>
+ </doc>
+ </method>
+ <method name="isLastBlockComplete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the last block completed?]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[Get located block.]]>
+ </doc>
+ </method>
+ <method name="locatedBlockCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get number of located blocks.]]>
+ </doc>
+ </method>
+ <method name="getFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isUnderConstruction" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return ture if file was under construction when
+ this LocatedBlocks was constructed, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="findBlock" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <doc>
+ <![CDATA[Find block containing specified offset.
+
+ @return block if found, or null otherwise.]]>
+ </doc>
+ </method>
+ <method name="insertRange"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockIdx" type="int"/>
+ <param name="newBlocks" type="java.util.List"/>
+ </method>
+ <method name="getInsertIndex" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="binSearchResult" type="int"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Collection of blocks with their locations and the file length.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.NSQuotaExceededException -->
+ <class name="NSQuotaExceededException" extends="org.apache.hadoop.hdfs.protocol.QuotaExceededException"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NSQuotaExceededException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NSQuotaExceededException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NSQuotaExceededException" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="serialVersionUID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.NSQuotaExceededException -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
+ <class name="QuotaExceededException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="QuotaExceededException"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="QuotaExceededException" type="java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="QuotaExceededException" type="long, long"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setPathName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="serialVersionUID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="pathName" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="quota" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="count" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This exception is thrown when modification to HDFS results in violation
+ of a directory quota. A directory quota might be namespace quota (limit
+ on number of files and directories) or a diskspace quota (limit on space
+ taken by all the file under the directory tree). <br> <br>
+
+ The message for the exception specifies the directory where the quota
+ was violated and actual quotas. Specific message is generated in the
+ corresponding Exception class:
+ DSQuotaExceededException or
+ NSQuotaExceededException]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.RecoveryInProgressException -->
+ <class name="RecoveryInProgressException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecoveryInProgressException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Exception indicating that a replica is already being recovery.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.RecoveryInProgressException -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.UnregisteredNodeException -->
+ <class name="UnregisteredNodeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnregisteredNodeException" type="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UnregisteredNodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID, org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The exception is thrown if a different data-node claims the same
+ storage id as the existing one.
+
+ @param nodeID unregistered data-node
+ @param storedNode data-node stored in the system with this storage id]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when a node that has not previously
+ registered is trying to access the name node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.UnregisteredNodeException -->
+ <!-- start class org.apache.hadoop.hdfs.protocol.UnresolvedPathException -->
+ <class name="UnresolvedPathException" extends="org.apache.hadoop.fs.UnresolvedLinkException"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnresolvedPathException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Used by RemoteException to instantiate an UnresolvedPathException.]]>
+ </doc>
+ </constructor>
+ <constructor name="UnresolvedPathException" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getResolvedPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a path with the link resolved with the target.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Thrown when a symbolic link is encountered in a path.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.protocol.UnresolvedPathException -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.block">
+ <!-- start class org.apache.hadoop.hdfs.security.token.block.BlockKey -->
+ <class name="BlockKey" extends="org.apache.hadoop.security.token.delegation.DelegationKey"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockKey"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BlockKey" type="int, long, javax.crypto.SecretKey"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Key used for generating and verifying block tokens]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.block.BlockKey -->
+ <!-- start class org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier -->
+ <class name="BlockTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockTokenIdentifier"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BlockTokenIdentifier" type="java.lang.String, long, java.util.EnumSet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getKind" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExpiryDate" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setExpiryDate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="expiryDate" type="long"/>
+ </method>
+ <method name="getKeyId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setKeyId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keyId" type="int"/>
+ </method>
+ <method name="getUserId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlockId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAccessModes" return="java.util.EnumSet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier -->
+ <!-- start class org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager -->
+ <class name="BlockTokenSecretManager" extends="org.apache.hadoop.security.token.SecretManager"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockTokenSecretManager" type="boolean, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructor
+
+ @param isMaster
+ @param keyUpdateInterval
+ @param tokenLifetime
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="exportKeys" return="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Export block keys, only to be used in master mode]]>
+ </doc>
+ </method>
+ <method name="setKeys"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="exportedKeys" type="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set block keys, only to be used in slave mode]]>
+ </doc>
+ </method>
+ <method name="updateKeys"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update block keys, only to be used in master mode]]>
+ </doc>
+ </method>
+ <method name="generateToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="modes" type="java.util.EnumSet"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate an block token for current user]]>
+ </doc>
+ </method>
+ <method name="generateToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="userId" type="java.lang.String"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="modes" type="java.util.EnumSet"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate a block token for a specified user]]>
+ </doc>
+ </method>
+ <method name="checkAccess"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
+ <param name="userId" type="java.lang.String"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="mode" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <doc>
+ <![CDATA[Check if access should be allowed. userID is not checked if null. This
+ method doesn't check if token password is correct. It should be used only
+ when token password has already been verified (e.g., in the RPC layer).]]>
+ </doc>
+ </method>
+ <method name="checkAccess"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <param name="userId" type="java.lang.String"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="mode" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <doc>
+ <![CDATA[Check if access should be allowed. userID is not checked if null]]>
+ </doc>
+ </method>
+ <method name="setTokenLifetime"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tokenLifetime" type="long"/>
+ <doc>
+ <![CDATA[set token lifetime.]]>
+ </doc>
+ </method>
+ <method name="createIdentifier" return="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty block token identifier
+
+ @return a newly created empty block token identifier]]>
+ </doc>
+ </method>
+ <method name="createPassword" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="identifier" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
+ <doc>
+ <![CDATA[Create a new password/secret for the given block token identifier.
+
+ @param identifier
+ the block token identifier
+ @return token password/secret]]>
+ </doc>
+ </method>
+ <method name="retrievePassword" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="identifier" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <doc>
+ <![CDATA[Look up the token password/secret for the given block token identifier.
+
+ @param identifier
+ the block token identifier to look up
+ @return token password/secret as byte[]
+ @throws InvalidToken]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DUMMY_TOKEN" type="org.apache.hadoop.security.token.Token"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[BlockTokenSecretManager can be instantiated in 2 modes, master mode and slave
+ mode. Master can generate new block keys and export block keys to slaves,
+ while slaves can only import and use block keys received from master. Both
+ master and slave can generate and verify block tokens. Typically, master mode
+ is used by NN and slave mode is used by DN.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager -->
+ <!-- start class org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode -->
+ <class name="BlockTokenSecretManager.AccessMode" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="READ" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COPY" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="REPLACE" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode -->
+ <!-- start class org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector -->
+ <class name="BlockTokenSelector" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+ <constructor name="BlockTokenSelector"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="service" type="org.apache.hadoop.io.Text"/>
+ <param name="tokens" type="java.util.Collection"/>
+ </method>
+ <doc>
+ <![CDATA[A block token selector for HDFS]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector -->
+ <!-- start class org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys -->
+ <class name="ExportedBlockKeys" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ExportedBlockKeys"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isBlockTokenEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getKeyUpdateInterval" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTokenLifetime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCurrentKey" return="org.apache.hadoop.hdfs.security.token.block.BlockKey"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAllKeys" return="org.apache.hadoop.hdfs.security.token.block.BlockKey[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="DUMMY_KEYS" type="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Object for passing block keys]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys -->
+ <!-- start class org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException -->
+ <class name="InvalidBlockTokenException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidBlockTokenException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidBlockTokenException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Access token verification failed.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+ <!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier -->
+ <class name="DelegationTokenIdentifier" extends="org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DelegationTokenIdentifier"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty delegation token identifier for reading into.]]>
+ </doc>
+ </constructor>
+ <constructor name="DelegationTokenIdentifier" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new delegation token identifier
+ @param owner the effective username of the token owner
+ @param renewer the username of the renewer
+ @param realUser the real username of the token owner]]>
+ </doc>
+ </constructor>
+ <method name="getKind" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="HDFS_DELEGATION_KIND" type="org.apache.hadoop.io.Text"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A delegation token identifier that is specific to HDFS.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier -->
+ <!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager -->
+ <class name="DelegationTokenSecretManager" extends="org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DelegationTokenSecretManager" type="long, long, long, long, org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a secret manager
+ @param delegationKeyUpdateInterval the number of seconds for rolling new
+ secret keys.
+ @param delegationTokenMaxLifetime the maximum lifetime of the delegation
+ tokens
+ @param delegationTokenRenewInterval how often the tokens must be renewed
+ @param delegationTokenRemoverScanInterval how often the tokens are scanned
+ for expired tokens]]>
+ </doc>
+ </constructor>
+ <method name="createIdentifier" return="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTokenExpiryTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dtId" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns expiry time of a token given its identifier.
+
+ @param dtId DelegationTokenIdentifier of a token
+ @return Expiry time of the token
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="loadSecretManagerState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Load SecretManager state from fsimage.
+
+ @param in input stream to read fsimage
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="saveSecretManagerState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Store the current state of the SecretManager for persistence
+
+ @param out Output stream for writing into fsimage.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="addPersistedDelegationToken"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="identifier" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
+ <param name="expiryTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method is intended to be used only while reading edit logs.
+
+ @param identifier DelegationTokenIdentifier read from the edit logs or
+ fsimage
+
+ @param expiryTime token expiry time
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="updatePersistedMasterKey"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.security.token.delegation.DelegationKey"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a MasterKey to the list of keys.
+
+ @param key DelegationKey
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="updatePersistedTokenRenewal"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="identifier" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
+ <param name="expiryTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update the token cache with renewal record in edit logs.
+
+ @param identifier DelegationTokenIdentifier of the renewed token
+ @param expiryTime
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="updatePersistedTokenCancellation"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="identifier" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update the token cache with the cancel record in edit logs
+
+ @param identifier DelegationTokenIdentifier of the canceled token
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getNumberOfKeys" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of delegation keys currently stored.
+ @return number of delegation keys]]>
+ </doc>
+ </method>
+ <method name="logUpdateMasterKey"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.security.token.delegation.DelegationKey"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call namesystem to update editlogs for new master key.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A HDFS specific delegation token secret manager.
+ The secret manager is responsible for generating and accepting the password
+ for each token.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager -->
+ <!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector -->
+ <class name="DelegationTokenSelector" extends="org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DelegationTokenSelector"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A delegation token that is specialized for HDFS]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+ <!-- start class org.apache.hadoop.hdfs.server.balancer.Balancer -->
+ <class name="Balancer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Run a balancer
+ @param args]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main method of Balancer
+ @param args arguments to a Balancer
+ @throws Exception exception that occurred during datanode balancing]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return this balancer's configuration]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[set this balancer's configuration]]>
+ </doc>
+ </method>
+ <field name="MAX_NUM_CONCURRENT_MOVES" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The maximum number of concurrent blocks moves for
+ balancing purpose at a datanode]]>
+ </doc>
+ </field>
+ <field name="SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ALREADY_RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NO_MOVE_BLOCK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NO_MOVE_PROGRESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IO_EXCEPTION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ILLEGAL_ARGS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>The balancer is a tool that balances disk space usage on an HDFS cluster
+ when some datanodes become full or when new empty nodes join the cluster.
+ The tool is deployed as an application program that can be run by the
+ cluster administrator on a live HDFS cluster while applications
+ are adding and deleting files.
+
+ <p>SYNOPSIS
+ <pre>
+ To start:
+ bin/start-balancer.sh [-threshold <threshold>]
+ Example: bin/ start-balancer.sh
+ start the balancer with a default threshold of 10%
+ bin/ start-balancer.sh -threshold 5
+ start the balancer with a threshold of 5%
+ To stop:
+ bin/ stop-balancer.sh
+ </pre>
+
+ <p>DESCRIPTION
+ <p>The threshold parameter is a fraction in the range of (0%, 100%) with a
+ default value of 10%. The threshold sets a target for whether the cluster
+ is balanced. A cluster is balanced if for each datanode, the utilization
+ of the node (ratio of used space at the node to total capacity of the node)
+ differs from the utilization of the cluster (ratio of used space in the cluster
+ to total capacity of the cluster) by no more than the threshold value.
+ The smaller the threshold, the more balanced a cluster will become.
+ It takes more time to run the balancer for small threshold values.
+ Also for a very small threshold the cluster may not be able to reach the
+ balanced state when applications write and delete files concurrently.
+
+ <p>The tool moves blocks from highly utilized datanodes to poorly
+ utilized datanodes iteratively. In each iteration a datanode moves or
+ receives no more than the lesser of 10G bytes or the threshold fraction
+ of its capacity. Each iteration runs no more than 20 minutes.
+ At the end of each iteration, the balancer obtains updated datanodes
+ information from the namenode.
+
+ <p>A system property that limits the balancer's use of bandwidth is
+ defined in the default configuration file:
+ <pre>
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>1048576</value>
+ <description> Specifies the maximum bandwidth that each datanode
+ can utilize for the balancing purpose in term of the number of bytes
+ per second. </description>
+ </property>
+ </pre>
+
+ <p>This property determines the maximum speed at which a block will be
+ moved from one datanode to another. The default value is 1MB/s. The higher
+ the bandwidth, the faster a cluster can reach the balanced state,
+ but with greater competition with application processes. If an
+ administrator changes the value of this property in the configuration
+ file, the change is observed when HDFS is next restarted.
+
+ <p>MONITORING BALANCER PROGRESS
+ <p>After the balancer is started, an output file name where the balancer
+ progress will be recorded is printed on the screen. The administrator
+ can monitor the running of the balancer by reading the output file.
+ The output shows the balancer's status iteration by iteration. In each
+ iteration it prints the starting time, the iteration number, the total
+ number of bytes that have been moved in the previous iterations,
+ the total number of bytes that are left to move in order for the cluster
+ to be balanced, and the number of bytes that are being moved in this
+ iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left
+ To Move" is decreasing.
+
+ <p>Running multiple instances of the balancer in an HDFS cluster is
+ prohibited by the tool.
+
+ <p>The balancer automatically exits when any of the following five
+ conditions is satisfied:
+ <ol>
+ <li>The cluster is balanced;
+ <li>No block can be moved;
+ <li>No block has been moved for five consecutive iterations;
+ <li>An IOException occurs while communicating with the namenode;
+ <li>Another balancer is running.
+ </ol>
+
+ <p>Upon exit, a balancer returns an exit code and prints one of the
+ following messages to the output file in corresponding to the above exit
+ reasons:
+ <ol>
+ <li>The cluster is balanced. Exiting
+ <li>No block can be moved. Exiting...
+ <li>No block has been moved for 3 iterations. Exiting...
+ <li>Received an IO exception: failure reason. Exiting...
+ <li>Another balancer is running. Exiting...
+ </ol>
+
+ <p>The administrator can interrupt the execution of the balancer at any
+ time by running the command "stop-balancer.sh" on the machine where the
+ balancer is running.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.balancer.Balancer -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+ <!-- start class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
+ <class name="GenerationStamp" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="GenerationStamp"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new instance, initialized to FIRST_VALID_STAMP.]]>
+ </doc>
+ </constructor>
+ <method name="getStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current generation stamp]]>
+ </doc>
+ </method>
+ <method name="setStamp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stamp" type="long"/>
+ <doc>
+ <![CDATA[Sets the current generation stamp]]>
+ </doc>
+ </method>
+ <method name="nextStamp" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[First increments the counter and then returns the stamp]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.hdfs.server.common.GenerationStamp"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="FIRST_VALID_STAMP" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The first valid generation stamp.]]>
+ </doc>
+ </field>
+ <field name="GRANDFATHER_GENERATION_STAMP" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Generation stamp of blocks that pre-date the introduction
+ of a generation stamp.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A GenerationStamp is a Hadoop FS primitive, identified by a long.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
+ <!-- start interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
+ <interface name="HdfsConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="READ_TIMEOUT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ_TIMEOUT_EXTENSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE_TIMEOUT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE_TIMEOUT_EXTENSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DN_KEEPALIVE_TIMEOUT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Some handy internal HDFS constants]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState -->
+ <class name="HdfsConstants.BlockUCState" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="COMPLETE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Block construction completed.<br>
+ The block has at least one {@link ReplicaState#FINALIZED} replica,
+ and is not going to be modified.]]>
+ </doc>
+ </field>
+ <field name="UNDER_CONSTRUCTION" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The block is under construction.<br>
+ It has been recently allocated for write or append.]]>
+ </doc>
+ </field>
+ <field name="UNDER_RECOVERY" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The block is under recovery.<br>
+ When a file lease expires its last block may not be {@link #COMPLETE}
+ and needs to go through a recovery procedure,
+ which synchronizes the existing replicas contents.]]>
+ </doc>
+ </field>
+ <field name="COMMITTED" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The block is committed.<br>
+ The client reported that all bytes are written to data-nodes
+ with the given generation stamp and block length, but no
+ {@link ReplicaState#FINALIZED}
+ replicas have yet been reported by data-nodes themselves.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[States, which a block can go through while it is under construction.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole -->
+ <class name="HdfsConstants.NamenodeRole" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="ACTIVE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BACKUP" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STANDBY" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Defines the NameNode role.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
+ <class name="HdfsConstants.NodeType" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NAME_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DATA_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Type of the node]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState -->
+ <class name="HdfsConstants.ReplicaState" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="int"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from in]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write to out]]>
+ </doc>
+ </method>
+ <field name="FINALIZED" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Replica is finalized. The state when replica is not modified.]]>
+ </doc>
+ </field>
+ <field name="RBW" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Replica is being written to.]]>
+ </doc>
+ </field>
+ <field name="RWR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Replica is waiting to be recovered.]]>
+ </doc>
+ </field>
+ <field name="RUR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Replica is under recovery.]]>
+ </doc>
+ </field>
+ <field name="TEMPORARY" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Temporary replica: created for replication and relocation only.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Block replica states, which it can go through while being constructed.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
+ <class name="HdfsConstants.StartupOption" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toNodeRole" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="FORMAT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="REGULAR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BACKUP" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UPGRADE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ROLLBACK" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FINALIZE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IMPORT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Startup options]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
+ <class name="InconsistentFSStateException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String, java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The exception is thrown when file system state is inconsistent
+ and is not recoverable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
+ <class name="IncorrectVersionException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IncorrectVersionException" type="int, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="IncorrectVersionException" type="int, java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The exception is thrown when external version does not match
+ current version of the application.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.JspHelper -->
+ <class name="JspHelper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="bestNode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blks" type="org.apache.hadoop.hdfs.protocol.LocatedBlocks"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="bestNode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="bestNode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodes" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
+ <param name="doRandom" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="streamBlockInAscii"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="blockId" type="long"/>
+ <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
+ <param name="genStamp" type="long"/>
+ <param name="blockSize" type="long"/>
+ <param name="offsetIntoBlock" type="long"/>
+ <param name="chunkSizeToView" type="long"/>
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableRow"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="columns" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableRow"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="columns" type="java.lang.String[]"/>
+ <param name="row" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableFooter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="sortNodeList"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodes" type="java.util.ArrayList"/>
+ <param name="field" type="java.lang.String"/>
+ <param name="order" type="java.lang.String"/>
+ </method>
+ <method name="printPathWithLinks"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.lang.String"/>
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="namenodeInfoPort" type="int"/>
+ <param name="tokenString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="printGotoForm"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="namenodeInfoPort" type="int"/>
+ <param name="tokenString" type="java.lang.String"/>
+ <param name="file" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createTitle"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="file" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="string2ChunkSizeToView" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="defaultValue" type="int"/>
+ <doc>
+ <![CDATA[Convert a String to chunk-size-to-view.]]>
+ </doc>
+ </method>
+ <method name="getVersionTable" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a table containing version information.]]>
+ </doc>
+ </method>
+ <method name="validatePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Validate filename.
+ @return null if the filename is invalid.
+ Otherwise, return the validated filename.]]>
+ </doc>
+ </method>
+ <method name="validateLong" return="java.lang.Long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Validate a long value.
+ @return null if the value is invalid.
+ Otherwise, return the validated Long object.]]>
+ </doc>
+ </method>
+ <method name="validateURL" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Validate a URL.
+ @return null if the value is invalid.
+ Otherwise, return the validated URL String.]]>
+ </doc>
+ </method>
+ <method name="getDefaultWebUser" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[If security is turned off, what is the default web user?
+ @param conf the configuration to look in
+ @return the remote user that was configuration]]>
+ </doc>
+ </method>
+ <method name="getUGI" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get {@link UserGroupInformation} and possibly the delegation token out of
+ the request.
+ @param request the http request
+ @return a new user from the request
+ @throws AccessControlException if the request has no token]]>
+ </doc>
+ </method>
+ <method name="getDelegationTokenUrlParam" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tokenString" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the url parameter for the given token string.
+ @param tokenString
+ @return url parameter]]>
+ </doc>
+ </method>
+ <method name="getUrlParam" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="val" type="java.lang.String"/>
+ <param name="paramSeparator" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the url parameter for the given string, prefixed with
+ paramSeparator.
+
+ @param name parameter name
+ @param val parameter value
+ @param paramSeparator URL parameter prefix, i.e. either '?' or '&'
+ @return url parameter]]>
+ </doc>
+ </method>
+ <method name="getUrlParam" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="val" type="java.lang.String"/>
+ <param name="firstParam" type="boolean"/>
+ <doc>
+ <![CDATA[Returns the url parameter for the given string, prefixed with '?' if
+ firstParam is true, prefixed with '&' if firstParam is false.
+
+ @param name parameter name
+ @param val parameter value
+ @param firstParam true if this is the first parameter in the list, false otherwise
+ @return url parameter]]>
+ </doc>
+ </method>
+ <method name="getUrlParam" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="val" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the url parameter for the given string, prefixed with '&'.
+
+ @param name parameter name
+ @param val parameter value
+ @return url parameter]]>
+ </doc>
+ </method>
+ <field name="CURRENT_CONF" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WEB_UGI_PROPERTY_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DELEGATION_PARAMETER_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.JspHelper -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.Storage -->
+ <class name="Storage" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create empty storage info of the specified type]]>
+ </doc>
+ </constructor>
+ <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, int, long"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, org.apache.hadoop.hdfs.server.common.StorageInfo"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="dirIterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return default iterator
+ This iterator returns all entries in storageDirs]]>
+ </doc>
+ </method>
+ <method name="dirIterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirType" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
+ <doc>
+ <![CDATA[Return iterator based on Storage Directory Type
+ This iterator selects entries in storageDirs of type dirType and returns
+ them via the Iterator]]>
+ </doc>
+ </method>
+ <method name="listStorageDirectories" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[generate storage list (debug line)]]>
+ </doc>
+ </method>
+ <method name="getNumStorageDirs" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStorageDir" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="idx" type="int"/>
+ </method>
+ <method name="addStorageDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ </method>
+ <method name="isPreUpgradableLayout" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return true if the layout of the given storage directory is from a version
+ of Hadoop prior to the introduction of the "current" and "previous"
+ directories which allow upgrade and rollback.]]>
+ </doc>
+ </method>
+ <method name="checkVersionUpgradable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="oldVersion" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Checks if the upgrade from the given old version is supported. If
+ no upgrade is supported, it throws IncorrectVersionException.
+
+ @param oldVersion]]>
+ </doc>
+ </method>
+ <method name="getFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get common storage fields.
+ Should be overloaded if additional fields need to be get.
+
+ @param props
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set common storage fields.
+ Should be overloaded if additional fields need to be set.
+
+ @param props
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="from" type="java.io.File"/>
+ <param name="to" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteDir"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write all data storage files.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unlockAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unlock all storage directories.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isLockSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check whether underlying file system supports file locking.
+
+ @return <code>true</code> if exclusive locks are supported or
+ <code>false</code> otherwise.
+ @throws IOException
+ @see StorageDirectory#lock()]]>
+ </doc>
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="storage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
+ </method>
+ <method name="is203LayoutVersion" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="layoutVersion" type="int"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAST_PRE_UPGRADE_LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAST_UPGRADABLE_LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAST_UPGRADABLE_HADOOP_VERSION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="PRE_GENERATIONSTAMP_LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAYOUT_VERSIONS_203" type="int[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Layout versions of 0.20.203 release]]>
+ </doc>
+ </field>
+ <field name="STORAGE_FILE_VERSION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="STORAGE_DIR_CURRENT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="storageDirs" type="java.util.List"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Storage information file.
+ <p>
+ Local storage information is stored in a separate file VERSION.
+ It contains type of the node,
+ the storage layout version, the namespace id, and
+ the fs state creation time.
+ <p>
+ Local storage can reside in multiple directories.
+ Each directory should contain the same VERSION file as the others.
+ During startup Hadoop servers (name-node and data-nodes) read their local
+ storage information from them.
+ <p>
+ The servers hold a lock for each storage directory while they run so that
+ other nodes were not able to startup sharing the same storage.
+ The locks are released when the servers stop (normally or abnormally).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.Storage -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
+ <class name="Storage.StorageDirectory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Storage.StorageDirectory" type="java.io.File"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Storage.StorageDirectory" type="java.io.File, org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRoot" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get root directory of this storage]]>
+ </doc>
+ </method>
+ <method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get storage directory type]]>
+ </doc>
+ </method>
+ <method name="read"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read version file.
+
+ @throws IOException if file cannot be read or contains inconsistent data]]>
+ </doc>
+ </method>
+ <method name="read"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="from" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write version file.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="to" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clearDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clear and re-create storage directory.
+ <p>
+ Removes contents of the current directory and creates an empty directory.
+
+ This does not fully format storage directory.
+ It cannot write the version file since it should be written last after
+ all other storage type dependent files are written.
+ Derived storage is responsible for setting specific storage values and
+ writing the version file to disk.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCurrentDir" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Directory {@code current} contains latest files defining
+ the file system meta-data.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="getVersionFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[File {@code VERSION} contains the following fields:
+ <ol>
+ <li>node type</li>
+ <li>layout version</li>
+ <li>namespaceID</li>
+ <li>fs state creation time</li>
+ <li>other fields specific for this node type</li>
+ </ol>
+ The version file is always written last during storage directory updates.
+ The existence of the version file indicates that all other files have
+ been successfully written in the storage directory, the storage is valid
+ and does not need to be recovered.
+
+ @return the version file path]]>
+ </doc>
+ </method>
+ <method name="getPreviousVersionFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[File {@code VERSION} from the {@code previous} directory.
+
+ @return the previous version file path]]>
+ </doc>
+ </method>
+ <method name="getPreviousDir" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Directory {@code previous} contains the previous file system state,
+ which the system can be rolled back to.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="getPreviousTmp" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@code previous.tmp} is a transient directory, which holds
+ current file system state while the new state is saved into the new
+ {@code current} during upgrade.
+ If the saving succeeds {@code previous.tmp} will be moved to
+ {@code previous}, otherwise it will be renamed back to
+ {@code current} by the recovery procedure during startup.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="getRemovedTmp" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@code removed.tmp} is a transient directory, which holds
+ current file system state while the previous state is moved into
+ {@code current} during rollback.
+ If the moving succeeds {@code removed.tmp} will be removed,
+ otherwise it will be renamed back to
+ {@code current} by the recovery procedure during startup.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="getFinalizedTmp" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@code finalized.tmp} is a transient directory, which holds
+ the {@code previous} file system state while it is being removed
+ in response to the finalize request.
+ Finalize operation will remove {@code finalized.tmp} when completed,
+ otherwise the removal will resume upon the system startup.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="getLastCheckpointTmp" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@code lastcheckpoint.tmp} is a transient directory, which holds
+ current file system state while the new state is saved into the new
+ {@code current} during regular namespace updates.
+ If the saving succeeds {@code lastcheckpoint.tmp} will be moved to
+ {@code previous.checkpoint}, otherwise it will be renamed back to
+ {@code current} by the recovery procedure during startup.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="getPreviousCheckpoint" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@code previous.checkpoint} is a directory, which holds the previous
+ (before the last save) state of the storage directory.
+ The directory is created as a reference only, it does not play role
+ in state recovery procedures, and is recycled automatically,
+ but it may be useful for manual recovery of a stale state of the system.
+
+ @return the directory path]]>
+ </doc>
+ </method>
+ <method name="analyzeStorage" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="startOpt" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check consistency of the storage directory
+
+ @param startOpt a startup option.
+
+ @return state {@link StorageState} of the storage directory
+ @throws InconsistentFSStateException if directory state is not
+ consistent and cannot be recovered.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="doRecover"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="curState" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete or recover storage state from previously failed transition.
+
+ @param curState specifies what/how the state should be recovered
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Lock storage to provide exclusive access.
+
+ <p> Locking is not supported by all file systems.
+ E.g., NFS does not consistently support exclusive locks.
+
+ <p> If locking is supported we guarantee exculsive access to the
+ storage directory. Otherwise, no guarantee is given.
+
+ @throws IOException if locking fails]]>
+ </doc>
+ </method>
+ <method name="unlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unlock storage.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[One of the storage directories.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
+ <!-- start interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
+ <interface name="Storage.StorageDirType" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isOfType" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
+ </method>
+ <doc>
+ <![CDATA[An interface to denote storage directory type
+ Implementations can define a type for storage directory by implementing
+ this interface.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
+ <class name="Storage.StorageState" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NON_EXISTENT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NOT_FORMATTED" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECOVER_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_FINALIZE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECOVER_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECOVER_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NORMAL" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.StorageInfo -->
+ <class name="StorageInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="StorageInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="StorageInfo" type="int, int, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="StorageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLayoutVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Layout version of the storage data.]]>
+ </doc>
+ </method>
+ <method name="getNamespaceID" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Namespace id of the file system.<p>
+ Assigned to the file system at formatting and never changes after that.
+ Shared by all file system components.]]>
+ </doc>
+ </method>
+ <method name="getCTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creation time of the file system state.<p>
+ Modified during upgrades.]]>
+ </doc>
+ </method>
+ <method name="setStorageInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="from" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="layoutVersion" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="namespaceID" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="cTime" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Common class for storage information.
+
+ TODO namespaceID should be long and computed as hash(address + port)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.StorageInfo -->
+ <!-- start interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
+ <interface name="Upgradeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the layout version of the upgrade object.
+ @return layout version]]>
+ </doc>
+ </method>
+ <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of the software component, which this object is upgrading.
+ @return type]]>
+ </doc>
+ </method>
+ <method name="getDescription" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Description of the upgrade object for displaying.
+ @return description]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Upgrade status determines a percentage of the work done out of the total
+ amount required by the upgrade.
+
+ 100% means that the upgrade is completed.
+ Any value < 100 means it is not complete.
+
+ The return value should provide at least 2 values, e.g. 0 and 100.
+ @return integer value in the range [0, 100].]]>
+ </doc>
+ </method>
+ <method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Prepare for the upgrade.
+ E.g. initialize upgrade data structures and set status to 0.
+
+ Returns an upgrade command that is used for broadcasting to other cluster
+ components.
+ E.g. name-node informs data-nodes that they must perform a distributed upgrade.
+
+ @return an UpgradeCommand for broadcasting.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete upgrade.
+ E.g. cleanup upgrade data structures or write metadata to disk.
+
+ Returns an upgrade command that is used for broadcasting to other cluster
+ components.
+ E.g. data-nodes inform the name-node that they completed the upgrade
+ while other data-nodes are still upgrading.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status report for the upgrade.
+
+ @param details true if upgradeStatus details need to be included,
+ false otherwise
+ @return {@link UpgradeStatusReport}
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Common interface for distributed upgrade objects.
+
+ Each upgrade object corresponds to a layout version,
+ which is the latest version that should be upgraded using this object.
+ That is all components whose layout version is greater or equal to the
+ one returned by {@link #getVersion()} must be upgraded with this object.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
+ <class name="UpgradeManager" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UpgradeManager"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBroadcastCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUpgradeState" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUpgradeVersion" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setUpgradeState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uState" type="boolean"/>
+ <param name="uVersion" type="int"/>
+ </method>
+ <method name="getDistributedUpgrades" return="java.util.SortedSet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initializeUpgrade" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isUpgradeCompleted" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="startUpgrade" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeUpgrade"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="currentUpgrades" type="java.util.SortedSet"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="upgradeState" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="upgradeVersion" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="broadcastCommand" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Generic upgrade manager.
+
+ {@link #broadcastCommand} is the command that should be]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
+ <class name="UpgradeObject" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
+ <constructor name="UpgradeObject"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDescription" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="status" type="short"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Abstract upgrade object.
+
+ Contains default implementation of common methods of {@link Upgradeable}
+ interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
+ <class name="UpgradeObjectCollection" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UpgradeObjectCollection"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getDistributedUpgrades" return="java.util.SortedSet"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="versionFrom" type="int"/>
+ <param name="type" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Collection of upgrade objects.
+
+ Upgrade objects should be registered here before they can be used.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
+ <class name="UpgradeStatusReport" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="UpgradeStatusReport"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UpgradeStatusReport" type="int, short, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the layout version of the currently running upgrade.
+ @return layout version]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get upgrade upgradeStatus as a percentage of the total upgrade done.
+
+ @see Upgradeable#getUpgradeStatus()]]>
+ </doc>
+ </method>
+ <method name="isFinalized" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is current upgrade finalized.
+ @return true if finalized or false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getStatusText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <doc>
+ <![CDATA[Get upgradeStatus data as a text for reporting.
+ Should be overloaded for a particular upgrade specific upgradeStatus data.
+
+ @param details true if upgradeStatus details need to be included,
+ false otherwise
+ @return text]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Print basic upgradeStatus details.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="version" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="upgradeStatus" type="short"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="finalized" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Base upgrade upgradeStatus class.
+ Overload this class if specific status fields need to be reported.
+
+ Describes status of current upgrade.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
+ <!-- start class org.apache.hadoop.hdfs.server.common.Util -->
+ <class name="Util" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Util"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="now" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Current system time.
+ @return current time in msec.]]>
+ </doc>
+ </method>
+ <method name="stringAsURI" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Interprets the passed string as a URI. In case of error it
+ assumes the specified string is a file.
+
+ @param s the string to interpret
+ @return the resulting URI
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="fileAsURI" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Converts the passed File to a URI.
+
+ @param f the file to convert
+ @return the resulting URI
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stringCollectionAsURIs" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.Collection"/>
+ <doc>
+ <![CDATA[Converts a collection of strings into a collection of URIs.
+ @param names collection of strings to convert to URIs
+ @return collection of URIs]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.common.Util -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.DataNode -->
+ <class name="DataNode" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="java.lang.Runnable"/>
+ <implements name="org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean"/>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use {@link NetUtils#createSocketAddr(String)} instead.]]>
+ </doc>
+ </method>
+ <method name="getInfoAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Determine the http server's effective addr]]>
+ </doc>
+ </method>
+ <method name="newSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates either NIO or regular depending on socketWriteTimeout.]]>
+ </doc>
+ </method>
+ <method name="getDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the DataNode object]]>
+ </doc>
+ </method>
+ <method name="createInterDataNodeProtocolProxy" return="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanodeid" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="socketTimeout" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNameNodeAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNameNodeAddrForClient" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSelfAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDatanodeRegistration" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return DatanodeRegistration]]>
+ </doc>
+ </method>
+ <method name="setNewStorageID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dnReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down this instance of the datanode.
+ Returns only after shutdown is complete.
+ This method can only be called by the offerService thread.
+ Otherwise, deadlock might occur.]]>
+ </doc>
+ </method>
+ <method name="checkDiskError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Exception"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if there is no space in disk
+ @param e that caused this checkDiskError call]]>
+ </doc>
+ </method>
+ <method name="checkDiskError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if there is a disk failure and if so, handle the error]]>
+ </doc>
+ </method>
+ <method name="offerService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Main loop for the DataNode. Runs until shutdown,
+ forever calling remote NameNode functions.]]>
+ </doc>
+ </method>
+ <method name="notifyNamenodeReceivedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="delHint" type="java.lang.String"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[No matter what kind of exception we get, keep retrying to offerService().
+ That's the loop that connects to the NameNode and provides basic DataNode
+ functionality.
+
+ Only stop when "shouldRun" is turned off (which can only happen at shutdown).]]>
+ </doc>
+ </method>
+ <method name="runDatanodeDaemon"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dn" type="org.apache.hadoop.hdfs.server.datanode.DataNode"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start a single datanode daemon and wait for it to finish.
+ If this thread is specifically interrupted, it will stop waiting.]]>
+ </doc>
+ </method>
+ <method name="instantiateDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instantiate a single datanode object. This must be run by invoking
+ {@link DataNode#runDatanodeDaemon(DataNode)} subsequently.]]>
+ </doc>
+ </method>
+ <method name="instantiateDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="resources" type="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instantiate a single datanode object, along with its secure resources.
+ This must be run by invoking{@link DataNode#runDatanodeDaemon(DataNode)}
+ subsequently.]]>
+ </doc>
+ </method>
+ <method name="createDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instantiate & Start a single datanode daemon and wait for it to finish.
+ If this thread is specifically interrupted, it will stop waiting.]]>
+ </doc>
+ </method>
+ <method name="createDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="resources" type="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instantiate & Start a single datanode daemon and wait for it to finish.
+ If this thread is specifically interrupted, it will stop waiting.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="scheduleBlockReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delay" type="long"/>
+ <doc>
+ <![CDATA[This methods arranges for the data node to send the block report at the next heartbeat.]]>
+ </doc>
+ </method>
+ <method name="getFSDataset" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method is used for testing.
+ Examples are adding and deleting blocks directly.
+ The most common usage will be when the data node's storage is similated.
+
+ @return the fsdataset that stores the blocks]]>
+ </doc>
+ </method>
+ <method name="secureMain"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="resources" type="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="recoverBlocks" return="org.apache.hadoop.util.Daemon"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="java.util.Collection"/>
+ </method>
+ <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="recoveryId" type="long"/>
+ <param name="newLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update replica with the new generation stamp and length.]]>
+ </doc>
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getReplicaVisibleLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getStreamingAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRpcPort" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getHttpPort" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNamenodeAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getVolumeInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returned information is a JSON representation of a map with
+ volume name as the key and value is a map of volume attribute
+ keys to its values]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DN_CLIENTTRACE_FORMAT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="namenode" type="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="data" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="dnRegistration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="EMPTY_DEL_HINT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockScanner" type="org.apache.hadoop.hdfs.server.datanode.DataBlockScanner"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockScannerThread" type="org.apache.hadoop.util.Daemon"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ipcServer" type="org.apache.hadoop.ipc.Server"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DataNode is a class (and program) that stores a set of
+ blocks for a DFS deployment. A single deployment can
+ have one or many DataNodes. Each DataNode communicates
+ regularly with a single NameNode. It also communicates
+ with client code and other DataNodes from time to time.
+
+ DataNodes store a series of named blocks. The DataNode
+ allows client code to read these blocks, or to write new
+ block data. The DataNode may also, in response to instructions
+ from its NameNode, delete blocks or copy blocks to/from other
+ DataNodes.
+
+ The DataNode maintains just one critical table:
+ block-> stream of bytes (of BLOCK_SIZE or less)
+
+ This info is stored on a local disk. The DataNode
+ reports the table's contents to the NameNode upon startup
+ and every so often afterwards.
+
+ DataNodes spend their lives in an endless loop of asking
+ the NameNode for something to do. A NameNode cannot connect
+ to a DataNode directly; a NameNode simply returns values from
+ functions invoked by a DataNode.
+
+ DataNodes maintain an open server socket so that client code
+ or other DataNodes can read/write data. The host/port for
+ this server is reported to the NameNode, which then sends that
+ information to clients or other DataNodes that might be interested.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.DataNode -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper -->
+ <class name="DatanodeJspHelper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DatanodeJspHelper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper -->
+ <!-- start interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean -->
+ <interface name="DataNodeMXBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the version of Hadoop.
+
+ @return the version of Hadoop]]>
+ </doc>
+ </method>
+ <method name="getRpcPort" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the rpc port.
+
+ @return the rpc port]]>
+ </doc>
+ </method>
+ <method name="getHttpPort" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the http port.
+
+ @return the http port]]>
+ </doc>
+ </method>
+ <method name="getNamenodeAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the namenode IP address.
+
+ @return the namenode IP address]]>
+ </doc>
+ </method>
+ <method name="getVolumeInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the information of each volume on the Datanode. Please
+ see the implementation for the format of returned information.
+
+ @return the volume info]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for data node information]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
+ <class name="DataStorage" extends="org.apache.hadoop.hdfs.server.common.Storage"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataStorage" type="org.apache.hadoop.hdfs.server.common.StorageInfo, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getStorageID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isPreUpgradableLayout" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Data storage information file.
+ <p>
+ @see Storage]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.DirectoryScanner -->
+ <class name="DirectoryScanner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Periodically scans the data directories for block and block metadata files.
+ Reconciles the differences with block information maintained in
+ {@link FSDataset}]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.DirectoryScanner -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
+ <class name="FSDataset" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"/>
+ <constructor name="FSDataset" type="org.apache.hadoop.hdfs.server.datanode.DataStorage, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[An FSDataset has a directory where it loads its data files.]]>
+ </doc>
+ </constructor>
+ <method name="getMetaFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="findBlockFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <doc>
+ <![CDATA[Return the block file for the given ID]]>
+ </doc>
+ </method>
+ <method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blkid" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaFileExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMetaDataLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total space used by dfs datanode]]>
+ </doc>
+ </method>
+ <method name="hasEnoughResource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true - if there are still valid volumes on the DataNode.]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return total capacity, used and unused]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return how many bytes can still be stored in the FSDataset]]>
+ </doc>
+ </method>
+ <method name="getNumFailedVolumes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of failed volumes in the FSDataset.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Find the block's on-disk length]]>
+ </doc>
+ </method>
+ <method name="getBlockFile" return="java.io.File"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get File name for a given block.]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="seekOffset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blkOffset" type="long"/>
+ <param name="ckoff" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns handles to the block file and its metadata file]]>
+ </doc>
+ </method>
+ <method name="unlinkBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="numLinks" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a copy of the block if this block is linked to an existing
+ snapshot. This ensures that modifying this block does not modify
+ data in any existing snapshots.
+ @param block Block
+ @param numLinks Unlink if the number of links exceed this value
+ @throws IOException
+ @return - true if the specified block was unlinked or the block
+ is not in any snapshot.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="expectedBlockLen" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="recoverAppend" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="expectedBlockLen" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="recoverClose"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="expectedBlockLen" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="recoverRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="minBytesRcvd" type="long"/>
+ <param name="maxBytesRcvd" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createTemporary" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="adjustCrcChannelPosition"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="streams" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
+ <param name="checksumSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets the offset in the meta file so that the
+ last checksum will be overwritten.]]>
+ </doc>
+ </method>
+ <method name="finalizeBlock"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete the block write!]]>
+ </doc>
+ </method>
+ <method name="unfinalizeBlock"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Remove the temporary block file (if any)]]>
+ </doc>
+ </method>
+ <method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Generates a block report from the in-memory block map.]]>
+ </doc>
+ </method>
+ <method name="isValidBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <doc>
+ <![CDATA[Check whether the given block is a valid one.
+ valid means finalized]]>
+ </doc>
+ </method>
+ <method name="invalidate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[We're informed that a block is no longer valid. We
+ could lazily garbage-collect the block, but why bother?
+ just get rid of it.]]>
+ </doc>
+ </method>
+ <method name="getFile" return="java.io.File"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <doc>
+ <![CDATA[Turn the block identifier into a filename; ignore generation stamp!!!]]>
+ </doc>
+ </method>
+ <method name="checkDataDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ <doc>
+ <![CDATA[check if a data directory is healthy
+ if some volumes failed - make sure to remove all the blocks that belong
+ to these volumes
+ @throws DiskErrorException]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStorageInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="checkAndUpdate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <param name="diskFile" type="java.io.File"/>
+ <param name="diskMetaFile" type="java.io.File"/>
+ <param name="vol" type="org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume"/>
+ <doc>
+ <![CDATA[Reconcile the difference between blocks on the disk and blocks in
+ volumeMap
+
+ Check the given block for inconsistencies. Look at the
+ current state of the block and reconcile the differences as follows:
+ <ul>
+ <li>If the block file is missing, delete the block from volumeMap</li>
+ <li>If the block file exists and the block is missing in volumeMap,
+ add the block to volumeMap <li>
+ <li>If generation stamp does not match, then update the block with right
+ generation stamp</li>
+ <li>If the block length in memory does not match the actual block file length
+ then mark the block as corrupt and update the block length in memory</li>
+ <li>If the file in {@link ReplicaInfo} does not match the file on
+ the disk, update {@link ReplicaInfo} with the correct file</li>
+ </ul>
+
+ @param blockId Block that differs
+ @param diskFile Block file on the disk
+ @param diskMetaFile Metadata file from on the disk
+ @param vol Volume of the block file]]>
+ </doc>
+ </method>
+ <method name="getReplica" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #fetchReplicaInfo(long)} instead.">
+ <param name="blockId" type="long"/>
+ <doc>
+ <![CDATA[@deprecated use {@link #fetchReplicaInfo(long)} instead.]]>
+ </doc>
+ </method>
+ <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="recoveryId" type="long"/>
+ <param name="newlength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReplicaVisibleLength" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="METADATA_EXTENSION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="METADATA_VERSION" type="short"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[FSDataset manages a set of data blocks. Each block
+ has a unique name and an extent on disk.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
+ <!-- start interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
+ <interface name="FSDatasetInterface" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean"/>
+ <method name="getMetaDataLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the length of the metadata file of the specified block
+ @param b - the block for which the metadata length is desired
+ @return the length of the metadata file for the specified block.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns metaData of block b as an input stream (and its length)
+ @param b - the block
+ @return the metadata input stream;
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="metaFileExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Does the meta file exist for this block?
+ @param b - the block
+ @return true of the metafile for specified block exits
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the specified block's on-disk length (excluding metadata)
+ @param b
+ @return the specified block's on-disk length (excluding metadta)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getReplica" return="org.apache.hadoop.hdfs.server.datanode.Replica"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <doc>
+ <![CDATA[Get reference to the replica meta info in the replicasMap.
+ To be called from methods that are synchronized on {@link FSDataset}
+ @param blockId
+ @return replica from the replicas map]]>
+ </doc>
+ </method>
+ <method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blkid" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return the generation stamp stored with the block.]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream to read the contents of the specified block
+ @param b
+ @return an input stream to read the contents of the specified block
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="seekOffset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream at specified offset of the specified block
+ @param b
+ @param seekOffset
+ @return an input stream to read the contents of the specified block,
+ starting at the offset
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="blkoff" type="long"/>
+ <param name="ckoff" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream at specified offset of the specified block
+ The block is still in the tmp directory and is not finalized
+ @param b
+ @param blkoff
+ @param ckoff
+ @return an input stream to read the contents of the specified block,
+ starting at the offset
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createTemporary" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a temporary replica and returns the meta information of the replica
+
+ @param b block
+ @return the meta info of the replica which is being written to
+ @throws IOException if an error occurs]]>
+ </doc>
+ </method>
+ <method name="createRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a RBW replica and returns the meta info of the replica
+
+ @param b block
+ @return the meta info of the replica which is being written to
+ @throws IOException if an error occurs]]>
+ </doc>
+ </method>
+ <method name="recoverRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="minBytesRcvd" type="long"/>
+ <param name="maxBytesRcvd" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recovers a RBW replica and returns the meta info of the replica
+
+ @param b block
+ @param newGS the new generation stamp for the replica
+ @param minBytesRcvd the minimum number of bytes that the replica could have
+ @param maxBytesRcvd the maximum number of bytes that the replica could have
+ @return the meta info of the replica which is being written to
+ @throws IOException if an error occurs]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="expectedBlockLen" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to a finalized replica and returns the meta info of the replica
+
+ @param b block
+ @param newGS the new generation stamp for the replica
+ @param expectedBlockLen the number of bytes the replica is expected to have
+ @return the meta info of the replica which is being written to
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="recoverAppend" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="expectedBlockLen" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recover a failed append to a finalized replica
+ and returns the meta info of the replica
+
+ @param b block
+ @param newGS the new generation stamp for the replica
+ @param expectedBlockLen the number of bytes the replica is expected to have
+ @return the meta info of the replica which is being written to
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="recoverClose"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newGS" type="long"/>
+ <param name="expectedBlockLen" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recover a failed pipeline close
+ It bumps the replica's generation stamp and finalizes it if it is an RBW replica
+
+ @param b block
+ @param newGS the new generation stamp for the replica
+ @param expectedBlockLen the number of bytes the replica is expected to have
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalizeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalizes the block previously opened for writing using writeToBlock.
+ The block size is what is in the parameter b and it must match the amount
+ of data written
+ @param b
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unfinalizeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unfinalizes the block previously opened for writing using writeToBlock.
+ The temporary file associated with this block is deleted.
+ @param b
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the block report - the full list of blocks stored
+ @return - the block report - the full list of blocks stored]]>
+ </doc>
+ </method>
+ <method name="isValidBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <doc>
+ <![CDATA[Is the block valid?
+ @param b
+ @return - true if the specified block is valid]]>
+ </doc>
+ </method>
+ <method name="invalidate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Invalidates the specified blocks
+ @param invalidBlks - the blocks to be invalidated
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkDataDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ <doc>
+ <![CDATA[Check if all the data directories are healthy
+ @throws DiskErrorException]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stringifies the name of the storage]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shutdown the FSDataset]]>
+ </doc>
+ </method>
+ <method name="adjustCrcChannelPosition"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="stream" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
+ <param name="checksumSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets the file pointer of the checksum stream so that the last checksum
+ will be overwritten
+ @param b block
+ @param stream The stream for the data file and checksum file
+ @param checksumSize number of bytes each checksum has
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="hasEnoughResource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Checks how many valid storage volumes there are in the DataNode.
+ @return true if more than the minimum number of valid volumes are left
+ in the FSDataSet.]]>
+ </doc>
+ </method>
+ <method name="getReplicaVisibleLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get visible length of the specified replica.]]>
+ </doc>
+ </method>
+ <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize a replica recovery.
+
+ @return actual state of the replica on this data-node or
+ null if data-node does not have the replica.]]>
+ </doc>
+ </method>
+ <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="recoveryId" type="long"/>
+ <param name="newLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update replica's generation stamp and length and finalize it.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is an interface for the underlying storage that stores blocks for
+ a data node.
+ Examples are the FSDataset (which stores blocks on dirs) and
+ SimulatedFSDataset (which simulates data).]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
+ <class name="FSDatasetInterface.BlockInputStreams" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class contains the input streams for the data and checksum
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
+ <class name="FSDatasetInterface.BlockWriteStreams" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This class contains the output streams for the data and checksum
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
+ <class name="FSDatasetInterface.MetaDataInputStream" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides the input stream and length of the metadata
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
+ <!-- start interface org.apache.hadoop.hdfs.server.datanode.Replica -->
+ <interface name="Replica" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getBlockId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the block ID]]>
+ </doc>
+ </method>
+ <method name="getGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the generation stamp]]>
+ </doc>
+ </method>
+ <method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the replica state
+ @return the replica state]]>
+ </doc>
+ </method>
+ <method name="getNumBytes" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of bytes received
+ @return the number of bytes that have been received]]>
+ </doc>
+ </method>
+ <method name="getBytesOnDisk" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of bytes that have been written to disk
+ @return the number of bytes that have been written to disk]]>
+ </doc>
+ </method>
+ <method name="getVisibleLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of bytes that are visible to readers
+ @return the number of bytes that are visible to readers]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This represents block replicas which are stored in DataNode.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.datanode.Replica -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo -->
+ <class name="ReplicaInfo" extends="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.datanode.Replica"/>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class is used by datanodes to maintain meta data of its replicas.
+ It provides a general interface for meta information of a replica.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException -->
+ <class name="ReplicaNotFoundException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReplicaNotFoundException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ReplicaNotFoundException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Exception indicating that DataNode does not have a replica
+ that matches the target block.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter -->
+ <class name="SecureDataNodeStarter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.commons.daemon.Daemon"/>
+ <constructor name="SecureDataNodeStarter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.commons.daemon.DaemonContext"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="destroy"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Utility class to start a datanode in a secure cluster, first obtaining
+ privileged resources before main startup and handing them to the datanode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources -->
+ <class name="SecureDataNodeStarter.SecureResources" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SecureDataNodeStarter.SecureResources" type="java.net.ServerSocket, org.mortbay.jetty.nio.SelectChannelConnector"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getStreamingSocket" return="java.net.ServerSocket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getListener" return="org.mortbay.jetty.nio.SelectChannelConnector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Stash necessary resources needed for datanode operation in a secure env.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
+ <class name="UpgradeObjectDatanode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="UpgradeObjectDatanode"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDatanode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpgrade"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Specifies how the upgrade is performed.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete upgrade and return a status complete command for broadcasting.
+
+ Data-nodes finish upgrade at different times.
+ The data-node needs to re-confirm with the name-node that the upgrade
+ is complete while other nodes are still upgrading.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for data-node upgrade objects.
+ Data-node upgrades are run in separate threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
+ <class name="DataNodeActivityMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataNodeActivityMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is the JMX MBean for reporting the DataNode Activity.
+ The MBean is register using the name
+ "hadoop:service=DataNode,name=DataNodeActivity-<hostname>-<portNumber>"
+
+ Many of the activity metrics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the metrics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most metrics contexts do.
+ The default Null metrics context however does NOT. So if you aren't
+ using any other metrics context then you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ dfs.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically
+
+
+
+ Impl details: We use a dynamic mbean that gets the list of the metrics
+ from the metrics registry passed as an argument to the constructor]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
+ <!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
+ <class name="DataNodeMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="DataNodeMetrics" type="org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bytesWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bytesRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksReplicated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksRemoved" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksVerified" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockVerificationFailures" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readsFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readsFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writesFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writesFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="volumeFailures" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writeBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockChecksumOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="copyBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="replaceBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="heartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockReports" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various DataNode statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #blocksRead}.inc()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
+ <!-- start interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
+ <interface name="FSDatasetMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the total space (in bytes) used by dfs datanode
+ @return the total space used by dfs datanode
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns total capacity (in bytes) of storage (used and unused)
+ @return total capacity of storage (used and unused)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the amount of free storage space (in bytes)
+ @return The amount of free storage space
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getStorageInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the storage id of the underlying storage]]>
+ </doc>
+ </method>
+ <method name="getNumFailedVolumes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of failed volumes in the datanode.
+ @return The number of failed volumes in the datanode.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This Interface defines the methods to get the status of the FSDataset of
+ a data node.
+ It is also used for publishing via JMX (hence we follow the JMX naming
+ convention.)
+ * Note we have not used the MetricsDynamicMBeanBase to implement this
+ because the interface for the FSDatasetMBean is stable and should
+ be published as an interface.
+
+ <p>
+ Data Node runtime statistic info is reported in another MBean
+ @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.BackupNode -->
+ <class name="BackupNode" extends="org.apache.hadoop.hdfs.server.namenode.NameNode"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRpcServerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getServiceRpcServerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setRpcServerAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setRpcServiceServerAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getHttpServerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setHttpServerAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="loadNamesystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="size" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="register" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startCheckpoint" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endCheckpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="journal"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nnReg" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="jAction" type="int"/>
+ <param name="length" type="int"/>
+ <param name="args" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[BackupNode.
+ <p>
+ Backup node can play two roles.
+ <ol>
+ <li>{@link NamenodeRole#CHECKPOINT} node periodically creates checkpoints,
+ that is downloads image and edits from the active node, merges them, and
+ uploads the new image back to the active.</li>
+ <li>{@link NamenodeRole#BACKUP} node keeps its namespace in sync with the
+ active node, and periodically creates checkpoints by simply saving the
+ namespace image to local disk(s).</li>
+ </ol>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.BackupNode -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.BackupStorage -->
+ <class name="BackupStorage" extends="org.apache.hadoop.hdfs.server.namenode.FSImage"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="isPreUpgradableLayout" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.BackupStorage -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockManager -->
+ <class name="BlockManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="processReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <param name="report" type="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The given node is reporting all its blocks. Use this info to
+ update the (machine-->blocklist) and (block-->machinelist) tables.]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_INITIAL_MAP_CAPACITY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_MAP_LOAD_FACTOR" type="float"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_MAX_CORRUPT_FILES_RETURNED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Keeps information related to the blocks stored in the Hadoop cluster.
+ This class is a helper class for {@link FSNamesystem} and requires several
+ methods to be called with lock held on {@link FSNamesystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockManager -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy -->
+ <class name="BlockPlacementPolicy" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockPlacementPolicy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="verifyBlockPlacement" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="lBlk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
+ <param name="minRacks" type="int"/>
+ <doc>
+ <![CDATA[Verify that the block is replicated on at least minRacks different racks
+ if there are more than minRacks racks in the system.
+
+ @param srcPath the full pathname of the file to be verified
+ @param lBlk block with locations
+ @param minRacks number of racks the block should be replicated to
+ @return the difference between the required and the actual number of racks
+ the block is replicated to.]]>
+ </doc>
+ </method>
+ <method name="chooseReplicaToDelete" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcInode" type="org.apache.hadoop.hdfs.server.namenode.FSInodeInfo"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="replicationFactor" type="short"/>
+ <param name="existingReplicas" type="java.util.Collection"/>
+ <param name="moreExistingReplicas" type="java.util.Collection"/>
+ <doc>
+ <![CDATA[Decide whether deleting the specified replica of the block still makes
+ the block conform to the configured block placement policy.
+
+ @param srcInode The inode of the file to which the block-to-be-deleted belongs
+ @param block The block to be deleted
+ @param replicationFactor The required number of replicas for this block
+ @param existingReplicas The replica locations of this block that are present
+ on at least two unique racks.
+ @param moreExistingReplicas Replica locations of this block that are not
+ listed in the previous parameter.
+ @return the replica that is the best candidate for deletion]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="stats" type="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
+ <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
+ <doc>
+ <![CDATA[Used to set up a BlockPlacementPolicy object. This should be defined by
+ all implementations of a BlockPlacementPolicy.
+
+ @param conf the configuration object
+ @param stats retrieve cluster status from here
+ @param clusterMap cluster topology]]>
+ </doc>
+ </method>
+ <method name="getInstance" return="org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="stats" type="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
+ <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
+ <doc>
+ <![CDATA[Get an instance of the configured Block Placement Policy based on the
+ value of the configuration parameter dfs.block.replicator.classname.
+
+ @param conf the configuration to be used
+ @param stats an object that is used to retrieve the load on the cluster
+ @param clusterMap the network topology of the cluster
+ @return an instance of BlockPlacementPolicy]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface is used for choosing the desired number of targets
+ for placing block replicas.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy.NotEnoughReplicasException -->
+ <class name="BlockPlacementPolicy.NotEnoughReplicasException" extends="java.lang.Exception"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy.NotEnoughReplicasException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault -->
+ <class name="BlockPlacementPolicyDefault" extends="org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="stats" type="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
+ <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="chooseTarget" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="numOfReplicas" type="int"/>
+ <param name="writer" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <param name="chosenNodes" type="java.util.List"/>
+ <param name="blocksize" type="long"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="chooseTarget" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="numOfReplicas" type="int"/>
+ <param name="writer" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <param name="chosenNodes" type="java.util.List"/>
+ <param name="excludedNodes" type="java.util.HashMap"/>
+ <param name="blocksize" type="long"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="chooseTarget" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcInode" type="org.apache.hadoop.hdfs.server.namenode.FSInodeInfo"/>
+ <param name="numOfReplicas" type="int"/>
+ <param name="writer" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <param name="chosenNodes" type="java.util.List"/>
+ <param name="blocksize" type="long"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="verifyBlockPlacement" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="lBlk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
+ <param name="minRacks" type="int"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="chooseReplicaToDelete" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.FSInodeInfo"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="replicationFactor" type="short"/>
+ <param name="first" type="java.util.Collection"/>
+ <param name="second" type="java.util.Collection"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The class is responsible for choosing the desired number of targets
+ for placing block replicas.
+ The replica placement strategy is that if the writer is on a datanode,
+ the 1st replica is placed on the local machine,
+ otherwise a random datanode. The 2nd replica is placed on a datanode
+ that is on a different rack. The 3rd replica is placed on a datanode
+ which is on a different node of the same rack as the second replica.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet -->
+ <class name="CancelDelegationTokenServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CancelDelegationTokenServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="resp" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="PATH_SPEC" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TOKEN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Cancel delegation tokens over http for use in hftp.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
+ <class name="CheckpointSignature" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="CheckpointSignature"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A unique signature intended to identify checkpoint transactions.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.ContentSummaryServlet -->
+ <class name="ContentSummaryServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ContentSummaryServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Servlet that serves the content summary of a path]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.ContentSummaryServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
+ <class name="CorruptReplicasMap" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CorruptReplicasMap"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addToCorruptReplicasMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="dn" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <doc>
+ <![CDATA[Mark the block belonging to datanode as corrupt.
+
+ @param blk Block to be added to CorruptReplicasMap
+ @param dn DatanodeDescriptor which holds the corrupt replica]]>
+ </doc>
+ </method>
+ <method name="numCorruptReplicas" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Stores information about all corrupt blocks in the File System.
+ A Block is considered corrupt only if all of its replicas are
+ corrupt. While reporting replicas of a Block, we hide any corrupt
+ copies. These copies are removed once Block is found to have
+ expected number of good replicas.
+ Mapping: Block -> TreeSet<DatanodeDescriptor>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
+ <class name="DatanodeDescriptor" extends="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DatanodeDescriptor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+ @param nodeID id of the data node]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network
+ @param hostName it could be different from host specified for DatanodeID]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, long, long, long, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param capacity capacity of the data node
+ @param dfsUsed space used by the data node
+ @param remaining remaining capacity of the data node
+ @param xceiverCount # of data transfers at the data node]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String, long, long, long, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network
+ @param capacity capacity of the data node, including space used by non-dfs
+ @param dfsUsed the used space by dfs datanode
+ @param remaining remaining capacity of the data node
+ @param xceiverCount # of data transfers at the data node]]>
+ </doc>
+ </constructor>
+ <method name="numBlocks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlocksScheduled" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Approximate number of blocks currently scheduled to be written
+ to this datanode.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="getVolumeFailures" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return number of failed volumes in the datanode.]]>
+ </doc>
+ </method>
+ <method name="updateRegInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <doc>
+ <![CDATA[@param nodeReg DatanodeID to update registration for.]]>
+ </doc>
+ </method>
+ <field name="isAlive" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="needKeyUpdate" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeDescriptor tracks stats on a given DataNode, such as
+ available storage capacity, last update time, etc., and maintains a
+ set of blocks stored on the datanode.
+
+ This data structure is internal to the namenode. It is *not* sent
+ over-the-wire to the Client or the Datanodes. Neither is it stored
+ persistently in the fsImage.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
+ <class name="DatanodeDescriptor.BlockTargetPair" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="block" type="org.apache.hadoop.hdfs.protocol.Block"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="targets" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Block and targets pair]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
+ <class name="FileChecksumServlets" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileChecksumServlets"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Servlets for file checksum]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
+ <class name="FileChecksumServlets.GetServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileChecksumServlets.GetServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Get FileChecksum]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
+ <class name="FileChecksumServlets.RedirectServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileChecksumServlets.RedirectServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Redirect file checksum queries to an appropriate datanode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
+ <class name="FileDataServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileDataServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="parent" type="java.lang.String"/>
+ <param name="i" type="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"/>
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="nnproxy" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="dt" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="URISyntaxException" type="java.net.URISyntaxException"/>
+ <doc>
+ <![CDATA[Create a redirection URI]]>
+ </doc>
+ </method>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Service a GET request as described below.
+ Request:
+ {@code
+ GET http://<nn>:<port>/data[/<path>] HTTP/1.1
+ }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Redirect queries about the hosted filesystem to an appropriate datanode.
+ @see org.apache.hadoop.hdfs.HftpFileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
+ <class name="FsckServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FsckServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Handle fsck request]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class is used in Namesystem's web server to do fsck on namenode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
+ <!-- start interface org.apache.hadoop.hdfs.server.namenode.FSClusterStats -->
+ <interface name="FSClusterStats" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTotalLoad" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[an indication of the total load of the cluster.
+
+ @return a count of the total number of block transfers and block
+ writes that are currently occuring on the cluster.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface is used for retrieving the load related statistics of
+ the cluster.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.namenode.FSClusterStats -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
+ <class name="FSEditLog" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="logSync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sync all modifications done by this thread.
+
+ The internal concurrency design of this class is as follows:
+ - Log items are written synchronized into an in-memory buffer,
+ and each assigned a transaction ID.
+ - When a thread (client) would like to sync all of its edits, logSync()
+ uses a ThreadLocal transaction ID to determine what edit number must
+ be synced to.
+ - The isSyncRunning volatile boolean tracks whether a sync is currently
+ under progress.
+
+ The data is double-buffered within each edit log implementation so that
+ in-memory writing can occur in parallel with the on-disk writing.
+
+ Each sync occurs in three steps:
+ 1. synchronized, it swaps the double buffer and sets the isSyncRunning
+ flag.
+ 2. unsynchronized, it flushes the data to storage
+ 3. synchronized, it resets the flag and notifies anyone waiting on the
+ sync.
+
+ The lack of synchronization on step 2 allows other threads to continue
+ to write into the memory buffer while the sync is in progress.
+ Because this step is unsynchronized, actions that need to avoid
+ concurrency with sync() should be synchronized and also call
+ waitForSyncToFinish() before assuming they are running alone.]]>
+ </doc>
+ </method>
+ <method name="logOpenFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction"/>
+ <doc>
+ <![CDATA[Add open lease record to edit log.
+ Records the block locations of the last block.]]>
+ </doc>
+ </method>
+ <method name="logCloseFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFile"/>
+ <doc>
+ <![CDATA[Add close lease record to edit log.]]>
+ </doc>
+ </method>
+ <method name="logMkDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
+ <doc>
+ <![CDATA[Add create directory record to edit log]]>
+ </doc>
+ </method>
+ <method name="setBufferCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ </method>
+ <method name="getOutputStreamIterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="streamType" type="org.apache.hadoop.hdfs.server.namenode.JournalStream.JournalType"/>
+ <doc>
+ <![CDATA[Get stream iterator for the specified type.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[FSEditLog maintains a log of the namespace modifications.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader -->
+ <class name="FSEditLogLoader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSEditLogLoader" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FSImage -->
+ <class name="FSImage" extends="org.apache.hadoop.hdfs.server.common.Storage"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSImage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getFSNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="setRestoreFailedStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="boolean"/>
+ </method>
+ <method name="getRestoreFailedStorage" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="props" type="java.util.Properties"/>
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write last checkpoint time and version file into the storage directory.
+
+ The version file should always be written last.
+ Missing or corrupted version file indicates that
+ the checkpoint is not valid.
+
+ @param sd storage directory
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getEditLog" return="org.apache.hadoop.hdfs.server.namenode.FSEditLog"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isPreUpgradableLayout" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setImageDigest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="digest" type="org.apache.hadoop.io.MD5Hash"/>
+ </method>
+ <method name="saveCurrent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Save current image and empty journal into {@code current} directory.]]>
+ </doc>
+ </method>
+ <method name="moveCurrent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move {@code current} to {@code lastcheckpoint.tmp} and
+ recreate empty {@code current}.
+ {@code current} is moved only if it is well formatted,
+ that is contains VERSION file.
+
+ @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getLastCheckpointTmp()
+ @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getPreviousCheckpoint()]]>
+ </doc>
+ </method>
+ <method name="moveLastCheckpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move {@code lastcheckpoint.tmp} to {@code previous.checkpoint}
+
+ @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getPreviousCheckpoint()
+ @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getLastCheckpointTmp()]]>
+ </doc>
+ </method>
+ <method name="format"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFsEditName" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="namesystem" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="checkpointTime" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="editLog" type="org.apache.hadoop.hdfs.server.namenode.FSEditLog"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="imageDigest" type="org.apache.hadoop.io.MD5Hash"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="newImageDigest" type="org.apache.hadoop.io.MD5Hash"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="removedStorageDirs" type="java.util.List"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[list of failed (and thus removed) storages]]>
+ </doc>
+ </field>
+ <field name="ckptState" type="org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates"
+ transient="false" volatile="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Can fs-image be rolled?]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[FSImage handles checkpointing and logging of the namespace edits.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FSImage -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FSImageSerialization -->
+ <class name="FSImageSerialization" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readPathComponents" return="byte[][]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reading the path from the image and converting it to byte[][] directly
+ this saves us an array copy and conversions to and from String
+ @param in
+ @return the array each element of which is a byte[] representation
+ of a path component
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Static utility functions for serializing various pieces of data in the correct
+ format for the FSImage file.
+
+ Some members are currently public for the benefit of the Offline Image Viewer
+ which is located outside of this package. These members should be made
+ package-protected when the OIV is refactored.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FSImageSerialization -->
+ <!-- start interface org.apache.hadoop.hdfs.server.namenode.FSInodeInfo -->
+ <interface name="FSInodeInfo" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getFullPathName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a string representation of an inode
+
+ @return the full pathname (from root) that this inode represents]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface is used used the pluggable block placement policy
+ to expose a few characteristics of an Inode.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.namenode.FSInodeInfo -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
+ <class name="FSNamesystem" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <implements name="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean"/>
+ <implements name="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
+ <implements name="org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean"/>
+ <method name="getNamespaceDirs" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getStorageDirs" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="propertyName" type="java.lang.String"/>
+ </method>
+ <method name="getNamespaceEditsDirs" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getUpgradePermission" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the default path permission when upgrading from releases with no
+ permissions (<=0.15) to releases with permissions (>=0.16)]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Close down this file system manager.
+ Causes heartbeat and lease daemons to stop; waits briefly for
+ them to finish, but a short timeout returns control back to caller.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permissions for an existing file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="group" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set owner for an existing file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="concat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="srcs" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Moves all the blocks from srcs and appends them to trg
+ To avoid rollbacks we will verify validitity of ALL of the args
+ before we start actual move.
+ @param target
+ @param srcs
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[stores the modification and access time for this inode.
+ The access time is precise upto an hour. The transaction, if needed, is
+ written to the edits log but is not flushed.]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="link" type="java.lang.String"/>
+ <param name="dirPerms" type="org.apache.hadoop.fs.permission.PermissionStatus"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create a symbolic link.]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ The NameNode sets new replication and schedules either replication of
+ under-replicated data blocks or removal of the excessive block copies
+ if the blocks are over-replicated.
+
+ @see ClientProtocol#setReplication(String, short)
+ @param src file name
+ @param replication new replication
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="getAdditionalBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="previous" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="excludedNodes" type="java.util.HashMap"/>
+ <exception name="LeaseExpiredException" type="org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException"/>
+ <exception name="NotReplicatedYetException" type="org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException"/>
+ <exception name="QuotaExceededException" type="org.apache.hadoop.hdfs.protocol.QuotaExceededException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client would like to obtain an additional block for the indicated
+ filename (which is being written-to). Return an array that consists
+ of the block, plus a set of machines. The first on this list should
+ be where the client writes data. Subsequent items in the list must
+ be provided in the connection to the first datanode.
+
+ Make sure the previous blocks have been reported by datanodes and
+ are replicated. Will return an empty 2-elt array if we want the
+ client to "try again later".]]>
+ </doc>
+ </method>
+ <method name="abandonBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="LeaseExpiredException" type="org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client would like to let go of the given block]]>
+ </doc>
+ </method>
+ <method name="completeFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <param name="last" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete in-progress write to the given file.
+ @return true if successful, false if the client should continue to retry
+ (e.g if not all blocks have reached minimum replication yet)
+ @throws IOException on error (eg lease mismatch, file not open, file deleted)]]>
+ </doc>
+ </method>
+ <method name="markBlockAsCorrupt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="dn" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the block belonging to datanode as corrupt
+ @param blk Block to be marked as corrupt
+ @param dn Datanode which holds the corrupt replica]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Remove the indicated file from namespace.
+
+ @see ClientProtocol#delete(String, boolean) for detailed descriptoin and
+ description of exceptions]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permissions" type="org.apache.hadoop.fs.permission.PermissionStatus"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <doc>
+ <![CDATA[Create all the necessary directories]]>
+ </doc>
+ </method>
+ <method name="getListing" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="startAfter" type="byte[]"/>
+ <param name="needLocation" type="boolean"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a partial listing of the indicated directory
+
+ @param src the directory name
+ @param startAfter the name to start after
+ @param needLocation if blockLocations need to be returned
+ @return a partial listing starting after startAfter
+
+ @throws AccessControlException if access is denied
+ @throws UnresolvedLinkException if symbolic link is encountered
+ @throws IOException if other I/O error occurred]]>
+ </doc>
+ </method>
+ <method name="registerDatanode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Register Datanode.
+ <p>
+ The purpose of registration is to identify whether the new datanode
+ serves a new data storage, and will report new data block copies,
+ which the namenode was not aware of; or the datanode is a replacement
+ node for the data storage that was previously served by a different
+ or the same (in terms of host:port) datanode.
+ The data storages are distinguished by their storageIDs. When a new
+ data storage is reported the namenode issues a new unique storageID.
+ <p>
+ Finally, the namenode returns its namespaceID as the registrationID
+ for the datanodes.
+ namespaceID is a persistent attribute of the name space.
+ The registrationID is checked every time the datanode is communicating
+ with the namenode.
+ Datanodes with inappropriate registrationID are rejected.
+ If the namenode stops, and then restarts it can restore its
+ namespaceID and will continue serving the datanodes that has previously
+ registered with the namenode without restarting the whole cluster.
+
+ @see org.apache.hadoop.hdfs.server.datanode.DataNode#register()]]>
+ </doc>
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get registrationID for datanodes based on the namespaceID.
+
+ @see #registerDatanode(DatanodeRegistration)
+ @see FSImage#newNamespaceID()
+ @return registration ID]]>
+ </doc>
+ </method>
+ <method name="computeDatanodeWork" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Compute block replication and block invalidation work
+ that can be scheduled on data-nodes.
+ The datanode will be informed of this work at the next heartbeat.
+
+ @return number of blocks scheduled for replication or removal.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setNodeReplicationLimit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="limit" type="int"/>
+ </method>
+ <method name="removeDatanode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Remove a datanode descriptor.
+ @param nodeID datanode ID.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="processReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <param name="newReport" type="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The given node is reporting all its blocks. Use this info to
+ update the (machine-->blocklist) and (block-->machinelist) tables.]]>
+ </doc>
+ </method>
+ <method name="blockReceived"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="delHint" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The given node is reporting that it received a certain block.]]>
+ </doc>
+ </method>
+ <method name="getMissingBlocksCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCapacityTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total raw bytes including non-dfs used space.]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total used space by data nodes]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsedPercent" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total used space by data nodes as percentage of total capacity]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsedNonDFS" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total used space by data nodes for non DFS purposes such
+ as storing temporary files on the local file system]]>
+ </doc>
+ </method>
+ <method name="getCapacityRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total non-used raw bytes.]]>
+ </doc>
+ </method>
+ <method name="getCapacityRemainingPercent" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total remaining space by data nodes as percentage of total capacity]]>
+ </doc>
+ </method>
+ <method name="getTotalLoad" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total number of connections.]]>
+ </doc>
+ </method>
+ <method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ </method>
+ <method name="DFSNodesStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="live" type="java.util.ArrayList"/>
+ <param name="dead" type="java.util.ArrayList"/>
+ </method>
+ <method name="stopDecommission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stop decommissioning the specified datanodes.]]>
+ </doc>
+ </method>
+ <method name="getDataNodeInfo" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getStartTime" return="java.util.Date"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rereads the config to get hosts and exclude list file names.
+ Rereads the files to update the hosts and exclude lists. It
+ checks if any of the hosts have changed states:
+ 1. Added to hosts --> no further work needed here.
+ 2. Removed from hosts --> mark AdminState as decommissioned.
+ 3. Added to exclude --> start decommission.
+ 4. Removed from exclude --> stop decommission.]]>
+ </doc>
+ </method>
+ <method name="getDatanode" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get data node by storage ID.
+
+ @param nodeID
+ @return DatanodeDescriptor or null if the node is not found.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlocksTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of blocks in the system.]]>
+ </doc>
+ </method>
+ <method name="getFilesTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPendingReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUnderReplicatedBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCorruptReplicaBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns number of blocks with corrupt replicas]]>
+ </doc>
+ </method>
+ <method name="getScheduledReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPendingDeletionBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExcessBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBlockCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFSState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFSNamesystemMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get FSNamesystemMetrics]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[shutdown FSNamesystem]]>
+ </doc>
+ </method>
+ <method name="getNumLiveDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of live data nodes
+ @return Number of live data nodes]]>
+ </doc>
+ </method>
+ <method name="getNumDeadDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of dead data nodes
+ @return Number of dead data nodes]]>
+ </doc>
+ </method>
+ <method name="setGenerationStamp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stamp" type="long"/>
+ <doc>
+ <![CDATA[Sets the generation stamp for this filesystem]]>
+ </doc>
+ </method>
+ <method name="getGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the generation stamp for this filesystem]]>
+ </doc>
+ </method>
+ <method name="numCorruptReplicas" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ </method>
+ <method name="getDecommissioningNodes" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDelegationTokenSecretManager" return="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the DelegationTokenSecretManager instance in the namesystem.
+ @return delegation token secret manager object]]>
+ </doc>
+ </method>
+ <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="renewer" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param renewer
+ @return Token<DelegationTokenIdentifier>
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="renewDelegationToken" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param token
+ @return New expiryTime of the token
+ @throws InvalidToken
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cancelDelegationToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param token
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logUpdateMasterKey"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.security.token.delegation.DelegationKey"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Log the updateMasterKey operation to edit logs
+
+ @param key new delegation key.]]>
+ </doc>
+ </method>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Class representing Namenode information for JMX interfaces]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFree" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSafemode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isUpgradeFinalized" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNonDfsUsedSpace" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPercentUsed" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPercentRemaining" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTotalBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTotalFiles" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getThreads" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLiveNodes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returned information is a JSON representation of map with host name as the
+ key and value is a map of live node attribute keys to its values]]>
+ </doc>
+ </method>
+ <method name="getDeadNodes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returned information is a JSON representation of map with host name as the
+ key and value is a map of dead node attribute keys to its values]]>
+ </doc>
+ </method>
+ <method name="getDecomNodes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returned information is a JSON representation of map with host name as the
+ key and value is a map of decommissioning node attribute keys to its values]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="auditLog" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Logger for audit events, noting successful FSNamesystem operations. Emits
+ to FSNamesystem.audit at INFO. Each event causes a set of tab-separated
+ <code>key=value</code> pairs to be written for the following properties:
+ <code>
+ ugi=&lt;ugi in RPC&gt;
+ ip=&lt;remote IP&gt;
+ cmd=&lt;command&gt;
+ src=&lt;src path&gt;
+ dst=&lt;dst path (optional)&gt;
+ perm=&lt;permissions (optional)&gt;
+ </code>]]>
+ </doc>
+ </field>
+ <field name="dir" type="org.apache.hadoop.hdfs.server.namenode.FSDirectory"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="leaseManager" type="org.apache.hadoop.hdfs.server.namenode.LeaseManager"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="lmthread" type="org.apache.hadoop.util.Daemon"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="replthread" type="org.apache.hadoop.util.Daemon"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[FSNamesystem does the actual bookkeeping work for the
+ DataNode.
+
+ It tracks several important tables.
+
+ 1) valid fsname --> blocklist (kept on disk, logged)
+ 2) Set of all valid blocks (inverted #1)
+ 3) block --> machinelist (kept in memory, rebuilt dynamically from reports)
+ 4) machine --> blocklist (inverted #2)
+ 5) LRU cache of updated-heartbeat machines]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet -->
+ <class name="GetDelegationTokenServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GetDelegationTokenServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="resp" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="PATH_SPEC" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RENEWER" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Serve delegation tokens over http for use in hftp.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
+ <class name="GetImageServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GetImageServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isValidRequestor" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="remoteUser" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in Namesystem's jetty to retrieve a file.
+ Typically used by the Secondary NameNode to retrieve image and
+ edit file for periodic checkpointing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeSymlink -->
+ <class name="INodeSymlink" extends="org.apache.hadoop.hdfs.server.namenode.INode"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="isLink" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLinkValue" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSymlink" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An INode representing a symbolic link.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeSymlink -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
+ <class name="LeaseExpiredException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LeaseExpiredException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The lease that was being used to create this file has expired.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
+ <class name="LeaseManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getLeaseByPath" return="org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <doc>
+ <![CDATA[@return the lease containing src]]>
+ </doc>
+ </method>
+ <method name="countLease" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the number of leases currently in the system]]>
+ </doc>
+ </method>
+ <method name="setLeasePeriod"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="softLimit" type="long"/>
+ <param name="hardLimit" type="long"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[LeaseManager does the lease housekeeping for writing on files.
+ This class also provides useful static methods for lease recovery.
+
+ Lease Recovery Algorithm
+ 1) Namenode retrieves lease information
+ 2) For each file f in the lease, consider the last block b of f
+ 2.1) Get the datanodes which contains b
+ 2.2) Assign one of the datanodes as the primary datanode p
+
+ 2.3) p obtains a new generation stamp from the namenode
+ 2.4) p gets the block info from each datanode
+ 2.5) p computes the minimum block length
+ 2.6) p updates the datanodes, which have a valid generation stamp,
+ with the new generation stamp and the minimum block length
+ 2.7) p acknowledges the namenode the update results
+
+ 2.8) Namenode updates the BlockInfo
+ 2.9) Namenode removes f from the lease
+ and removes the lease once all files have been removed
+ 2.10) Namenode commit changes to edit log]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
+ <class name="ListPathsServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ListPathsServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="buildRoot" return="java.util.Map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="doc" type="org.znerd.xmlenc.XMLOutputter"/>
+ <doc>
+ <![CDATA[Build a map from the query string, setting values and defaults.]]>
+ </doc>
+ </method>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Service a GET request as described below.
+ Request:
+ {@code
+ GET http://<nn>:<port>/listPaths[/<path>][<?option>[&option]*] HTTP/1.1
+ }
+
+ Where <i>option</i> (default) in:
+ recursive (&quot;no&quot;)
+ filter (&quot;.*&quot;)
+ exclude (&quot;\..*\.crc&quot;)
+
+ Response: A flat list of files/directories in the following format:
+ {@code
+ <listing path="..." recursive="(yes|no)" filter="..."
+ time="yyyy-MM-dd hh:mm:ss UTC" version="...">
+ <directory path="..." modified="yyyy-MM-dd hh:mm:ss"/>
+ <file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ" accesstime="yyyy-MM-dd'T'hh:mm:ssZ"
+ blocksize="..."
+ replication="..." size="..."/>
+ </listing>
+ }]]>
+ </doc>
+ </method>
+ <field name="df" type="java.lang.ThreadLocal"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Obtain meta-information about a filesystem.
+ @see org.apache.hadoop.hdfs.HftpFileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.NameNode -->
+ <class name="NameNode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols"/>
+ <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
+ <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start NameNode.
+ <p>
+ The name-node can be started with one of the following startup options:
+ <ul>
+ <li>{@link StartupOption#REGULAR REGULAR} - normal name node startup</li>
+ <li>{@link StartupOption#FORMAT FORMAT} - format name node</li>
+ <li>{@link StartupOption#BACKUP BACKUP} - start backup node</li>
+ <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li>
+ <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
+ upgrade and create a snapshot of the current file system state</li>
+ <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the
+ cluster back to the previous state</li>
+ <li>{@link StartupOption#FINALIZE FINALIZE} - finalize
+ previous upgrade</li>
+ <li>{@link StartupOption#IMPORT IMPORT} - import checkpoint</li>
+ </ul>
+ The option is passed via configuration field:
+ <tt>dfs.namenode.startup</tt>
+
+ The conf will be modified to reflect the actual ports on which
+ the NameNode is up and running if the user passes the port as
+ <code>zero</code> in the conf.
+
+ @param conf configuration
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="format"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Format a new filesystem. Destroys any filesystem that may already
+ exist at this location.]]>
+ </doc>
+ </method>
+ <method name="getNameNodeMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="address" type="java.lang.String"/>
+ </method>
+ <method name="setServiceAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="address" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the configuration property for the service rpc address
+ to address]]>
+ </doc>
+ </method>
+ <method name="getServiceAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="fallback" type="boolean"/>
+ <doc>
+ <![CDATA[Fetches the address for services to use when connecting to namenode
+ based on the value of fallback returns null if the special
+ address is not specified or returns the default namenode address
+ to be used by both clients and services.
+ Services here are datanodes, backup node, any non client connection]]>
+ </doc>
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="namenode" type="java.net.InetSocketAddress"/>
+ </method>
+ <method name="getHostPortString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <doc>
+ <![CDATA[Compose a "host:port" string from the address.]]>
+ </doc>
+ </method>
+ <method name="getRole" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getServiceRpcServerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a configuration get the address of the service rpc server
+ If the service rpc is not configured returns null]]>
+ </doc>
+ </method>
+ <method name="getRpcServerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setRpcServiceServerAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Modifies the configuration passed to contain the service rpc address setting]]>
+ </doc>
+ </method>
+ <method name="setRpcServerAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getHttpServerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setHttpServerAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="loadNamesystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize name-node.
+
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getInfoServer" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Wait for service to finish.
+ (Normally, it runs forever.)]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop all NameNode threads and wait for all to finish.]]>
+ </doc>
+ </method>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="size" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBlockKeys" return="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="errorReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="errorCode" type="int"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="register" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startCheckpoint" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endCheckpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="journalSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="journal"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="jAction" type="int"/>
+ <param name="length" type="int"/>
+ <param name="args" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="renewer" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="renewDelegationToken" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cancelDelegationToken"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="offset" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="flag" type="org.apache.hadoop.io.EnumSetWritable"/>
+ <param name="createParent" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="recoverLease" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permissions" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="previous" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="excludedNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="abandonBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client needs to give up on the block.]]>
+ </doc>
+ </method>
+ <method name="complete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="last" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client has detected an error on the specified located blocks
+ and is reporting them to the server. For now, the namenode will
+ mark the block as corrupt. In the future we might
+ check the blocks are actually corrupt.]]>
+ </doc>
+ </method>
+ <method name="updateBlockForPipeline" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="updatePipeline"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clientName" type="java.lang.String"/>
+ <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="commitBlockSynchronization"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newgenerationstamp" type="long"/>
+ <param name="newlength" type="long"/>
+ <param name="closeFile" type="boolean"/>
+ <param name="deleteblock" type="boolean"/>
+ <param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getPreferredBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="concat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trg" type="java.lang.String"/>
+ <param name="src" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="renewLease"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getListing" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="startAfter" type="byte[]"/>
+ <param name="needLocation" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file.
+ @param src The string representation of the path to the file
+ @return object containing information regarding the file
+ or null if file not found]]>
+ </doc>
+ </method>
+ <method name="getFileLinkInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file. If the path refers to a
+ symlink then the FileStatus of the symlink is returned.
+ @param src The string representation of the path to the file
+ @return object containing information regarding the file
+ or null if file not found]]>
+ </doc>
+ </method>
+ <method name="getStats" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="isInSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the cluster currently in safe mode?]]>
+ </doc>
+ </method>
+ <method name="restoreFailedStorage" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <doc>
+ <![CDATA[@throws AccessControlException
+ @inheritDoc]]>
+ </doc>
+ </method>
+ <method name="saveNamespace"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refresh the list of datanodes that the namenode should allow to
+ connect. Re-reads conf by creating new HdfsConfiguration object and
+ uses the files list in the configuration to update the list.]]>
+ </doc>
+ </method>
+ <method name="getEditLogSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the size of the current edit log.]]>
+ </doc>
+ </method>
+ <method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Roll the edit log.]]>
+ </doc>
+ </method>
+ <method name="rollFsImage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Roll the image]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps namenode state into specified file]]>
+ </doc>
+ </method>
+ <method name="listCorruptFileBlocks" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="startBlockAfter" type="java.lang.String"/>
+ <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param path
+ Sub-tree used in querying corrupt files
+ @param startBlockAfter
+ Paging support---pass in the last block returned from the previous
+ call and some # of corrupt blocks after that point are returned
+ @return a list in which each entry describes a corrupt file/block
+ @throws AccessControlException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setQuota"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="namespaceQuota" type="long"/>
+ <param name="diskspaceQuota" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="fsync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="link" type="java.lang.String"/>
+ <param name="dirPerms" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="createParent" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getLinkTarget" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="registerDatanode" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="capacity" type="long"/>
+ <param name="dfsUsed" type="long"/>
+ <param name="remaining" type="long"/>
+ <param name="xmitsInProgress" type="int"/>
+ <param name="xceiverCount" type="int"/>
+ <param name="failedVolumes" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Data node notify the name node that it is alive
+ Return an array of block-oriented commands for the datanode to execute.
+ This will be either a transfer or a delete operation.]]>
+ </doc>
+ </method>
+ <method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="blocks" type="long[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockReceived"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <param name="delHints" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="errorReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="errorCode" type="int"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Handle an error report from a datanode.]]>
+ </doc>
+ </method>
+ <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="verifyRequest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify request.
+
+ Verifies correctness of the datanode version, registration ID, and
+ if the datanode does not need to be shutdown.
+
+ @param nodeReg data node registration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="verifyVersion"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="version" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify version.
+
+ @param version
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFsImageName" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the name of the fsImage file]]>
+ </doc>
+ </method>
+ <method name="getFSImage" return="org.apache.hadoop.hdfs.server.namenode.FSImage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFsImageNameCheckpoint" return="java.io.File[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the name of the fsImage file uploaded by periodic
+ checkpointing]]>
+ </doc>
+ </method>
+ <method name="getNameNodeAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the address on which the NameNode is listening.
+ @return the address on which the NameNode is listening.]]>
+ </doc>
+ </method>
+ <method name="getHttpAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the address of the NameNode's http server,
+ which is used to access the name-node web UI.
+
+ @return the http address.]]>
+ </doc>
+ </method>
+ <method name="refreshServiceAcl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="refreshUserToGroupsMappings"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="refreshSuperUserGroupsConfiguration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createNameNode" return="org.apache.hadoop.hdfs.server.namenode.NameNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="DEFAULT_PORT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="stateChangeLog" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="namesystem" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="role" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="serviceRpcServer" type="org.apache.hadoop.ipc.Server"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[RPC server for HDFS Services communication.
+ BackupNode, Datanodes and all other services
+ should be connecting to this server if it is
+ configured. Clients should only go to NameNode#server]]>
+ </doc>
+ </field>
+ <field name="rpcAddress" type="java.net.InetSocketAddress"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[RPC server address]]>
+ </doc>
+ </field>
+ <field name="serviceRPCAddress" type="java.net.InetSocketAddress"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[RPC server for DN address]]>
+ </doc>
+ </field>
+ <field name="httpServer" type="org.apache.hadoop.http.HttpServer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[httpServer]]>
+ </doc>
+ </field>
+ <field name="httpAddress" type="java.net.InetSocketAddress"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[HTTP server address]]>
+ </doc>
+ </field>
+ <field name="stopRequested" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[only used for testing purposes]]>
+ </doc>
+ </field>
+ <field name="nodeRegistration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Registration information of this name-node]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[NameNode serves as both directory namespace manager and
+ "inode table" for the Hadoop DFS. There is a single NameNode
+ running in any DFS deployment. (Well, except when there
+ is a second backup/failover NameNode.)
+
+ The NameNode controls two critical tables:
+ 1) filename->blocksequence (namespace)
+ 2) block->machinelist ("inodes")
+
+ The first table is stored on disk and is very precious.
+ The second table is rebuilt every time the NameNode comes
+ up.
+
+ 'NameNode' refers to both this class as well as the 'NameNode server'.
+ The 'FSNamesystem' class actually performs most of the filesystem
+ management. The majority of the 'NameNode' class itself is concerned
+ with exposing the IPC interface and the http server to the outside world,
+ plus some configuration management.
+
+ NameNode implements the ClientProtocol interface, which allows
+ clients to ask for DFS services. ClientProtocol is not
+ designed for direct use by authors of DFS client code. End-users
+ should instead use the org.apache.nutch.hadoop.fs.FileSystem class.
+
+ NameNode also implements the DatanodeProtocol interface, used by
+ DataNode programs that actually store DFS data blocks. These
+ methods are invoked repeatedly and automatically by all the
+ DataNodes in a DFS deployment.
+
+ NameNode also implements the NamenodeProtocol interface, used by
+ secondary namenodes or rebalancing processes to get partial namenode's
+ state, for example partial blocksMap etc.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.NameNode -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
+ <class name="NamenodeFsck" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="fsck"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check files on DFS, starting from the indicated path.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CORRUPT_STATUS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HEALTHY_STATUS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NONEXISTENT_STATUS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILURE_STATUS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FIXING_NONE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Don't attempt any fixing.]]>
+ </doc>
+ </field>
+ <field name="FIXING_MOVE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Move corrupted files to /lost+found.]]>
+ </doc>
+ </field>
+ <field name="FIXING_DELETE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Delete corrupted files.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
+ sub-optimal conditions.
+ <p>The tool scans all files and directories, starting from an indicated
+ root path. The following abnormal conditions are detected and handled:</p>
+ <ul>
+ <li>files with blocks that are completely missing from all datanodes.<br/>
+ In this case the tool can perform one of the following actions:
+ <ul>
+ <li>none ({@link #FIXING_NONE})</li>
+ <li>move corrupted files to /lost+found directory on DFS
+ ({@link #FIXING_MOVE}). Remaining data blocks are saved as
+ block chains, representing the longest consecutive series of valid blocks.</li>
+ <li>delete corrupted files ({@link #FIXING_DELETE})</li>
+ </ul>
+ </li>
+ <li>detect files with under-replicated or over-replicated blocks</li>
+ </ul>
+ Additionally, the tool collects a detailed overall DFS statistics, and
+ optionally can print detailed statistics on block locations and replication
+ factors of each file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
+ <!-- start interface org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean -->
+ <interface name="NameNodeMXBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the version of Hadoop.
+
+ @return the version]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the used space by data nodes.
+
+ @return the used space by data nodes]]>
+ </doc>
+ </method>
+ <method name="getFree" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets total non-used raw bytes.
+
+ @return total non-used raw bytes]]>
+ </doc>
+ </method>
+ <method name="getTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets total raw bytes including non-dfs used space.
+
+ @return the total raw bytes including non-dfs used space]]>
+ </doc>
+ </method>
+ <method name="getSafemode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the safemode status
+
+ @return the safemode status]]>
+ </doc>
+ </method>
+ <method name="isUpgradeFinalized" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Checks if upgrade is finalized.
+
+ @return true, if upgrade is finalized]]>
+ </doc>
+ </method>
+ <method name="getNonDfsUsedSpace" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets total used space by data nodes for non DFS purposes such as storing
+ temporary files on the local file system
+
+ @return the non dfs space of the cluster]]>
+ </doc>
+ </method>
+ <method name="getPercentUsed" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the total used space by data nodes as percentage of total capacity
+
+ @return the percentage of used space on the cluster.]]>
+ </doc>
+ </method>
+ <method name="getPercentRemaining" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the total remaining space by data nodes as percentage of total
+ capacity
+
+ @return the percentage of the remaining space on the cluster]]>
+ </doc>
+ </method>
+ <method name="getTotalBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the total numbers of blocks on the cluster.
+
+ @return the total number of blocks of the cluster]]>
+ </doc>
+ </method>
+ <method name="getTotalFiles" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the total number of files on the cluster
+
+ @return the total number of files on the cluster]]>
+ </doc>
+ </method>
+ <method name="getThreads" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the number of threads.
+
+ @return the number of threads]]>
+ </doc>
+ </method>
+ <method name="getLiveNodes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the live node information of the cluster.
+
+ @return the live node information]]>
+ </doc>
+ </method>
+ <method name="getDeadNodes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the dead node information of the cluster.
+
+ @return the dead node information]]>
+ </doc>
+ </method>
+ <method name="getDecomNodes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the decommissioning node information of the cluster.
+
+ @return the decommissioning node information]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for namenode information]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
+ <class name="NotReplicatedYetException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NotReplicatedYetException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The file has not finished being written to enough datanodes yet.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet -->
+ <class name="RenewDelegationTokenServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RenewDelegationTokenServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="resp" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="PATH_SPEC" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TOKEN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Renew delegation tokens over http for use in hftp.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
+ <class name="SafeModeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SafeModeException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SafeModeException" type="java.lang.String, org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when the name node is in safe mode.
+ Clients cannot modify the namespace until the safe mode is off.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
+ <class name="SecondaryNameNode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="SecondaryNameNode" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a connection to the primary namenode.]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHttpAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Shut down this instance of the secondary namenode.
+ Returns only after shutdown is complete.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="doWork"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods.
+ @param argv Command line parameters.
+ @exception Exception if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The Secondary NameNode is a helper to the primary NameNode.
+ The Secondary is responsible for supporting periodic checkpoints
+ of the HDFS metadata. The current design allows only one Secondary
+ NameNode per HDFS cluster.
+
+ The Secondary NameNode is a daemon that periodically wakes
+ up (determined by the schedule specified in the configuration),
+ triggers a periodic checkpoint and then goes back to sleep.
+ The Secondary NameNode uses the ClientProtocol to talk to the
+ primary NameNode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
+ <class name="StreamFile" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StreamFile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getDFSClient" return="org.apache.hadoop.hdfs.DFSClient"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[getting a client for connecting to dfs]]>
+ </doc>
+ </method>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="CONTENT_LENGTH" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException -->
+ <class name="UnsupportedActionException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnsupportedActionException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when an operation is not supported.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
+ <class name="UpgradeObjectNamenode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UpgradeObjectNamenode"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="command" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Process an upgrade command.
+ RPC has only one very generic command for all upgrade related inter
+ component communications.
+ The actual command recognition and execution should be handled here.
+ The reply is sent back also as an UpgradeCommand.
+
+ @param command
+ @return the reply command which is analyzed on the client side.]]>
+ </doc>
+ </method>
+ <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forceProceed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Base class for name-node upgrade objects.
+ Data-node upgrades are run in separate threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+ <!-- start interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
+ <interface name="FSNamesystemMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getFSState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The state of the file system: Safemode or Operational
+ @return the state]]>
+ </doc>
+ </method>
+ <method name="getBlocksTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of allocated blocks in the system
+ @return - number of allocated blocks]]>
+ </doc>
+ </method>
+ <method name="getCapacityTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total storage capacity
+ @return - total capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacityRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Free (unused) storage capacity
+ @return - free capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Used storage capacity
+ @return - used capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getFilesTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total number of files and directories
+ @return - num of files and directories]]>
+ </doc>
+ </method>
+ <method name="getPendingReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Blocks pending to be replicated
+ @return - num of blocks to be replicated]]>
+ </doc>
+ </method>
+ <method name="getUnderReplicatedBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Blocks under replicated
+ @return - num of blocks under replicated]]>
+ </doc>
+ </method>
+ <method name="getScheduledReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Blocks scheduled for replication
+ @return - num of blocks scheduled for replication]]>
+ </doc>
+ </method>
+ <method name="getTotalLoad" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total Load on the FSNamesystem
+ @return - total load of FSNamesystem]]>
+ </doc>
+ </method>
+ <method name="getNumLiveDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Live data nodes
+ @return number of live data nodes]]>
+ </doc>
+ </method>
+ <method name="getNumDeadDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of dead data nodes
+ @return number of dead data nodes]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This Interface defines the methods to get the status of the FSNamesystem of
+ a name node.
+ It is also used for publishing via JMX (hence we follow the JMX naming
+ convention.)
+
+ Note we have not used the MetricsDynamicMBeanBase to implement this
+ because the interface for the NameNodeStateMBean is stable and should
+ be published as an interface.
+
+ <p>
+ Name Node runtime activity statistic info is reported in another MBean
+ @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
+ <class name="FSNamesystemMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="FSNamesystemMetrics" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.
+ We set the metrics value within this function before pushing it out.
+ FSNamesystem updates its own local variables which are
+ light weight compared to Metrics counters.
+
+ Some of the metrics are explicitly cast to int. A few metrics collectors
+ do not handle long values. It is safe to cast to int for now as all these
+ values fit in int value.
+ Metrics related to DFS capacity are stored in bytes which do not fit in
+ int, so they are rounded to GB]]>
+ </doc>
+ </method>
+ <field name="numExpiredHeartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various FSNamesystem status metrics
+ and publishing them through the metrics interfaces.
+ The FSNamesystem creates and registers the JMX MBean.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #filesTotal}.set()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean -->
+ <class name="NameNodeActivityMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NameNodeActivityMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is the JMX MBean for reporting the NameNode Activity.
+ The MBean is register using the name
+ "hadoop:service=NameNode,name=NameNodeActivity"
+
+ Many of the activity metrics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the metrics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most metrics contexts do.
+ The default Null metrics context however does NOT. So if you aren't
+ using any other metrics context then you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ dfs.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically
+
+
+
+ Impl details: We use a dynamic mbean that gets the list of the metrics
+ from the metrics registry passed as an argument to the constructor]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
+ <class name="NameNodeMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="NameNodeMetrics" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numCreateFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesCreated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesAppended" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numGetBlockLocations" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesRenamed" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numGetListingOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numDeleteFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesDeleted" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFileInfoOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numAddBlockOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numcreateSymlinkOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numgetLinkTargetOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="transactions" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="syncs" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="transactionsBatchedInSync" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockReport" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="safeModeTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="fsImageLoadTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numBlocksCorrupted" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesInGetListingOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various NameNode activity statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #syncs}.inc()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.protocol">
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
+ <class name="BlockCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BlockCommand" type="int, java.util.List"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create BlockCommand for transferring blocks to another datanode
+ @param blocktargetlist blocks to be transferred]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockCommand" type="int, org.apache.hadoop.hdfs.protocol.Block[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create BlockCommand for the given action
+ @param blocks blocks related to the action]]>
+ </doc>
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.protocol.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTargets" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[][]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A BlockCommand is an instruction to a datanode
+ regarding some blocks under its control. It tells
+ the DataNode to either invalidate a set of indicated
+ blocks, or to copy a set of indicated blocks to
+ another DataNode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand -->
+ <class name="BlockRecoveryCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockRecoveryCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create empty BlockRecoveryCommand.]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockRecoveryCommand" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create BlockRecoveryCommand with
+ the specified capacity for recovering blocks.]]>
+ </doc>
+ </constructor>
+ <method name="getRecoveringBlocks" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the list of recovering blocks.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
+ <doc>
+ <![CDATA[Add recovering block to the command.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[BlockRecoveryCommand is an instruction to a data-node to recover
+ the specified blocks.
+
+ The data-node that receives this command treats itself as a primary
+ data-node in the recover process.
+
+ Block recovery is identified by a recoveryId, which is also the new
+ generation stamp, which the block will have after the recovery succeeds.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock -->
+ <class name="BlockRecoveryCommand.RecoveringBlock" extends="org.apache.hadoop.hdfs.protocol.LocatedBlock"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockRecoveryCommand.RecoveringBlock"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create empty RecoveringBlock.]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockRecoveryCommand.RecoveringBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create RecoveringBlock.]]>
+ </doc>
+ </constructor>
+ <method name="getNewGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the new generation stamp of the block,
+ which also plays role of the recovery id.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This is a block with locations from which it should be recovered
+ and the new generation stamp, which the block will have after
+ successful recovery.
+
+ The new generation stamp of the block, also plays role of the recovery id.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
+ <class name="BlocksWithLocations" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlocksWithLocations" type="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with one parameter]]>
+ </doc>
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[getter]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[serialization method]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[deserialization method]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class to implement an array of BlockLocations
+ It provides efficient customized serialization/deserialization methods
+ instead of using the default array (de)serialization provided by RPC
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
+ <class name="BlocksWithLocations.BlockWithLocations" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlocksWithLocations.BlockWithLocations"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="BlocksWithLocations.BlockWithLocations" type="org.apache.hadoop.hdfs.protocol.Block, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[constructor]]>
+ </doc>
+ </constructor>
+ <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the block]]>
+ </doc>
+ </method>
+ <method name="getDatanodes" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the block's locations]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[deserialization method]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[serialization method]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class to keep track of a block and its locations]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.CheckpointCommand -->
+ <class name="CheckpointCommand" extends="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CheckpointCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CheckpointCommand" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature, boolean, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSignature" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Checkpoint signature is used to ensure
+ that nodes are talking about the same checkpoint.]]>
+ </doc>
+ </method>
+ <method name="isImageObsolete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Indicates whether current backup image is obsolete, and therefore
+ need to be discarded?
+
+ @return true if current image should be discarded.]]>
+ </doc>
+ </method>
+ <method name="needToReturnImage" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Indicates whether the new checkpoint image needs to be transferred
+ back to the name-node after the checkpoint is done.
+
+ @return true if the checkpoint should be returned back.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Checkpoint command.
+ <p>
+ Returned to the backup node by the name-node as a reply to the
+ {@link NamenodeProtocol#startCheckpoint(NamenodeRegistration)}
+ request.<br>
+ Contains:
+ <ul>
+ <li>{@link CheckpointSignature} identifying the particular checkpoint</li>
+ <li>indicator whether the backup image should be discarded before starting
+ the checkpoint</li>
+ <li>indicator whether the image should be transferred back to the name-node
+ upon completion of the checkpoint.</li>
+ </ul>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.CheckpointCommand -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
+ <class name="DatanodeCommand" extends="org.apache.hadoop.hdfs.server.protocol.ServerCommand"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DatanodeCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <field name="REGISTER" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FINALIZE" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Base class for data-node command.
+ Issued by the name-node to notify data-nodes what should be done.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
+ <interface name="DatanodeProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="registerDatanode" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Register Datanode.
+
+ @see org.apache.hadoop.hdfs.server.datanode.DataNode#dnRegistration
+ @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
+
+ @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains
+ new storageID if the datanode did not have one and
+ registration ID for further communication.]]>
+ </doc>
+ </method>
+ <method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="capacity" type="long"/>
+ <param name="dfsUsed" type="long"/>
+ <param name="remaining" type="long"/>
+ <param name="xmitsInProgress" type="int"/>
+ <param name="xceiverCount" type="int"/>
+ <param name="failedVolumes" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[sendHeartbeat() tells the NameNode that the DataNode is still
+ alive and well. Includes some status info, too.
+ It also gives the NameNode a chance to return
+ an array of "DatanodeCommand" objects.
+ A DatanodeCommand tells the DataNode to invalidate local block(s),
+ or to copy them to other DataNodes, etc.]]>
+ </doc>
+ </method>
+ <method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="blocks" type="long[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[blockReport() tells the NameNode about all the locally-stored blocks.
+ The NameNode returns an array of Blocks that have become obsolete
+ and should be deleted. This function is meant to upload *all*
+ the locally-stored blocks. It's invoked upon startup and then
+ infrequently afterwards.
+ @param registration
+ @param blocks - the block list as an array of longs.
+ Each block is represented as 2 longs.
+ This is done instead of Block[] to reduce memory used by block reports.
+
+ @return - the next command for DN to process.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="blockReceived"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
+ <param name="delHints" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[blockReceived() allows the DataNode to tell the NameNode about
+ recently-received block data, with a hint for the preferred replica
+ to be deleted when there is any excessive blocks.
+ For example, whenever client code
+ writes a new Block here, or another DataNode copies a Block to
+ this DataNode, it will call blockReceived().]]>
+ </doc>
+ </method>
+ <method name="errorReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
+ <param name="errorCode" type="int"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[errorReport() tells the NameNode about something that has gone
+ awry. Useful for debugging.]]>
+ </doc>
+ </method>
+ <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This is a very general way to send a command to the name-node during
+ distributed upgrade process.
+
+ The generality is because the variety of upgrade commands is unpredictable.
+ The reply from the name-node is also received in the form of an upgrade
+ command.
+
+ @return a reply in the form of an upgrade command]]>
+ </doc>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}
+ ]]>
+ </doc>
+ </method>
+ <method name="commitBlockSynchronization"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="newgenerationstamp" type="long"/>
+ <param name="newlength" type="long"/>
+ <param name="closeFile" type="boolean"/>
+ <param name="deleteblock" type="boolean"/>
+ <param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Commit block synchronization in lease recovery]]>
+ </doc>
+ </method>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[26: remove getBlockLocations optimization]]>
+ </doc>
+ </field>
+ <field name="NOTIFY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DISK_ERROR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INVALID_BLOCK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FATAL_DISK_ERROR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_UNKNOWN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Determines actions that data node should perform
+ when receiving a datanode command.]]>
+ </doc>
+ </field>
+ <field name="DNA_TRANSFER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_INVALIDATE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_SHUTDOWN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_REGISTER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_FINALIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_RECOVERBLOCK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DNA_ACCESSKEYUPDATE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Protocol that a DFS datanode uses to communicate with the NameNode.
+ It's used to upload current load information and block reports.
+
+ The only way a NameNode can communicate with a DataNode is by
+ returning values from these functions.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
+ <class name="DatanodeRegistration" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"/>
+ <constructor name="DatanodeRegistration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeRegistration" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create DatanodeRegistration]]>
+ </doc>
+ </constructor>
+ <method name="setInfoPort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="infoPort" type="int"/>
+ </method>
+ <method name="setIpcPort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ipcPort" type="int"/>
+ </method>
+ <method name="setStorageInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="storage" type="org.apache.hadoop.hdfs.server.datanode.DataStorage"/>
+ </method>
+ <method name="setName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="to" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="storageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="exportedKeys" type="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeRegistration class contains all information the name-node needs
+ to identify and verify a data-node when it contacts the name-node.
+ This information is sent by data-node with each communication request.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
+ <class name="DisallowedDatanodeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DisallowedDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when a datanode tries to register or communicate
+ with the namenode when it does not appear on the list of included nodes,
+ or has been specifically excluded.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
+ <interface name="InterDatanodeProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize a replica recovery.
+
+ @return actual state of the replica on this data-node or
+ null if data-node does not have the replica.]]>
+ </doc>
+ </method>
+ <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
+ <param name="recoveryId" type="long"/>
+ <param name="newLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update replica with the new generation stamp and length.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[5: getBlockMetaDataInfo(), updateBlock() removed.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An inter-datanode protocol for updating generation stamp]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand -->
+ <class name="KeyUpdateCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="KeyUpdateCommand" type="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExportedKeys" return="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.NamenodeCommand -->
+ <class name="NamenodeCommand" extends="org.apache.hadoop.hdfs.server.protocol.ServerCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NamenodeCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NamenodeCommand" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Base class for name-node command.
+ Issued by the name-node to notify other name-nodes what should be done.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.NamenodeCommand -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
+ <interface name="NamenodeProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
+ <param name="size" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a list of blocks belonging to <code>datanode</code>
+ whose total size equals <code>size</code>.
+
+ @see org.apache.hadoop.hdfs.server.balancer.Balancer
+ @param datanode a data node
+ @param size requested size
+ @return a list of blocks & their locations
+ @throws RemoteException if size is less than or equal to 0 or
+ datanode does not exist]]>
+ </doc>
+ </method>
+ <method name="getBlockKeys" return="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the current block keys
+
+ @return ExportedBlockKeys containing current block keys
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getEditLogSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the size of the current edit log (in bytes).
+ @return The number of bytes in the current edit log.
+ @throws IOException
+ @deprecated
+ See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}]]>
+ </doc>
+ </method>
+ <method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the current edit log and opens a new one. The
+ call fails if the file system is in SafeMode.
+ @throws IOException
+ @return a unique token to identify this transaction.
+ @deprecated
+ See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}]]>
+ </doc>
+ </method>
+ <method name="rollFsImage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}">
+ <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rolls the fsImage log. It removes the old fsImage, copies the
+ new image to fsImage, removes the old edits and renames edits.new
+ to edits. The call fails if any of the four files are missing.
+
+ @param sig the signature of this checkpoint (old fsimage)
+ @throws IOException
+ @deprecated
+ See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}]]>
+ </doc>
+ </method>
+ <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request name-node version and storage information.
+
+ @return {@link NamespaceInfo} identifying versions and storage information
+ of the name-node
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="errorReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="errorCode" type="int"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report to the active name-node an error that occurred on a subordinate node.
+ Depending on the error code the active node may decide to unregister the
+ reporting node.
+
+ @param registration requesting node.
+ @param errorCode indicates the error
+ @param msg free text description of the error
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="register" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Register a subordinate name-node like backup node.
+
+ @return {@link NamenodeRegistration} of the node,
+ which this node has just registered with.]]>
+ </doc>
+ </method>
+ <method name="startCheckpoint" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A request to the active name-node to start a checkpoint.
+ The name-node should decide whether to admit it or reject.
+ The name-node also decides what should be done with the backup node
+ image before and after the checkpoint.
+
+ @see CheckpointCommand
+ @see NamenodeCommand
+ @see #ACT_SHUTDOWN
+
+ @param registration the requesting node
+ @return {@link CheckpointCommand} if checkpoint is allowed.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="endCheckpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A request to the active name-node to finalize
+ previously started checkpoint.
+
+ @param registration the requesting node
+ @param sig {@code CheckpointSignature} which identifies the checkpoint.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="journalSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the size of the active name-node journal (edit log) in bytes.
+
+ @param registration the requesting node
+ @return The number of bytes in the journal.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="journal"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
+ <param name="jAction" type="int"/>
+ <param name="length" type="int"/>
+ <param name="records" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Journal edit records.
+ This message is sent by the active name-node to the backup node
+ via {@code EditLogBackupOutputStream} in order to synchronize meta-data
+ changes with the backup namespace image.
+
+ @param registration active node registration
+ @param jAction journal action
+ @param length length of the byte array
+ @param records byte array containing serialized journal records
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compared to the previous version the following changes have been introduced:
+ (Only the latest change is reflected.
+ The log of historical changes can be retrieved from the svn).
+
+ 5: Added one parameter to rollFSImage() and
+ changed the definition of CheckpointSignature]]>
+ </doc>
+ </field>
+ <field name="NOTIFY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FATAL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JA_IS_ALIVE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JA_JOURNAL" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JA_JSPOOL_START" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JA_CHECKPOINT_TIME" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ACT_UNKNOWN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ACT_SHUTDOWN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ACT_CHECKPOINT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Protocol that a secondary NameNode uses to communicate with the NameNode.
+ It's used to get part of the name node state]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols -->
+ <interface name="NamenodeProtocols" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"/>
+ <implements name="org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol"/>
+ <implements name="org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol"/>
+ <implements name="org.apache.hadoop.security.RefreshUserMappingsProtocol"/>
+ <doc>
+ <![CDATA[The full set of RPC methods implemented by the Namenode.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration -->
+ <class name="NamenodeRegistration" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"/>
+ <constructor name="NamenodeRegistration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NamenodeRegistration" type="java.lang.String, java.lang.String, org.apache.hadoop.hdfs.server.common.StorageInfo, org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRole" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get name-node role.]]>
+ </doc>
+ </method>
+ <method name="isRole" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"/>
+ </method>
+ <method name="getCheckpointTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the age of the image.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Information sent by a subordinate name-node to the active name-node
+ during the registration process.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
+ <class name="NamespaceInfo" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NamespaceInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NamespaceInfo" type="int, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDistributedUpgradeVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[NamespaceInfo is returned by the name-node in reply
+ to a data-node handshake.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
+ <!-- start interface org.apache.hadoop.hdfs.server.protocol.NodeRegistration -->
+ <interface name="NodeRegistration" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get address of the server node.
+ @return hostname:portNumber]]>
+ </doc>
+ </method>
+ <method name="getRegistrationID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get registration ID of the server node.]]>
+ </doc>
+ </method>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get layout version of the server node.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Generic class specifying information, which needs to be sent to the name-node
+ during the registration process.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.protocol.NodeRegistration -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo -->
+ <class name="ReplicaRecoveryInfo" extends="org.apache.hadoop.hdfs.protocol.Block"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReplicaRecoveryInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ReplicaRecoveryInfo" type="long, long, long, org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getOriginalReplicaState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Replica recovery information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.ServerCommand -->
+ <class name="ServerCommand" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ServerCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Unknown server command constructor.
+ Creates a command with action 0.
+
+ @see NamenodeProtocol#ACT_UNKNOWN
+ @see DatanodeProtocol#DNA_UNKNOWN]]>
+ </doc>
+ </constructor>
+ <constructor name="ServerCommand" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a command for the specified action.
+ Actions are protocol specific.
+
+ @see DatanodeProtocol
+ @see NamenodeProtocol
+ @param action]]>
+ </doc>
+ </constructor>
+ <method name="getAction" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get server command action.
+ @return action code.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Base class for a server command.
+ Issued by the name-node to notify other servers what should be done.
+ Commands are defined by actions defined in respective protocols.
+
+ @see DatanodeProtocol
+ @see NamenodeProtocol]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.ServerCommand -->
+ <!-- start class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
+ <class name="UpgradeCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UpgradeCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UpgradeCommand" type="int, int, short"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCurrentStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="UC_ACTION_REPORT_STATUS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UC_ACTION_START_UPGRADE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[This is a generic distributed upgrade command.
+
+ During the upgrade cluster components send upgrade commands to each other
+ in order to obtain or share information with them.
+ It is supposed that each upgrade defines specific upgrade command by
+ deriving them from this class.
+ The upgrade command contains version of the upgrade, which is verified
+ on the receiving side and current status of the upgrade.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+ <!-- start class org.apache.hadoop.hdfs.tools.DelegationTokenFetcher -->
+ <class name="DelegationTokenFetcher" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DelegationTokenFetcher"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Command-line interface]]>
+ </doc>
+ </method>
+ <method name="getDTfromRemote" return="org.apache.hadoop.security.Credentials"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nnAddr" type="java.lang.String"/>
+ <param name="renewer" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="renewDelegationToken" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nnAddr" type="java.lang.String"/>
+ <param name="tok" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renew a Delegation Token.
+ @param nnAddr the NameNode's address
+ @param tok the token to renew
+ @return the Date that the token will expire next.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cancelDelegationToken"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nnAddr" type="java.lang.String"/>
+ <param name="tok" type="org.apache.hadoop.security.token.Token"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Cancel a Delegation Token.
+ @param nnAddr the NameNode's address
+ @param tok the token to cancel
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Fetch a DelegationToken from the current Namenode and store it in the
+ specified file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.DelegationTokenFetcher -->
+ <!-- start class org.apache.hadoop.hdfs.tools.DFSAdmin -->
+ <class name="DFSAdmin" extends="org.apache.hadoop.fs.FsShell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSAdmin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a DFSAdmin object.]]>
+ </doc>
+ </constructor>
+ <constructor name="DFSAdmin" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a DFSAdmin object.]]>
+ </doc>
+ </constructor>
+ <method name="report"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gives a report on how the FileSystem is doing.
+ @exception IOException if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Safe mode maintenance command.
+ Usage: java DFSAdmin -safemode [enter | leave | get]
+      @param argv List of command line parameters.
+ @param idx The index of the command that is being processed.
+ @exception IOException if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <method name="saveNamespace" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to save the namespace.
+ Usage: java DFSAdmin -saveNamespace
+ @exception IOException
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
+ </doc>
+ </method>
+ <method name="restoreFaileStorage" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to enable/disable/check restoring of failed storage replicas in the namenode.
+ Usage: java DFSAdmin -restoreFailedStorage true|false|check
+ @exception IOException
+ @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)]]>
+ </doc>
+ </method>
+ <method name="refreshNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to reread the hosts and excluded hosts
+ file.
+ Usage: java DFSAdmin -refreshNodes
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to finalize previously performed upgrade.
+ Usage: java DFSAdmin -finalizeUpgrade
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="upgradeProgress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to request current distributed upgrade status,
+ a detailed status, or to force the upgrade to proceed.
+
+ Usage: java DFSAdmin -upgradeProgress [status | details | force]
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="metaSave" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps DFS data structures into specified file.
+ Usage: java DFSAdmin -metasave filename
+      @param argv List of command line parameters.
+ @param idx The index of the command that is being processed.
+      @exception IOException if an error occurred while accessing
+ the file or path.]]>
+ </doc>
+ </method>
+ <method name="printTopology" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Display each rack and the nodes assigned to that rack, as determined
+ by the NameNode, in a hierarchical manner. The nodes and racks are
+ sorted alphabetically.
+
+      @throws IOException If an error occurs while getting the datanode report
+ </doc>
+ </method>
+ <method name="refreshServiceAcl" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refresh the authorization policy on the {@link NameNode}.
+ @return exitcode 0 on success, non-zero on failure
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="refreshUserToGroupsMappings" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refresh the user-to-groups mappings on the {@link NameNode}.
+ @return exitcode 0 on success, non-zero on failure
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="refreshSuperUserGroupsConfiguration" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[refreshSuperUserGroupsConfiguration {@link NameNode}.
+ @return exitcode 0 on success, non-zero on failure
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[@param argv The parameters passed to this program.
+ @exception Exception if the filesystem does not exist.
+ @return 0 on success, non zero on error.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods.
+ @param argv Command line parameters.
+ @exception Exception if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class provides some DFS administrative access.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.DFSAdmin -->
+ <!-- start class org.apache.hadoop.hdfs.tools.DFSck -->
+ <class name="DFSck" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="DFSck" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filesystem checker.
+ @param conf current Configuration]]>
+ </doc>
+ </constructor>
+ <constructor name="DFSck" type="org.apache.hadoop.conf.Configuration, java.io.PrintStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
+ sub-optimal conditions.
+ <p>The tool scans all files and directories, starting from an indicated
+ root path. The following abnormal conditions are detected and handled:</p>
+ <ul>
+ <li>files with blocks that are completely missing from all datanodes.<br/>
+ In this case the tool can perform one of the following actions:
+ <ul>
+ <li>none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})</li>
+ <li>move corrupted files to /lost+found directory on DFS
+ ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as a
+ block chains, representing longest consecutive series of valid blocks.</li>
+ <li>delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})</li>
+ </ul>
+ </li>
+ <li>detect files with under-replicated or over-replicated blocks</li>
+ </ul>
+ Additionally, the tool collects a detailed overall DFS statistics, and
+ optionally can print detailed statistics on block locations and replication
+ factors of each file.
+ The tool also provides an option to filter open files during the scan.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.DFSck -->
+ <!-- start class org.apache.hadoop.hdfs.tools.HDFSConcat -->
+ <class name="HDFSConcat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HDFSConcat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.HDFSConcat -->
+ <!-- start class org.apache.hadoop.hdfs.tools.JMXGet -->
+ <class name="JMXGet" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JMXGet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="service" type="java.lang.String"/>
+ </method>
+ <method name="setPort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="port" type="java.lang.String"/>
+ </method>
+ <method name="setServer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="server" type="java.lang.String"/>
+ </method>
+ <method name="setLocalVMUrl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="url" type="java.lang.String"/>
+ </method>
+ <method name="printAllValues"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[print all attributes' values]]>
+ </doc>
+ </method>
+ <method name="getValue" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.String"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[get single value by key]]>
+ </doc>
+ </method>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[@throws Exception
+ initializes MBeanServer]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[main
+
+ @param args]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[tool to get data from NameNode or DataNode using MBeans currently the
+ following MBeans are available (under hadoop domain):
+ hadoop:service=NameNode,name=FSNamesystemState (static)
+ hadoop:service=NameNode,name=NameNodeActivity (dynamic)
+ hadoop:service=NameNode,name=RpcActivityForPort9000 (dynamic)
+ hadoop:service=DataNode,name=RpcActivityForPort50020 (dynamic)
+ hadoop:name=service=DataNode,FSDatasetState-UndefinedStorageId663800459
+ (static)
+ hadoop:service=DataNode,name=DataNodeActivity-UndefinedStorageId-520845215
+ (dynamic)
+
+
+ implementation note: all logging is sent to System.err (since it is a command
+ line tool)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.JMXGet -->
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+ <!-- start class org.apache.hadoop.hdfs.tools.offlineImageViewer.NameDistributionVisitor -->
+ <class name="NameDistributionVisitor" extends="org.apache.hadoop.hdfs.tools.offlineImageViewer.TextWriterImageVisitor"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NameDistributionVisitor" type="java.lang.String, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <doc>
+ <![CDATA[File name distribution visitor.
+ <p>
+ It analyzes file names in fsimage and prints the following information:
+ <li>Number of unique file names</li>
+ <li>Number of file names and the corresponding number range of files that use
+ these same names</li>
+ <li>Heap saved if the file name objects are reused</li>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.offlineImageViewer.NameDistributionVisitor -->
+ <!-- start class org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer -->
+ <class name="OfflineImageViewer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OfflineImageViewer" type="java.lang.String, org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="go"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Process image file.]]>
+ </doc>
+ </method>
+ <method name="buildOptions" return="org.apache.commons.cli.Options"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Build command-line options and descriptions]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Entry point to command-line-driven operation. User may specify
+ options and start fsimage viewer from the command line. Program
+ will process image file and exit cleanly or, if an error is
+ encountered, inform user and exit.
+
+ @param args Command line options
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[OfflineImageViewer to dump the contents of a Hadoop image file to XML
+ or the console.  Main entry point into utility, either via the
+ command line or programmatically.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer -->
+</package>
+<package name="org.apache.hadoop.hdfs.util">
+ <!-- start class org.apache.hadoop.hdfs.util.ByteArray -->
+ <class name="ByteArray" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ByteArray" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <doc>
+ <![CDATA[Wrapper for byte[] to use byte[] as key in HashMap]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.util.ByteArray -->
+ <!-- start class org.apache.hadoop.hdfs.util.DataTransferThrottler -->
+ <class name="DataTransferThrottler" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataTransferThrottler" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+ @param bandwidthPerSec bandwidth allowed in bytes per second.]]>
+ </doc>
+ </constructor>
+ <constructor name="DataTransferThrottler" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+ @param period in milliseconds. Bandwidth is enforced over this
+ period.
+ @param bandwidthPerSec bandwidth allowed in bytes per second.]]>
+ </doc>
+ </constructor>
+ <method name="getBandwidth" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return current throttle bandwidth in bytes per second.]]>
+ </doc>
+ </method>
+ <method name="setBandwidth"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytesPerSecond" type="long"/>
+ <doc>
+      <![CDATA[Sets throttle bandwidth. This takes effect latest by the end of current
+ period.
+
+ @param bytesPerSecond]]>
+ </doc>
+ </method>
+ <method name="throttle"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numOfBytes" type="long"/>
+ <doc>
+ <![CDATA[Given the numOfBytes sent/received since last time throttle was called,
+ make the current thread sleep if I/O rate is too fast
+ compared to the given bandwidth.
+
+ @param numOfBytes
+ number of bytes sent/received since last time throttle was called]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[a class to throttle the data transfers.
+ This class is thread safe. It can be shared by multiple threads.
+ The parameter bandwidthPerSec specifies the total bandwidth shared by
+ threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.util.DataTransferThrottler -->
+ <!-- start interface org.apache.hadoop.hdfs.util.GSet -->
+ <interface name="GSet" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable"/>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The size of this set.]]>
+ </doc>
+ </method>
+ <method name="contains" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Does this set contain an element corresponding to the given key?
+ @param key The given key.
+ @return true if the given key equals to a stored element.
+ Otherwise, return false.
+ @throws NullPointerException if key == null.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return the stored element which is equal to the given key.
+ This operation is similar to {@link java.util.Map#get(Object)}.
+ @param key The given key.
+ @return The stored element if it exists.
+ Otherwise, return null.
+ @throws NullPointerException if key == null.]]>
+ </doc>
+ </method>
+ <method name="put" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Add/replace an element.
+ If the element does not exist, add it to the set.
+ Otherwise, replace the existing element.
+
+ Note that this operation
+ is similar to {@link java.util.Map#put(Object, Object)}
+ but is different from {@link java.util.Set#add(Object)}
+ which does not replace the existing element if there is any.
+
+ @param element The element being put.
+ @return the previous stored element if there is any.
+ Otherwise, return null.
+ @throws NullPointerException if element == null.]]>
+ </doc>
+ </method>
+ <method name="remove" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Remove the element corresponding to the given key.
+ This operation is similar to {@link java.util.Map#remove(Object)}.
+ @param key The key of the element being removed.
+ @return If such element exists, return it.
+ Otherwise, return null.
+ @throws NullPointerException if key == null.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[A {@link GSet} is a set,
+ which supports the {@link #get(Object)} operation.
+ The {@link #get(Object)} operation uses a key to lookup an element.
+
+ Null element is not supported.
+
+ @param <K> The type of the keys.
+ @param <E> The type of the elements, which must be a subclass of the keys.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.util.GSet -->
+ <!-- start class org.apache.hadoop.hdfs.util.GSetByHashMap -->
+ <class name="GSetByHashMap" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.util.GSet"/>
+ <constructor name="GSetByHashMap" type="int, float"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="contains" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="k" type="java.lang.Object"/>
+ </method>
+ <method name="get" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="k" type="java.lang.Object"/>
+ </method>
+ <method name="put" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ </method>
+ <method name="remove" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="k" type="java.lang.Object"/>
+ </method>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link GSet} implementation by {@link HashMap}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.util.GSetByHashMap -->
+ <!-- start class org.apache.hadoop.hdfs.util.LightWeightGSet -->
+ <class name="LightWeightGSet" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.util.GSet"/>
+ <constructor name="LightWeightGSet" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param recommended_length Recommended size of the internal array.]]>
+ </doc>
+ </constructor>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="get" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ </method>
+ <method name="contains" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ </method>
+ <method name="put" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ </method>
+ <method name="remove" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ </method>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="printDetails"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Print detailed information of this object.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A low memory footprint {@link GSet} implementation,
+ which uses an array for storing the elements
+ and linked lists for collision resolution.
+
+ No rehash will be performed.
+ Therefore, the internal array will never be resized.
+
+ This class does not support null element.
+
+ This class is not thread safe.
+
+ @param <K> Key type for looking up the elements
+ @param <E> Element type, which must be
+ (1) a subclass of K, and
+ (2) implementing {@link LinkedElement} interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.util.LightWeightGSet -->
+ <!-- start interface org.apache.hadoop.hdfs.util.LightWeightGSet.LinkedElement -->
+ <interface name="LightWeightGSet.LinkedElement" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setNext"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="next" type="org.apache.hadoop.hdfs.util.LightWeightGSet.LinkedElement"/>
+ <doc>
+ <![CDATA[Set the next element.]]>
+ </doc>
+ </method>
+ <method name="getNext" return="org.apache.hadoop.hdfs.util.LightWeightGSet.LinkedElement"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the next element.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Elements of {@link LightWeightGSet}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.util.LightWeightGSet.LinkedElement -->
+</package>
+
+</api>