<%@ page contentType="text/html; charset=UTF-8" isThreadSafe="false" import="javax.servlet.*" import="javax.servlet.http.*" import="java.io.*" import="java.util.*" import="org.apache.hadoop.fs.*" import="org.apache.hadoop.hdfs.*" import="org.apache.hadoop.hdfs.server.common.*" import="org.apache.hadoop.hdfs.server.namenode.*" import="org.apache.hadoop.hdfs.server.datanode.*" import="org.apache.hadoop.hdfs.protocol.*" import="org.apache.hadoop.util.*" import="java.text.DateFormat" import="java.lang.Math" import="java.net.URLEncoder" %> <%!
// NOTE(review): this copy of the page appears to have had most of its HTML
// markup stripped during extraction — many out.print(...) literals are empty
// ("") and several string literals span physical line breaks.  The emitted
// markup should be confirmed against the pristine dfsnodelist.jsp before
// any behavioral change is made; comments below document intent only.
//
// The page is declared isThreadSafe="false" because the fields below carry
// per-request state (sort field/order, display unit scaling) in page
// instance fields rather than locals.
JspHelper jspHelper = new JspHelper();  // shared HDFS JSP utility (node status, sorting)
int rowNum = 0; int colNum = 0;         // row/column counters used by the row/cell helpers
String rowTxt() { colNum = 0; return " "; }   // starts a new table row (markup stripped here)
String colTxt() { return " "; }               // starts a new table cell (markup stripped here)
void counterReset () { colNum = 0; rowNum = 0 ; }
long diskBytes = 1024 * 1024 * 1024;    // bytes per displayed capacity unit (GB; may scale to TB)
String diskByteStr = "GB";              // unit label matching diskBytes
String sorterField = null;              // column to sort on, from request param "sorter/field"
String sorterOrder = null;              // "ASC" or "DSC", from request param "sorter/order"
// NOTE(review): generateDFSNodesList below reads the request's whatNodes into
// a LOCAL variable, so this field is never reassigned and NodeHeaderStr links
// always carry whatNodes=LIVE — verify whether that is intended.
String whatNodes = "LIVE";
// Builds the attribute string for a sortable column header: a CSS class, an
// onClick handler that reloads this page sorted on the given column, and a
// tooltip.  A column currently sorted ASC links to DSC, and vice versa.
String NodeHeaderStr(String name) {
  String ret = "class=header";
  String order = "ASC";
  if ( name.equals( sorterField ) ) {
    ret += sorterOrder;
    if ( sorterOrder.equals("ASC") ) order = "DSC";
  }
  ret += " onClick=\"window.document.location=" + "'/dfsnodelist.jsp?whatNodes="+whatNodes+"&sorter/field=" + name + "&sorter/order=" + order + "'\" title=\"sort on this column\"";
  return ret;
}
// Emits one table row for the DECOMMISSIONING view: node name, last-contact
// age in seconds, the three decommissioning progress counters, and elapsed
// time since decommissioning started.  When alive == false only the name
// cell is printed and the method returns early.
void generateDecommissioningNodeData(JspWriter out, DatanodeDescriptor d,
    String suffix, boolean alive, int nnHttpPort) throws IOException {
  // URL for browsing "/" through this datanode's HTTP server.
  // NOTE(review): url is currently unreferenced below — presumably it was the
  // href of the (stripped) node link; confirm against the original markup.
  String url = "http://" + d.getHostName() + ":" + d.getInfoPort()
      + "/browseDirectory.jsp?namenodeInfoPort=" + nnHttpPort
      + "&dir=" + URLEncoder.encode("/", "UTF-8");
  // Display name is host:port; strip the domain unless the host is a raw IP.
  String name = d.getHostName() + ":" + d.getPort();
  if (!name.matches("\\d+\\.\\d+.\\d+\\.\\d+.*"))
    name = name.replaceAll("\\.[^.:]*", "");
  // If the caller found a :port suffix common to all nodes, trim it here too.
  int idx = (suffix != null && name.endsWith(suffix)) ? name .indexOf(suffix) : -1;
  out.print(rowTxt() + "" + ((idx > 0) ? name.substring(0, idx) : name) + "" + ((alive) ?
"" : "\n"));
  if (!alive) { return; }
  long decommRequestTime = d.decommissioningStatus.getStartTime();
  long timestamp = d.getLastUpdate();
  long currentTime = System.currentTimeMillis();
  // Elapsed decommission time, split into whole hours plus leftover minutes.
  long hoursSinceDecommStarted = (currentTime - decommRequestTime)/3600000;
  long remainderMinutes = ((currentTime - decommRequestTime)/60000) % 60;
  out.print(" " + ((currentTime - timestamp) / 1000) + "" + d.decommissioningStatus.getUnderReplicatedBlocks() + "" + d.decommissioningStatus.getDecommissionOnlyReplicas() + "" + d.decommissioningStatus.getUnderReplicatedInOpenFiles() + "" + hoursSinceDecommStarted + " hrs " + remainderMinutes + " mins" + "\n");
}
// Emits one table row for the LIVE (or DEAD) node tables: name, last-contact
// age, admin state, capacity/used/non-DFS/remaining figures scaled by
// diskBytes, usage percentages (including a percentage bar), and block
// count.  For dead nodes only the name cell is printed.
public void generateNodeData( JspWriter out, DatanodeDescriptor d,
    String suffix, boolean alive, int nnHttpPort ) throws IOException {
  /* Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5 we use:
     1) d.getHostName():d.getPort() to display. Domain and port are stripped
        if they are common across the nodes. i.e. "dn1"
     2) d.getHost():d.Port() for "title". i.e. "192.168.0.5:50010"
     3) d.getHostName():d.getInfoPort() for url.
        i.e. "http://dn1.hadoop.apache.org:50075/..."
     Note that "d.getHost():d.getPort()" is what DFS clients use to
     interact with datanodes. */
  // from nn_browsedfscontent.jsp:
  String url = "http://" + d.getHostName() + ":" + d.getInfoPort()
      + "/browseDirectory.jsp?namenodeInfoPort=" + nnHttpPort
      + "&dir=" + URLEncoder.encode("/", "UTF-8");
  String name = d.getHostName() + ":" + d.getPort();
  if ( !name.matches( "\\d+\\.\\d+.\\d+\\.\\d+.*" ) )
    name = name.replaceAll( "\\.[^.:]*", "" );
  int idx = (suffix != null && name.endsWith( suffix )) ? name.indexOf( suffix ) : -1;
  out.print( rowTxt() + "" + (( idx > 0 ) ? name.substring(0, idx) : name) + "" + (( alive ) ?
"" : "\n") );
  if ( !alive ) return;
  long c = d.getCapacity();
  long u = d.getDfsUsed();
  long nu = d.getNonDfsUsed();
  long r = d.getRemaining();
  String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent());
  String percentRemaining = StringUtils.limitDecimalTo2(d.getRemainingPercent());
  String adminState = (d.isDecommissioned() ? "Decommissioned" : (d.isDecommissionInProgress() ? "Decommission In Progress": "In Service"));
  long timestamp = d.getLastUpdate();
  long currentTime = System.currentTimeMillis();
  out.print(" " + ((currentTime - timestamp)/1000) + "" + adminState + "" + StringUtils.limitDecimalTo2(c*1.0/diskBytes) + "" + StringUtils.limitDecimalTo2(u*1.0/diskBytes) + "" + StringUtils.limitDecimalTo2(nu*1.0/diskBytes) + "" + StringUtils.limitDecimalTo2(r*1.0/diskBytes) + "" + percentUsed + "" + ServletUtil.percentageGraph( (int)Double.parseDouble(percentUsed) , 100) + "" + percentRemaining + "" + d.numBlocks() + "\n");
}
// Renders the node list selected by the "whatNodes" request parameter
// (LIVE, DEAD or DECOMMISSIONING) as an HTML table, honoring the optional
// "sorter/field" / "sorter/order" parameters for the live view.  Prints
// "Invalid input" for a missing or unrecognized whatNodes value.
public void generateDFSNodesList(JspWriter out, NameNode nn,
    HttpServletRequest request) throws IOException {
  ArrayList live = new ArrayList();   // NOTE(review): raw types — pre-generics style kept as-is
  ArrayList dead = new ArrayList();
  jspHelper.DFSNodesStatus(live, dead);
  //verify input for correctness
  String whatNodes = request.getParameter("whatNodes");// show only live or only dead nodes
  if (whatNodes == null || whatNodes.length() == 0) {
    out.print("Invalid input");
    return;
  }
  sorterField = request.getParameter("sorter/field");
  sorterOrder = request.getParameter("sorter/order");
  if ( sorterField == null ) sorterField = "name";
  if ( sorterOrder == null ) sorterOrder = "ASC";
  jspHelper.sortNodeList(live, sorterField, sorterOrder);
  jspHelper.sortNodeList(dead, "name", "ASC");
  // Find out common suffix. Should this be before or after the sort?
  String port_suffix = null;
  if ( live.size() > 0 ) {
    String name = live.get(0).getName();
    int idx = name.indexOf(':');
    if ( idx > 0 ) {
      port_suffix = name.substring( idx );
    }
    // Keep the :port suffix only if every live node shares it.
    for ( int i=1; port_suffix != null && i < live.size(); i++ ) {
      if ( live.get(i).getName().endsWith( port_suffix ) == false ) {
        port_suffix = null;
        break;
      }
    }
  }
  counterReset();
  // NOTE(review): this unconditional 1-second sleep delays every page load
  // and swallows InterruptedException without re-interrupting; it looks like
  // leftover debugging/throttling code — confirm whether it can be removed.
  try { Thread.sleep(1000); } catch (InterruptedException e) {}
  if (live.isEmpty() && dead.isEmpty()) {
    out.print("There are no datanodes in the cluster");
  } else {
    int nnHttpPort = nn.getHttpAddress().getPort();
    out.print( "
");
// LIVE view: summary count, then the sortable table of live datanodes.
if(whatNodes.equals("LIVE")) { out.print( "" + "Live Datanodes : " + live.size() + "" + "

\n\n" ); counterReset();
// Switch display units from GB to TB when the first node exceeds 1024 GB.
if ( live.size() > 0 ) { if ( live.get(0).getCapacity() > 1024 * diskBytes ) { diskBytes *= 1024; diskByteStr = "TB"; } out.print( "
Node Last
Contact
Admin State Configured
Capacity (" + diskByteStr + ")
Used
(" + diskByteStr + ")
Non DFS
Used (" + diskByteStr + ")
Remaining
(" + diskByteStr + ")
Used
(%)
Used
(%)
Remaining
(%)
Blocks\n" );
// One row per live datanode, in the requested sort order.
jspHelper.sortNodeList(live, sorterField, sorterOrder); for ( int i=0; i < live.size(); i++ ) { generateNodeData(out, live.get(i), port_suffix, true, nnHttpPort); } } out.print("
\n"); } else if (whatNodes.equals("DEAD")) {
// DEAD view: summary count, then a simple name-sorted list of dead nodes.
out.print("
" + " Dead Datanodes : " +dead.size() + "

\n"); if ( dead.size() > 0 ) { out.print( " " + "
Node \n" ); jspHelper.sortNodeList(dead, "name", "ASC"); for ( int i=0; i < dead.size() ; i++ ) { generateNodeData(out, dead.get(i), port_suffix, false, nnHttpPort); } out.print("
\n"); } } else if (whatNodes.equals("DECOMMISSIONING")) {
// Decommissioning Nodes
ArrayList decommissioning = nn.getNamesystem() .getDecommissioningNodes(); out.print("
" + " Decommissioning Datanodes : " + decommissioning.size() + "

\n"); if (decommissioning.size() > 0) { out.print(" " + "
Node Last
Contact
Under Replicated Blocks Blocks With No
Live Replicas
Under Replicated Blocks
In Files Under Construction" + "
Time Since Decommissioning Started" );
// One row per decommissioning datanode, sorted by name.
jspHelper.sortNodeList(decommissioning, "name", "ASC"); for (int i = 0; i < decommissioning.size(); i++) { generateDecommissioningNodeData(out, decommissioning.get(i), port_suffix, true, nnHttpPort); } out.print("
\n"); } out.print("
"); } else {
// if nothing matches then print invalid input
out.println("Invalid input"); } } }%> <%
// Page-level scriptlet: fetch the NameNode that the daemon stored in the
// servlet context and build the "host:port" label used in the title/header.
NameNode nn = (NameNode)application.getAttribute("name.node"); FSNamesystem fsn = nn.getNamesystem(); String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort(); %> Hadoop NameNode <%=namenodeLabel%>
<%-- Page body (markup stripped in this copy — confirm against the pristine
     dfsnodelist.jsp): NameNode label, start time, version/build info,
     upgrade status, navigation links, then the node table produced by
     generateDFSNodesList() and the standard Hadoop footer. --%>
NameNode '<%=namenodeLabel%>'

Started: <%= fsn.getStartTime()%>
Version: <%= VersionInfo.getVersion()%>, r<%= VersionInfo.getRevision()%>
Compiled: <%= VersionInfo.getDate()%> by <%= VersionInfo.getUser()%>
Upgrades: <%= jspHelper.getUpgradeStatusText()%>

Browse the filesystem
Namenode Logs
Go back to DFS home
<%
// Render the node list selected by this request's whatNodes/sorter params.
generateDFSNodesList(out, nn, request); %> <% out.println(ServletUtil.htmlFooter()); %>