Writing an HDFS Program in Java
1. By default, Hadoop stores its temporary data under the Unix /tmp directory (cd /tmp shows files such as hadoop-root). Since /tmp is typically cleared on reboot, Hadoop may misbehave after the Linux system restarts if this is left unchanged, so the Hadoop temporary file directory should be moved elsewhere.
2. Edit core-site.xml as shown below, then restart the Hadoop cluster; do not reformat the namenode.
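A minimal core-site.xml sketch. The fs.defaultFS address matches the one used in the Java client below; pointing hadoop.tmp.dir at /var/hadoop is an assumption, consistent with the datanode path /var/hadoop/dfs/data mentioned in the next step:

<configuration>
    <!-- NameNode address used by clients (same as in the Java code below) -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://172.26.19.40:9000</value>
    </property>
    <!-- Assumed base directory for Hadoop temporary data, instead of /tmp -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/var/hadoop</value>
    </property>
</configuration>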
Formatting the namenode generates a new clusterID, while the datanodes keep their old clusterID value; the mismatch causes the datanodes to fail on startup. The fix is to edit the VERSION file under the datanode's /var/hadoop/dfs/data/current directory and change its clusterID to match the namenode's; the cluster then starts normally. A sketch of the file follows.
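For reference, a datanode VERSION file looks roughly like the sketch below (all IDs are placeholders); only the clusterID line needs to be changed, to the value found in the namenode's own VERSION file under its name/current directory:

storageID=DS-xxxxxxxx
clusterID=CID-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx   <- must equal the namenode's clusterID
cTime=0
datanodeUuid=xxxxxxxx
storageType=DATA_NODE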
3. For testing, HDFS permission checking can be disabled (otherwise the client has no permission to access the file system). Add the following configuration on the namenode:
vim hdfs-site.xml
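A minimal hdfs-site.xml sketch; dfs.permissions.enabled is the property name in Hadoop 2.x (older releases used dfs.permissions):

<configuration>
    <!-- Disable permission checks; for testing only, never in production -->
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
</configuration>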
The project's pom.xml declares the Hadoop version as a property and pulls in the client, common, and HDFS artifacts:

<properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <hadoop.version>2.7.3</hadoop.version>
</properties>
<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.12</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>${hadoop.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>${hadoop.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>${hadoop.version}</version>
    </dependency>
</dependencies>
With the dependencies in place, the client code below connects to HDFS, reads files, runs basic file-system operations, and uploads a local file with a progress report:

package com.skcc.hadoop;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.text.NumberFormat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class HelloHDFS {

    public static FileSystem getFileSystemInstance() {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://172.26.19.40:9000");
        FileSystem fileSystem = null;
        try {
            fileSystem = FileSystem.get(conf);
        } catch (IOException e) {
            e.printStackTrace();
        }
        return fileSystem;
    }

    public static void getFileFromHDFS() throws Exception {
        // URL handles the http protocol by default; FsUrlStreamHandlerFactory
        // adds the hdfs protocol. It may only be set once per JVM.
        URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
        URL url = new URL("hdfs://172.26.19.40:9000/10803060234.txt");
        InputStream inputStream = url.openStream();
        IOUtils.copyBytes(inputStream, System.out, 4096, true);
    }

    public static void getFileFromBaiDu() throws IOException {
        // Despite the name, this simply streams an ordinary http URL for comparison.
        URL url = new URL("http://skynet.skhynix-cq.com.cn/plusWare/Main.aspx");
        InputStream inputStream = url.openStream();
        IOUtils.copyBytes(inputStream, System.out, 4096, true);
    }

    public static void testHadoop() throws Exception {
        FileSystem fileSystem = getFileSystemInstance();
        boolean success = fileSystem.mkdirs(new Path("/skcc"));
        System.out.println("mkdirs is " + success);
        success = fileSystem.exists(new Path("/10803060234.txt"));
        System.out.println("file exists is " + success);
        success = fileSystem.delete(new Path("/test2.data"), true); // true = recursive
        System.out.println("delete dirs is " + success);
        success = fileSystem.exists(new Path("/skcc"));
        System.out.println("dirs exists is " + success);
    }

    public static void uploadFileToHDFS() throws Exception {
        FileSystem fileSystem = getFileSystemInstance();
        String filename = "/test2.data";
        // The second argument (true) means overwrite an existing file.
        FSDataOutputStream outputStream = fileSystem.create(new Path(filename), true);
        FileInputStream fis = new FileInputStream("D:\\2018\\u001.zip");
        // IOUtils.copyBytes(fis, outputStream, 4096, true); // one-line copy, but no progress output
        long totalLen = fis.getChannel().size();
        long tmpSize = 0;
        NumberFormat numberFormat = NumberFormat.getInstance();
        numberFormat.setMaximumFractionDigits(0);
        System.out.println("totalLen : " + totalLen + " available : " + fis.available());
        byte[] buf = new byte[4096];
        int len = fis.read(buf);
        while (len != -1) {
            tmpSize += len;
            String result = numberFormat.format((float) tmpSize / (float) totalLen * 100);
            outputStream.write(buf, 0, len);
            System.out.println("Upload Percent : " + result + "%");
            len = fis.read(buf);
        }
        // Close both streams so the last block is flushed to HDFS.
        fis.close();
        outputStream.close();
    }
}
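For completeness, a minimal sketch of a driver method to place inside HelloHDFS; this main method and its call order are an assumption, not part of the original article:

public static void main(String[] args) throws Exception {
    testHadoop();        // exercise mkdirs / exists / delete first
    uploadFileToHDFS();  // upload D:\2018\u001.zip as /test2.data, printing progress
    getFileFromHDFS();   // stream /10803060234.txt to stdout via an hdfs:// URL
}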