Note: unlike reads, where the client fetches blocks from multiple datanodes concurrently, writes are pipelined: the client interacts with a single datanode, which forwards the data along the replication pipeline.
The hsync() method synchronously flushes buffered data all the way to the datanodes' disks, making the written content visible to other readers.
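A minimal sketch of these flush calls on the write path, assuming a Hadoop release whose FSDataOutputStream exposes hflush()/hsync() (2.x and later); the class name HsyncDemo and the path /tmp/hsync-demo.txt are made up for illustration:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HsyncDemo {
    public static void main(String[] args) throws Exception {
        // Relies on core-site.xml/hdfs-site.xml on the classpath for the cluster address
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // /tmp/hsync-demo.txt is a made-up path for this sketch
        try (FSDataOutputStream out = fs.create(new Path("/tmp/hsync-demo.txt"))) {
            out.write("first line\n".getBytes(StandardCharsets.UTF_8));
            // hflush(): push buffered bytes to the datanodes so new readers can see them
            out.hflush();

            out.write("second line\n".getBytes(StandardCharsets.UTF_8));
            // hsync(): like hflush(), but additionally forces the datanodes to sync to disk
            out.hsync();
        }
    }
}

hflush() only guarantees the bytes have reached the datanodes, while hsync() additionally forces them onto the datanodes' disks.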
Usage: hadoop archive -archiveName NAME -p <parent path> <src>* <dest>

Archive a single sub-directory (/fc/src/20120116/419) into 419.har:
hadoop archive -archiveName 419.har -p /fc/src/20120116/ 419 /user/heipark

Archive several sub-directories into one combine.har:
hadoop archive -archiveName combine.har -p /fc/src/20120116/ 419 512 334 /user/heipark

Omit <src> to archive the whole parent path:
hadoop archive -archiveName combine.har -p /fc/src/20120116/ /user/heipark

Use a glob (1[0-2] matches 10, 11 and 12) to archive /fc/src/2011/10 through /fc/src/2011/12:
hadoop archive -archiveName combine.har -p /fc/src/2011 1[0-2] /user/heipark
View the archive through the har filesystem:
hadoop fs -ls har:///user/heipark/20120108_15.har/

Output:
drw-r--r--   - hdfs hadoop          0 2012-01-17 16:30 /user/heipark/20120108_15.har/2025
drw-r--r--   - hdfs hadoop          0 2012-01-17 16:30 /user/heipark/20120108_15.har/2029
View the har file through the plain HDFS filesystem:
hadoop fs -ls /user/yue.zhang/20120108_15.har/

Output:
-rw-r--r--   2 hdfs hadoop          0 2012-01-17 16:30 /user/heipark/20120108_15.har/_SUCCESS
-rw-r--r--   5 hdfs hadoop       2411 2012-01-17 16:30 /user/heipark/20120108_15.har/_index
-rw-r--r--   5 hdfs hadoop         24 2012-01-17 16:30 /user/heipark/20120108_15.har/_masterindex
-rw-r--r--   2 hdfs hadoop     191963 2012-01-17 16:30 /user/heipark/20120108_15.har/part-0
Listing the contents of a har programmatically with HarFileSystem:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.HarFileSystem;
import org.apache.hadoop.fs.Path;

public class HarList {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.default.name", "hdfs://xxx.xxx.xxx.xxx:9000");

        // Open the archive with HarFileSystem and list a sub-directory inside it
        HarFileSystem fs = new HarFileSystem();
        fs.initialize(new URI("har:///user/heipark/20120108_15.har"), conf);
        FileStatus[] listStatus = fs.listStatus(new Path("sub_dir"));
        for (FileStatus fileStatus : listStatus) {
            System.out.println(fileStatus.getPath().toString());
        }
    }
}