This article covers how to merge small files into a SequenceFile in Hadoop and then read the merged file in a map task. Quite a few people run into trouble with this in practice, so work through the example below step by step and try it out for yourself.
package hgs.sequencefile;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Merge small files into a single SequenceFile
public class SequenceMain {
    public static void main(String[] args) throws IOException, URISyntaxException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://192.168.6.129:9000"), conf);
        // List all files under the /words directory
        FileStatus[] fstats = fs.listStatus(new Path("/words"));
        //System.out.println(fstats.length);
        Text key = new Text();
        Text value = new Text();
        // Create a SequenceFile writer; merge.seq is the output file name
        @SuppressWarnings("deprecation")
        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf,
                new Path("/sequence/merge.seq"), key.getClass(), value.getClass());
        // Loop over every file in the directory
        for (FileStatus fis : fstats) {
            // Write each file into the SequenceFile as one key/value pair
            FSDataInputStream finput = fs.open(fis.getPath());
            byte[] buffer = new byte[(int) fis.getLen()];
            IOUtils.readFully(finput, buffer, 0, buffer.length);
            // Key is the file name, value is the file contents
            key.set(fis.getPath().getName());
            value.set(buffer);
            writer.append(key, value);
            finput.close();
        }
        writer.close();
        fs.close();
    }
}
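Before wiring the merged file into a MapReduce job, it can be worth confirming that the merge worked. Below is a minimal standalone sketch (not part of the original post; it assumes the same NameNode address and output path as SequenceMain above, and the class name SequenceDump is made up for illustration) that reads the merged SequenceFile back and prints each stored file name together with the size of its contents:

package hgs.sequencefile;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Hypothetical helper for verification only
public class SequenceDump {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes the same NameNode as SequenceMain
        conf.set("fs.defaultFS", "hdfs://192.168.6.129:9000");
        Path path = new Path("/sequence/merge.seq");
        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
        Text key = new Text();
        Text value = new Text();
        // Each record is (original file name, file contents)
        while (reader.next(key, value)) {
            System.out.println(key + " -> " + value.getLength() + " bytes");
        }
        reader.close();
    }
}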
package hgs.sequencefile;

import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Identity mapper: passes the (file name, file contents) pairs read from the SequenceFile straight through
public class SequnceMapper extends Mapper<Text, Text, Text, Text> {
    @Override
    protected void map(Text key, Text value, Mapper<Text, Text, Text, Text>.Context context)
            throws IOException, InterruptedException {
        context.write(key, value);
    }
}
package hgs.sequencefile;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SequenceDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "read_sequence_file");
        job.setJarByClass(hgs.sequencefile.SequenceDriver.class);
        // Mapper
        job.setMapperClass(SequnceMapper.class);
        // No custom reducer is set, so the default (identity) reducer is used
        //job.setReducerClass(Reducer.class);
        // Output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // Set the InputFormat used to read the SequenceFile:
        // SequenceFileAsTextInputFormat hands the mapper Text (String-form) keys and values;
        // SequenceFileAsBinaryInputFormat would hand it BytesWritable keys and values instead.
        job.setInputFormatClass(SequenceFileAsTextInputFormat.class);
        // Input and output paths are DIRECTORIES, not files
        FileInputFormat.setInputPaths(job, new Path("hdfs://192.168.6.129:9000/sequence"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.6.129:9000/seqresult"));
        if (!job.waitForCompletion(true))
            return;
    }
}
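Since the driver never sets an OutputFormat, the results land in /seqresult as plain text, one "file-name TAB file-contents" line per record; the old-API org.apache.hadoop.mapred.SequenceFileOutputFormat imported above is never actually used. If you wanted the job to write a SequenceFile again, a rough sketch of the lines that would change in the driver (using the new-API class, an alternative not shown in the original post) might look like this:

// Sketch only: emit a SequenceFile instead of the default plain-text output.
// This is the new-API class from org.apache.hadoop.mapreduce.lib.output,
// not the old org.apache.hadoop.mapred one imported above.
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

job.setOutputFormatClass(SequenceFileOutputFormat.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.6.129:9000/seqresult"));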
That wraps up how to merge small files into a SequenceFile in Hadoop and read the result in a map task. Thanks for reading.
Original link: http://blog.itpub.net/31506529/viewspace-2217742/