Import the exercise data files dept.txt (department table) and emp.txt (employee table) into Hadoop.
Compute the total salary of each department (either a map-side join or a reduce-side join may be used).
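For the import step, a minimal sketch using the HDFS Java API (the class name Upload and the relative paths are illustrative; hadoop fs -put achieves the same thing from the shell):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class Upload {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // reads core-site.xml from the classpath
        FileSystem fs = FileSystem.get(conf);     // the cluster's default file system (HDFS)
        // Copy both tables from the local working directory into the HDFS home directory.
        fs.copyFromLocalFile(new Path("dept.txt"), new Path("dept.txt"));
        fs.copyFromLocalFile(new Path("emp.txt"), new Path("emp.txt"));
        fs.close();
    }
}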
The expected result is:
ACCOUNTING 8750
RESEARCH 6775
SALES 9400
dept.txt data (deptno, dname, loc):
10,ACCOUNTING,NEW YORK
20,RESEARCH,DALLAS
30,SALES,CHICAGO
40,OPERATIONS,BOSTON
emp.txt data (empno, ename, job, mgr, hiredate, sal, comm, deptno; the mgr and comm fields may be absent, so a row has 6 to 8 comma-separated values, with deptno always last):
7369,SMITH,CLERK,7902,17-12月-80,800,20
7499,ALLEN,SALESMAN,7698,20-2月-81,1600,300,30
7521,WARD,SALESMAN,7698,22-2月-81,1250,500,30
7566,JONES,MANAGER,7839,02-4月-81,2975,20
7654,MARTIN,SALESMAN,7698,28-9月-81,1250,1400,30
7698,BLAKE,MANAGER,7839,01-5月-81,2850,30
7782,CLARK,MANAGER,7839,09-6月-81,2450,10
7839,KING,PRESIDENT,17-11月-81,5000,10
7844,TURNER,SALESMAN,7698,08-9月-81,1500,0,30
7900,JAMES,CLERK,7698,03-12月-81,950,30
7902,FORD,ANALYST,7566,03-12月-81,3000,20
7934,MILLER,CLERK,7782,23-1月-82,1300,10
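The expected totals sum only the sal column and ignore comm: for example, ACCOUNTING (deptno 10) = 2450 + 5000 + 1300 = 8750. The map-side join implementation below caches dept.txt in a HashMap inside each mapper, joins and accumulates salaries during map(), and writes the per-department totals in cleanup(), so no reduce phase is needed: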
package com.bw.project;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MR {

    public static class DMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        // deptno -> "dname:sal:sal:..." (dname from dept.txt, salaries appended per employee)
        Map<String, String> map = new HashMap<>();

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            // Load the cached dept.txt (deptno,dname,loc) from the task's local file system.
            FileSystem fs = FileSystem.getLocal(context.getConfiguration());
            Path[] paths = context.getLocalCacheFiles();
            InputStream in = fs.open(paths[0]);
            BufferedReader bf = new BufferedReader(new InputStreamReader(in));
            String read;
            while ((read = bf.readLine()) != null) {
                String[] split = read.split(",");
                map.put(split[0], split[1]); // deptno -> dname
            }
            bf.close();
            in.close();
        }

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] split = value.toString().split(",");
            // emp.txt rows have 6-8 fields because mgr and comm can be absent:
            // deptno is always the last field, and sal is third-from-last when
            // comm is present (8 fields), otherwise second-from-last.
            String deptno = split[split.length - 1];
            String sal = (split.length == 8) ? split[split.length - 3]
                                             : split[split.length - 2];
            if (map.get(deptno) != null) {
                map.put(deptno, map.get(deptno) + ":" + sal);
            }
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            // Emit one (dname, total salary) pair per department that had employees.
            for (String s : map.values()) {
                String[] split = s.split(":");
                int sum = 0;
                for (int i = 1; i < split.length; i++) {
                    sum += Integer.parseInt(split[i]);
                }
                if (sum > 0) { // departments with no employees (OPERATIONS) are skipped
                    context.write(new Text(split[0]), new IntWritable(sum));
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(MR.class);
        job.addCacheFile(new URI("dept.txt")); // small table distributed to every map task
        job.setMapperClass(DMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class); // must match the mapper's value type
        job.setNumReduceTasks(0); // map-only job: the join and the sum both happen in the mapper
        FileInputFormat.addInputPath(job, new Path("emp.txt"));
        FileOutputFormat.setOutputPath(job, new Path("dept1"));
        job.waitForCompletion(true);
    }
}
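The exercise also allows a reduce-side join. Below is a minimal sketch of that variant, not part of the original solution; the names RSJoin, JoinMapper, JoinReducer and the paths join_in, join_out are illustrative. Both files are assumed to sit in one input directory, the mapper tags each record with its source table, and the reducer joins on deptno:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class RSJoin {

    public static class JoinMapper extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] f = value.toString().split(",");
            if (f.length == 3) {
                // dept.txt row (deptno,dname,loc): emit deptno -> tagged dname
                context.write(new Text(f[0]), new Text("D:" + f[1]));
            } else {
                // emp.txt row: deptno is last; sal position depends on whether comm exists
                String sal = (f.length == 8) ? f[f.length - 3] : f[f.length - 2];
                context.write(new Text(f[f.length - 1]), new Text("E:" + sal));
            }
        }
    }

    public static class JoinReducer extends Reducer<Text, Text, Text, IntWritable> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            String dname = null;
            int sum = 0;
            for (Text v : values) {
                String s = v.toString();
                if (s.startsWith("D:")) {
                    dname = s.substring(2);   // the department name record
                } else {
                    sum += Integer.parseInt(s.substring(2)); // an employee salary
                }
            }
            if (dname != null && sum > 0) {   // skip empty departments (OPERATIONS)
                context.write(new Text(dname), new IntWritable(sum));
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(RSJoin.class);
        job.setMapperClass(JoinMapper.class);
        job.setReducerClass(JoinReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path("join_in")); // directory holding both files
        FileOutputFormat.setOutputPath(job, new Path("join_out"));
        job.waitForCompletion(true);
    }
}

The trade-off between the two variants: the reduce-side join shuffles every employee record but works for tables of any size, while the map-side version avoids the shuffle entirely at the cost of requiring the small table to fit in each mapper's memory.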
Summary
Because dept.txt is small, it can be shipped to every mapper through the distributed cache and joined in memory, so the whole job runs without a reduce phase (setNumReduceTasks(0)). One caveat of this map-only design: each map task sees only its own input split, so with more than one split it would emit one partial total per department per task; for an input as small as emp.txt there is a single split and the output matches the expected result.