A Simple Example of MapReduce Reading and Writing HBase Data

  We'll use the classic word count example. The words to be counted live in an HBase table named word; the MapReduce job reads its input from the word table and, once counting is done, writes the results to an HBase table named stat.

 

  1. Create a Hadoop project in Eclipse, then add the following jars from the HBase release package:

hbase-0.94.13.jar
zookeeper-3.4.5.jar
protobuf-java-2.4.0a.jar
guava-11.0.2.jar
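
  After adding the jars, it's worth verifying that the client can actually reach the cluster before going further. Below is a minimal sketch of such a check; it assumes hbase-site.xml is on the project classpath, and the class name CheckConnection is made up for this example:

package cn.luxh.app;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CheckConnection {

    public static void main(String[] args) throws Exception {
        //Reads hbase-site.xml / hbase-default.xml from the classpath
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        System.out.println("HBase master running: " + admin.isMasterRunning());
        admin.close();
    }
}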

 

  2. Create the tables in HBase and initialize the test data

package cn.luxh.app;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;

/**
 * 
 * @author Luxh
 *
 */
public class InitData {
    
    public static void main(String[] args) throws IOException {
        //Create the word table with a single column family, content
        HBaseUtil.createTable("word","content");
        
        //Get the word table; buffer puts client-side until flushCommits()
        HTable htable = HBaseUtil.getHTable("word");
        htable.setAutoFlush(false);
        
        //Create the test data
        List<Put> puts = new ArrayList<Put>();
        
        Put put1 = HBaseUtil.getPut("1","content",null,"The Apache Hadoop software library is a framework");
        Put put2 = HBaseUtil.getPut("2","content",null,"The common utilities that support the other Hadoop modules");
        Put put3 = HBaseUtil.getPut("3","content",null,"Hadoop by reading the documentation");
        Put put4 = HBaseUtil.getPut("4","content",null,"Hadoop from the release page");
        Put put5 = HBaseUtil.getPut("5","content",null,"Hadoop on the mailing list");
        
        puts.add(put1);
        puts.add(put2);
        puts.add(put3);
        puts.add(put4);
        puts.add(put5);
        
        //Submit the test data
        htable.put(puts);
        htable.flushCommits();
        htable.close();
        
        //Create the stat table with a single column family, result
        HBaseUtil.createTable("stat","result");
    }
}

  1) For the HBaseUtil utility class used in the code above, see: http://www.cnblogs.com/luxh/archive/2013/04/16/3025172.html
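
  In case the linked post is unavailable, here is a minimal sketch of what HBaseUtil might look like. The method signatures are inferred from the calls above, so the actual implementation in the linked post may differ:

package cn.luxh.app;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * A hypothetical stand-in for the HBaseUtil class from the linked post.
 */
public class HBaseUtil {

    private static final Configuration conf = HBaseConfiguration.create();

    //Create a table with the given column families; do nothing if it already exists
    public static void createTable(String tableName, String... families) throws IOException {
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
            if (admin.tableExists(tableName)) {
                return;
            }
            HTableDescriptor desc = new HTableDescriptor(tableName);
            for (String family : families) {
                desc.addFamily(new HColumnDescriptor(family));
            }
            admin.createTable(desc);
        } finally {
            admin.close();
        }
    }

    //Open the named table
    public static HTable getHTable(String tableName) throws IOException {
        return new HTable(conf, tableName);
    }

    //Build a Put for rowKey that stores family:qualifier=value; the qualifier may be null
    public static Put getPut(String rowKey, String family, String qualifier, String value) {
        Put put = new Put(Bytes.toBytes(rowKey));
        byte[] q = (qualifier == null) ? null : Bytes.toBytes(qualifier);
        put.add(Bytes.toBytes(family), q, Bytes.toBytes(value));
        return put;
    }
}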

  2) After running the program above, check in the HBase shell that the tables were created successfully:

hbase(main):012:0> list
TABLE
stat
word
2 row(s) in 0.4730 seconds

  3) View the test data in the word table. Note that the column shows up as content: with an empty qualifier, because the Puts above passed null as the qualifier:

hbase(main):005:0> scan 'word'
ROW                    COLUMN+CELL                                                     
 1                     column=content:, timestamp=1385447676510, value=The Apache Hadoo
                       p software library is a framework                               
 2                     column=content:, timestamp=1385447676510, value=The common utili
                       ties that support the other Hadoop modules                      
 3                     column=content:, timestamp=1385447676510, value=Hadoop by readin
                       g the documentation                                             
 4                     column=content:, timestamp=1385447676510, value=Hadoop from the 
                       release page                                                    
 5                     column=content:, timestamp=1385447676510, value=Hadoop on the ma
                       iling list                                                      
5 row(s) in 5.7810 seconds

 

  3. The MapReduce program

package cn.luxh.app;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;


/**
 * @author Luxh
 *
 */
public class WordStat {
    
    /**
     * TableMapper<Text,IntWritable> - Text: the output key type; IntWritable: the output value type
     */
    public static class MyMapper extends TableMapper<Text,IntWritable>{
        
        private static IntWritable one = new IntWritable(1);
        private static Text word = new Text();
        
        @Override
        protected void map(ImmutableBytesWritable key, Result value,
                Context context)
                throws IOException, InterruptedException {
            //The table has only one column family (with an empty qualifier), so just take the value of the row's single cell
            String words = Bytes.toString(value.list().get(0).getValue());
            StringTokenizer st = new StringTokenizer(words); 
            while (st.hasMoreTokens()) {
                 String s = st.nextToken();
                 word.set(s);
                 context.write(word, one);
            }
        }
    }
    
    /**
     * TableReducer<Text,IntWritable,ImmutableBytesWritable> - Text: the input key type;
     * IntWritable: the input value type; ImmutableBytesWritable: the output key type
     */
    public static class MyReducer extends TableReducer<Text,IntWritable,ImmutableBytesWritable>{
        
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values,
                Context context)
                throws IOException, InterruptedException {
            
            int sum = 0;
            for(IntWritable val:values) {
                sum+=val.get();
            }
            //Add one row per word, with the word itself as the row key
            Put put = new Put(Bytes.toBytes(key.toString()));
            //In the result column family, add a qualifier named num holding the word's count.
            //String.valueOf(sum) converts the count to a string before the bytes conversion;
            //otherwise the raw int bytes would show up in the shell as \x00\x00\x00... .
            put.add(Bytes.toBytes("result"), Bytes.toBytes("num"), Bytes.toBytes(String.valueOf(sum)));
            context.write(new ImmutableBytesWritable(Bytes.toBytes(key.toString())),put);
        }
    }
    
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        
        Configuration conf = HBaseConfiguration.create();
        Job job = new Job(conf,"wordstat");
        job.setJarByClass(WordStat.class);
        
        Scan scan = new Scan();
        //Restrict the scan to the content column family (null qualifier, matching how the data was written)
        scan.addColumn(Bytes.toBytes("content"), null);
        //Set the word table as the Mapper's input
        TableMapReduceUtil.initTableMapperJob("word", scan, MyMapper.class, Text.class, IntWritable.class, job);
        //Set the stat table as the Reducer's output
        TableMapReduceUtil.initTableReducerJob("stat", MyReducer.class, job);
        
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

  Wait for the job to finish, then check the stat table:

hbase(main):014:0> scan 'stat'
ROW                    COLUMN+CELL                                                     
 Apache                column=result:num, timestamp=1385449492309, value=1             
 Hadoop                column=result:num, timestamp=1385449492309, value=5             
 The                   column=result:num, timestamp=1385449492309, value=2             
 a                     column=result:num, timestamp=1385449492309, value=1             
 by                    column=result:num, timestamp=1385449492309, value=1             
 common                column=result:num, timestamp=1385449492309, value=1             
 documentation         column=result:num, timestamp=1385449492309, value=1             
 framework             column=result:num, timestamp=1385449492309, value=1             
 from                  column=result:num, timestamp=1385449492309, value=1             
 is                    column=result:num, timestamp=1385449492309, value=1             
 library               column=result:num, timestamp=1385449492309, value=1             
 list                  column=result:num, timestamp=1385449492309, value=1             
 mailing               column=result:num, timestamp=1385449492309, value=1             
 modules               column=result:num, timestamp=1385449492309, value=1             
 on                    column=result:num, timestamp=1385449492309, value=1             
 other                 column=result:num, timestamp=1385449492309, value=1             
 page                  column=result:num, timestamp=1385449492309, value=1             
 reading               column=result:num, timestamp=1385449492309, value=1             
 release               column=result:num, timestamp=1385449492309, value=1             
 software              column=result:num, timestamp=1385449492309, value=1             
 support               column=result:num, timestamp=1385449492309, value=1             
 that                  column=result:num, timestamp=1385449492309, value=1             
 the                   column=result:num, timestamp=1385449492309, value=4             
 utilities             column=result:num, timestamp=1385449492309, value=1             
24 row(s) in 0.7970 seconds
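
  To read a count back from Java instead of the shell, a simple Get on the stat table is enough. The following is a minimal sketch using the same 0.94 client API; the class name ReadStat is made up for this example, while the table and column names match the reducer above:

package cn.luxh.app;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadStat {

    public static void main(String[] args) throws Exception {
        HTable stat = new HTable(HBaseConfiguration.create(), "stat");
        //The word is the row key; its count is stored in result:num
        Get get = new Get(Bytes.toBytes("Hadoop"));
        Result result = stat.get(get);
        byte[] value = result.getValue(Bytes.toBytes("result"), Bytes.toBytes("num"));
        //The count was stored as a string, so read it back with Bytes.toString
        System.out.println("Hadoop: " + Bytes.toString(value)); //prints: Hadoop: 5
        stat.close();
    }
}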

 

 

    Original author: MapReduce
    Original article: https://www.cnblogs.com/luxh/p/3443437.html