hdfs
CREATE TABLE t1(name string,id int)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ';
LOAD DATA LOCAL INPATH '/Users/***/Desktop/test.txt' INTO TABLE t1;
Then check the result on HDFS via the NameNode web UI (port 50070) or with dfs -ls /user/username/hive.
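For example, from inside the Hive CLI (a minimal check, assuming the default warehouse directory; adjust the path to your hive.metastore.warehouse.dir setting):
dfs -ls /user/hive/warehouse/t1;
select * from t1 limit 5;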
java
package demoudf;

import java.text.DateFormat;
import java.util.Date;

import org.apache.hadoop.hive.ql.exec.UDF;
import org.apache.hadoop.io.Text;

/**
 * Converts a Unix timestamp (in seconds) into a formatted date string.
 *
 * @author wyq
 */
public class UnixTodate extends UDF {

    public Text evaluate(Text text) {
        if (text == null) return null;
        long timestamp = Long.parseLong(text.toString());
        // debug output ends up in the task logs
        System.out.println(text);
        System.out.println(timestamp);
        return new Text(toDate(timestamp));
    }

    private String toDate(long timestamp) {
        // java.util.Date expects milliseconds, so scale the Unix seconds by 1000
        Date date = new Date(timestamp * 1000);
        System.out.println(date);
        return DateFormat.getInstance().format(date);
    }

    // Local smoke test; note evaluate() expects seconds, not milliseconds
    // public static void main(String[] args) {
    //     UnixTodate u = new UnixTodate();
    //     Text t = u.evaluate(new Text("1386023259"));
    //     System.out.println(t.toString());
    // }
}
Compile against Hive's hive-exec jar (the exact classpath depends on your Hive/Hadoop install), then package the class files:
javac -cp "$HIVE_HOME/lib/*" demoudf/UnixTodate.java
jar cvf demoudf.jar demoudf/*.class
ADD jar /Users/wyq/Desktop/demoudf.jar;
create temporary function userdate as 'demoudf.UnixTodate';
create table test(id string, unixtime string)
row format delimited fields terminated by ',';
load data local inpath '/Users/wyq/Desktop/udf_test.txt' into table test;
select * from test;
select id,userdate(unixtime) from test;
cwiki.apache.org/confluence/display/Hive/LanguageManual+UDF
python
Sample txt file:
jeffgeng click:13,uid:15
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys

reload(sys)
sys.setdefaultencoding('utf8')

def quchong(desc):
    # deduplicate the '-'-separated items
    a = desc.split('-')
    return '-'.join(set(a))

while True:
    line = sys.stdin.readline()
    if line == "":
        break
    line = line.rstrip('\n')
    # your process code here
    parts = line.split('\t')
    parts[2] = quchong(parts[2])
    print "\t".join(parts)
CREATE TABLE t3 (foo STRING, bar MAP<STRING,INT>)
row format delimited fields terminated by '\t'
COLLECTION ITEMS TERMINATED BY ','
MAP KEYS TERMINATED BY ':' STORED AS TEXTFILE;
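As a quick check (the local path is illustrative), the sample line above can be loaded into t3 and the map values read with bracket syntax:
load data local inpath '/Users/wyq/Desktop/t3.txt' into table t3;
select foo, bar['click'], bar['uid'] from t3;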
SELECT TRANSFORM (<columns>)
USING 'python <python_script>'
AS (<columns>)
FROM <table>;
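For example, assuming the Python script above is saved as quchong.py and applied to a hypothetical table t_demo whose third string column holds a '-'-separated list, the call could look like:
add file /Users/wyq/Desktop/quchong.py;
select transform (c1, c2, c3)
using 'python quchong.py'
as (c1, c2, c3)
from t_demo;
add file ships the script to every task node before the job runs.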
Additional notes
- Making Multiple Passes over the Same Data
Hive has a special syntax for producing multiple aggregations from a single pass through a source of data, rather than rescanning it for each aggregation. This change can save considerable processing time for large input data sets.
The multi-insert form below is therefore more efficient, and its inserts can be executed in parallel:
FROM pv_users
INSERT OVERWRITE TABLE pv_gender_sum
SELECT pv_users.gender, count(DISTINCT pv_users.userid)
GROUP BY pv_users.gender
INSERT OVERWRITE DIRECTORY '/user/data/tmp/pv_age_sum'
SELECT pv_users.age, count(DISTINCT pv_users.userid)
GROUP BY pv_users.age;
set hive.exec.parallel=true; -- enable parallel execution of independent job stages
set hive.exec.parallel.thread.number=16; -- maximum parallelism for one SQL statement; default is 8
- Date handling
Get the date N days ago:
select from_unixtime(unix_timestamp('20111102','yyyyMMdd') - N*86400,'yyyyMMdd') from t_lxw_test1 limit 1;
Get the number of days/seconds/minutes, etc. between two dates:
select ( unix_timestamp('2011-11-02','yyyy-MM-dd')-unix_timestamp('2011-11-01','yyyy-MM-dd') ) / 86400 from t_lxw_test limit 1;
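Hive also has a built-in datediff UDF that returns the day difference between two dates directly:
select datediff('2011-11-02', '2011-11-01') from t_lxw_test limit 1; -- returns 1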
- left outer join
--query 1
select count(id) from
(select a.id from a left outer join b
on a.id=b.id and b.date='2017-10-27'
where to_date(a.adate) >= '2017-10-27' and a.date='2017-07-24'
) a
--query 2
select count(id) from
(select a.id from a left outer join b
on a.id=b.id and b.date='2017-10-27' and a.date='2017-07-24'
where to_date(a.adate) >= '2017-10-27'
) a
What's the difference? WHERE holds filter conditions: in query 1, a.date='2017-07-24' sits in the WHERE clause, so the partition pruner discards partitions before the table scan, and only the data under '2017-07-24' is joined with b.
Query 2 instead reads the data under all of a's partitions and then joins with b; because a.date='2017-07-24' is part of the ON clause, the join only truly matches when that condition holds, and for every other row the LEFT OUTER JOIN pads the right side with NULL.
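To confirm which partitions each variant actually scans, run EXPLAIN (or EXPLAIN DEPENDENCY on Hive 0.10+, which lists the input partitions explicitly), e.g. for query 1:
explain dependency
select count(id) from
(select a.id from a left outer join b
on a.id=b.id and b.date='2017-10-27'
where to_date(a.adate) >= '2017-10-27' and a.date='2017-07-24'
) a;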
Regex matching with rlike, e.g. values consisting only of Chinese characters, or purely numeric mobile numbers:
name rlike '^[\\u4e00-\\u9fa5]+$'
select mobile from phone where mobile rlike '^\\d+$';
CREATE EXTERNAL TABLE IF NOT EXISTS push_log(
plmn STRING)
COMMENT 'log table'
PARTITIONED BY (hostid STRING, dayid STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\001'
STORED AS TEXTFILE
LOCATION '/user/data/push';
(hostid and dayid are partition columns, so they must not be repeated in the regular column list.)
alter table push_log add partition(hostid='$hostid', dayid='$dayid') location '/user/data/push/$hostid/$dayid';
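Here $hostid and $dayid are placeholders meant to be substituted by the calling shell script or scheduler; with concrete (illustrative) values the statement would be:
alter table push_log add partition(hostid='host01', dayid='20171027') location '/user/data/push/host01/20171027';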
testtext sample data (two tab-separated columns, name and addr):
wer	46
weree	78
wer	89
rr	89
create table d_part(name string)
partitioned by(value string)
row format delimited fields terminated by '\t'
lines terminated by '\n'
stored as textfile;
set hive.exec.dynamic.partition=true; -- enable dynamic partitioning
set hive.exec.dynamic.partition.mode=nonstrict; -- allow all partition columns to be determined dynamically
insert overwrite table d_part partition(value) select name,addr as value from testtext;
select * from d_part;
show partitions d_part;
hive> create table d_part2(
> name string
> )
> partitioned by(value string,dt string)
> row format delimited fields terminated by '\t'
> lines terminated by '\n'
> stored as textfile;
hive> insert overwrite table d_part2 partition(value,dt)
> select 'test' as name,
> addr as value,
> name as dt
> from testtext;
show partitions d_part2;