Hive: batch-checking whether partitions still exist, and dropping the ones that don't

With Hive tables it sometimes happens that a partition still exists in the metastore, but its underlying data has already been deleted from HDFS. To find these partitions and drop the ones whose data is gone, I wrote a few small scripts.

First, list all of the partitions.

Create a table in MySQL to store the results of the partition checks.
STATUS values: -1: unknown, 0: data missing, 1: data exists, 2: partition dropped

create table meta.partition_loc (
  `id` int(11) unsigned NOT NULL AUTO_INCREMENT,
  `LOCATION` varchar(255) COLLATE utf8_bin NOT NULL DEFAULT '',
  `PART_NAME` varchar(255) COLLATE utf8_bin NOT NULL DEFAULT '',
  `CREATE_TIME` int(11) NOT NULL,
  `TBL_NAME` varchar(128) COLLATE utf8_bin NOT NULL DEFAULT '',
  `DB_NAME` varchar(128) COLLATE utf8_bin NOT NULL DEFAULT '',
  `STATUS` int(11) NOT NULL DEFAULT -1,
  PRIMARY KEY (`id`)
);

From the Hive metastore database, find all partitions and write them into this table.

insert into meta.partition_loc
SELECT
  null as id,
  s.`LOCATION`,
  p.`PART_NAME`,
  p.`CREATE_TIME`,
  t.`TBL_NAME`,
  d.`NAME`,
  -1 as `STATUS`
from
  hive.SDS s
  JOIN hive.`PARTITIONS` p on s.SD_ID = p.SD_ID
  JOIN hive.TBLS t on p.TBL_ID = t.TBL_ID
  JOIN hive.DBS d on t.DB_ID = d.DB_ID
;
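
While the checks below run, it is handy to watch how many rows remain in each state. A minimal sketch for that (the connection parameters are placeholders, just like in the scripts below):

#!/usr/bin/env python
import pymysql

# Placeholder connection parameters; substitute your own.
dbconn = pymysql.connect(host='$host', user='$user', password='******',
                         database='meta', port=3306)
cursor = dbconn.cursor()

# Count partitions per STATUS: -1 unknown, 0 missing, 1 exists, 2 dropped.
cursor.execute('SELECT STATUS, COUNT(*) FROM meta.partition_loc GROUP BY STATUS')
for status, count in cursor.fetchall():
    print(status, count)

cursor.close()
dbconn.close()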

Check whether each directory exists, one by one, and update the meta.partition_loc table.

#!/usr/bin/env python
import pymysql
from snakebite.client import AutoConfigClient as HDFSClient


client = HDFSClient()


dbconn = pymysql.connect(
    host='$host',
    user='$user',
    password='******',
    database='meta',
    port=3306
)

cursor = dbconn.cursor()

sql = """SELECT id, LOCATION, PART_NAME, TBL_NAME, DB_NAME
    FROM meta.partition_loc WHERE STATUS = -1 LIMIT 100"""

update_sql = """UPDATE meta.partition_loc SET STATUS=%s WHERE id=%s"""

try:
    n = 0
    while True:
        cursor.execute(sql)
        rows = cursor.fetchall()

        for row in rows:  # process a batch of partitions
            _id, location, part_name, tbl_name, db_name = row
            if location.startswith('hdfs://nameservice1/'):  # strip the hdfs://xxxx prefix
                # Check whether the path exists. This hammers the NameNode,
                # roughly 300~500 requests per second, so run it off-peak.
                # Replace 'nameservice1' with your actual nameservice name.
                s = client.test(location.replace('hdfs://nameservice1/', '/'), exists=True)
            else:
                s = False
            cursor.execute(update_sql, (int(s), _id))  # update the database
            n += 1
        print('handled', n)

        dbconn.commit()
        if not rows:
            break
except Exception as e:
    print(e)
    dbconn.rollback()
finally:
    cursor.close()
    dbconn.close()
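
The script works in batches of 100: every row it touches leaves the STATUS = -1 state, so the loop terminates once no unknown rows remain. If snakebite is not available in your environment, the same existence check can be done by shelling out to the hdfs CLI. This is a sketch of that fallback, not part of the original script; it is far slower because every call spawns a JVM, so prefer snakebite for large batches:

import subprocess

def hdfs_path_exists(path):
    # `hdfs dfs -test -e <path>` exits with status 0 when the path exists.
    return subprocess.call(['hdfs', 'dfs', '-test', '-e', path]) == 0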

Drop the partitions: generate the SQL, then run it with hive.

import os
import pymysql


def to_part_spec(part_name):
    # Turn a metastore PART_NAME such as "dt=2020-01-01/hour=08" into a
    # Hive partition spec: PARTITION(dt='2020-01-01',hour='08')
    s = []
    for kv in part_name.split('/'):
        k, v = kv.split('=', 1)
        s.append("%s='%s'" % (k, v))
    return "PARTITION(%s)" % ','.join(s)


dbconn = pymysql.connect(
    host='$host',
    user='$user',
    password='******',
    database='meta',
    port=3306
)


cursor = dbconn.cursor()

update_sql = """UPDATE meta.partition_loc SET STATUS=%s WHERE id=%s"""


drop_sql = """SELECT id, PART_NAME, concat(DB_NAME, '.', TBL_NAME)
    FROM meta.partition_loc WHERE STATUS = 0 LIMIT 100"""

try:
    n = 0
    for _b in range(10):
        cursor.execute(drop_sql)
        rows = cursor.fetchall()
        if not rows:
            break

        data = {}  # key: table name, value: list of partition names
        for row in rows:
            _id, part_name, tbl = row
            data.setdefault(tbl, []).append(part_name)
            n += 1

        # Write the statements to a SQL file first, then execute it with hive.
        with open('/tmp/remove-partition.sql', 'w') as fout:
            for tbl in data:
                sql = "ALTER TABLE %s DROP\n%s;\n" % (
                    tbl, ',\n'.join([to_part_spec(p) for p in data[tbl]]))
                fout.write(sql)

        # Run the generated statements to drop the empty partitions.
        os.system('hive -f /tmp/remove-partition.sql')

        # Mark these partitions as dropped in the tracking table.
        for row in rows:
            cursor.execute(update_sql, (2, row[0]))
        dbconn.commit()

        print('dropped', n)
except Exception as e:
    print(e)
    dbconn.rollback()
finally:
    cursor.close()
    dbconn.close()
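
For reference, this is what the helper produces and what the generated file ends up containing; the table and partition values here are made up for illustration:

>>> to_part_spec('dt=2020-01-01/hour=08')
"PARTITION(dt='2020-01-01',hour='08')"

# With two missing partitions in one table, /tmp/remove-partition.sql
# would then contain a statement like:
#   ALTER TABLE mydb.mytable DROP
#   PARTITION(dt='2020-01-01',hour='08'),
#   PARTITION(dt='2020-01-01',hour='09');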
