# coding=utf-8
from bigflow import base
from bigflow import input
from bigflow import output
from bigflow import base, schema, transforms
from bigflow.transforms import *
from bil.load import wise_join_word_file
from bil.region.bgfl import point_join_region

"""
作用：每周一次的宜昌市疾控关键词统计报告
路径：$out_root/cal/xxx
文件格式：Schema: ${id} \t ${id_type}
"""
out_root = "afs://wuge.afs.baidu.com:9902/user/bil-plat/users/v_libin09/***"
a = 20210424
b = 20210431


def create_pipeline():
    """Build and return a DAGMR bigflow Pipeline with the shared UDW
    credentials and Hadoop job configuration used by every stage."""
    # NOTE(review): a baas identity code is hard-coded in source; consider
    # moving credentials to the environment or a config file.
    udw_auth = {
        "auth_type": "baas_identity_code",
        "baas_identity_code": "9U+h7CXfgnWe+wCGDgJP",
        "baas_user": "v_libin09",
        "baas_group": "g_bil",
    }
    hadoop_conf = {
        'mapred.job.map.capacity': '5000',
        'mapred.job.reduce.capacity': '5000',
        'mapred.map.tasks': '5000',
        'mapred.reduce.tasks': '5000',
        'mapred.job.priority': 'VERY_HIGH',
    }

    tmp_file = "afs://wuge.afs.baidu.com:9902/user/bil-plat/users/v_libin09/wise_data/medical/tw1"
    pipeline = base.Pipeline.create(
        "DAGMR",
        tmp_data_path=tmp_file,
        udw_conf=udw_auth,
        default_concurrency=5000,
        hadoop_job_conf=hadoop_conf,
    )
    # Ship the current directory with the job so local modules resolve remotely.
    pipeline.add_directory(".", './')
    return pipeline


def cal_iter_day():
    """Run one pipeline per day in [a, b) and write that day's filtered
    records to $out_root/<date>/."""
    # range() is half-open, so the day `b` itself is excluded.
    for day in range(a, b):
        pl = create_pipeline()
        day_records = wise_func(day, pl)

        day_path = "%s/%s/" % (out_root, day)
        pl.write(day_records, output.TextFile(day_path).partition(n=100))
        pl.run()


def wise_func(date, pipeline):
    """Load one day's wise log joined against the medical keyword dict and
    key each surviving record by its location.

    Keeps only records whose city is 宜昌 and whose `loc` field is neither
    the empty tuple nor None, deduplicated, as (loc, record) pairs.
    """
    records = wise_join_word_file(
        pipeline,
        str(date),
        ["city", "loc", "time"],
        "/home/users/v_libin09/libin/medical/medical_dict.txt",
    )
    records = records.filter(lambda r: r["city"] == "宜昌")
    records = records.filter(lambda r: r["loc"] != () and r["loc"] is not None)
    records = records.distinct()
    return records.map(lambda r: (r["loc"], r))


def cal_total():
    """Union every per-day output in [a, b), join against regions, and write
    the aggregated report to $out_root/cal/."""
    pl = create_pipeline()
    merged = pl.parallelize([])
    for day in range(a, b):
        day_path = "%s/%s" % (out_root, day)
        # NOTE(review): eval() on on-disk text is unsafe if the files are not
        # fully trusted; ast.literal_eval would be the safer parser here —
        # confirm the serialized records are plain literals before switching.
        daily = pl.read(input.TextFile(day_path)).map(lambda line: eval(line))
        merged = merged.union(daily)

    report = join_func(pl, merged)
    report_path = "%s/%s/" % (out_root, "cal")
    pl.write(report, output.TextFile(report_path).partition(n=100))
    pl.run()


def cal3(p):
    """Group the keyed collection and count the values in each group."""
    grouped = p.apply(transforms.group_by_key)
    return grouped.apply_values(transforms.count)


def cal2(p):
    """Re-key each pair as (first, (second, 1)), group by the new key, and
    apply cal3's count to each group's values."""
    keyed = p.map(lambda pair: (pair[0], (pair[1], 1)))
    return keyed.group_by_key().apply_values(cal3)


def cal1(p):
    """Re-key each triple as (first, (second, third)), group by the new key,
    and apply cal2 to each group's values."""
    keyed = p.map(lambda triple: (triple[0], (triple[1], triple[2])))
    return keyed.group_by_key().apply_values(cal2)


# County id -> county name for 宜昌市 (Yichang). `change` only uses key
# membership to keep Yichang counties; the names document the codes.
YICHANG_COUNTIES = {
    420502: "西陵区", 420503: "伍家岗区", 420504: "点军区", 420505: "猇亭区",
    420506: "夷陵区", 420525: "远安县", 420526: "兴山县", 420527: "秭归县",
    420528: "长阳土家族自治县", 420529: "五峰土家族自治县", 420581: "宜都市",
    420582: "当阳市", 420583: "枝江市",
}


def change(p):
    """Flatten a nested count record into a flat 5-element row.

    Expects p shaped as (county_id, (town_id, (block_id, (word, count)))),
    as produced by join_func's grouping — TODO confirm against cal1..cal3.
    Returns [county_id, town_id, block_id, word, count] for known Yichang
    county ids, or None otherwise so a downstream filter can drop the row.
    """
    # Fix: the lookup table was rebuilt on every call; it is now the
    # module-level constant above. The implicit None fall-through is made
    # explicit — behavior is unchanged.
    if p[0] not in YICHANG_COUNTIES:
        return None
    return [p[0], p[1][0], p[1][1][0], p[1][1][1][0], p[1][1][1][1]]


def join_func(pipeline, total):
    """Join points to regions, count words per (county, town, block), and
    format each surviving row as a tab-separated line.

    Output schema: county_id \t town_id \t block_id \t word \t count.
    """
    joined = point_join_region(pipeline, total, max_level=5, concurrency=1000,
                               is_shuffle=False)
    counted = joined \
        .map(lambda x: (x["county_id"], (x["town_id"], x["block_id"], x["elem"]["word"][1]))) \
        .group_by_key() \
        .apply_values(cal1) \
        .flatten() \
        .map(change) \
        .filter(lambda row: row is not None)
    # Fix: the original str()-wrapped every column except the word (x[3]),
    # which would raise TypeError if the word were ever not already a str.
    # str() is the identity on str, so this is backward-compatible.
    return counted.map(lambda row: "\t".join(str(col) for col in row))


def main():
    """Run the per-day extraction, then build the aggregated report."""
    cal_iter_day()
    cal_total()


if __name__ == '__main__':
    main()
