# coding=utf-8
from bigflow import base
from bigflow import input
from bigflow import output
from bigflow import base, schema, transforms
from bigflow.transforms import *
from bil.load import wise_join_word_file
from bil.region.bgfl import point_join_region

"""
作用：形成群体分析接口的上传文档：北京的搜索关键词为"养老院"人群的cuid(未去重)
路径：${out_root}/cal/xxx
文件格式：${id} \t ${id_type}
"""
out_root = "afs://wuge.afs.baidu.com:9902/user/bil-plat/users/v_libin09/***"
a = 2021042408
b = 2021042410


def create_pipeline():
    """Build and return a configured Bigflow DAGMR pipeline.

    Authenticates with a BAAS identity code and applies a fixed Hadoop job
    configuration (5000-way map/reduce capacity and tasks, VERY_HIGH
    priority, default concurrency 5000).
    """
    # SECURITY NOTE(review): the identity code below is a credential
    # hard-coded in source — consider loading it from a secure store.
    auth_conf = {
        "auth_type": "baas_identity_code",
        "baas_identity_code": "9U+h7CXfgnWe+wCGDgJP",
        "baas_user": "v_libin09",
        "baas_group": "g_bil",
    }
    hadoop_conf = {
        "mapred.job.map.capacity": "5000",
        "mapred.job.reduce.capacity": "5000",
        "mapred.map.tasks": "5000",
        "mapred.reduce.tasks": "5000",
        "mapred.job.priority": "VERY_HIGH",
    }

    scratch_path = "afs://wuge.afs.baidu.com:9902/user/bil-plat/users/v_libin09/***"
    pipeline = base.Pipeline.create(
        "DAGMR",
        tmp_data_path=scratch_path,
        udw_conf=auth_conf,
        default_concurrency=5000,
        hadoop_job_conf=hadoop_conf,
    )
    # Ship the current working directory along with the job.
    pipeline.add_directory(".", './')
    return pipeline


def wise_func(date, pipeline):
    """Return distinct (cuid, record) pairs for Beijing records of one date-hour.

    Loads the wise log joined against the keyword dictionary file, keeps
    only records whose city is Beijing and whose "loc" field is present,
    de-duplicates, and keys each surviving record by its cuid.
    """
    fields = ["city", "loc", "time", "cuid"]
    word_dict_path = "/home/users/v_libin09/libin/test1/word_dict.txt"
    records = wise_join_word_file(pipeline, str(date), fields, word_dict_path)
    beijing_only = records.filter(lambda rec: rec["city"] == "北京")
    # NOTE(review): `!= ()` is tested before `is not None`; a None loc
    # already fails nothing here since None != () is True, but the second
    # clause then rejects it — net effect: drop empty-tuple and None locs.
    with_location = beijing_only.filter(
        lambda rec: rec["loc"] != () and rec["loc"] is not None)
    return with_location.distinct().map(lambda rec: (rec["cuid"], rec))


def cal_iter_day():
    """Run one pipeline per date-hour in [a, b) and dump each to AFS.

    Each date-hour gets its own pipeline and its own 10-partition text
    output directory under ``out_root``.
    """
    # range() is half-open: `a` is inclusive, `b` is exclusive.
    for date_hour in range(a, b):
        pipeline = create_pipeline()
        keyed_records = wise_func(date_hour, pipeline)
        out_path = "%s/%s/" % (out_root, date_hour)
        pipeline.write(keyed_records, output.TextFile(out_path).partition(n=10))
        pipeline.run()


def cal_total():
    """Aggregate the per-date-hour outputs into one upload file.

    Reads every partition written by cal_iter_day(), reduces each record to
    "<cuid>\tcuid", unions them, and writes the result as a single text
    partition under ${out_root}/cal/.  Per the module header, the cuids are
    intentionally NOT de-duplicated.
    """
    pipeline = create_pipeline()
    total = pipeline.parallelize([])
    for date in range(a, b):
        path = out_root + "/" + str(date)
        # SECURITY NOTE(review): eval() fully trusts the on-disk text; if the
        # records are plain Python literals, ast.literal_eval would be the
        # safe equivalent — confirm the data format before switching.
        wise_all = pipeline.read(input.TextFile(path)) \
            .map(lambda x: eval(x)) \
            .map(lambda x: x[0] + "\t" + "cuid")
        total = total.union(wise_all)

    # BUG FIX: the original called `total.distinct()` and discarded the
    # return value — Bigflow transforms return new datasets rather than
    # mutating in place, so the call was a no-op.  The module header states
    # the upload file must NOT be de-duplicated ("未去重"), so the dead call
    # is removed instead of being assigned back to `total`.
    path = out_root + "/" + "cal" + "/"
    pipeline.write(total, output.TextFile(path).partition(n=1))
    pipeline.run()


if __name__ == '__main__':
    # Stage 1: write one 10-partition output per date-hour in [a, b).
    cal_iter_day()
    # Stage 2: merge the per-date-hour outputs into the single upload file
    # under ${out_root}/cal/.
    cal_total()
