'''
@Company: TWL
@Author: xue jian
@Email: xuejian@kanzhun.com
@Date: 2020-04-09 19:40:18
'''
# -*- coding: UTF-8 -*-
import numpy as np
# The 26 training-data shard ("bucket") names; each one names a sub-directory
# holding an "out" file under a per-date data directory.
# NOTE(review): the meaning of the codes (C1_1, T1_2, ...) is not visible in
# this file — presumably job-category shards; confirm with the data producer.
buckets = ['C1_1', 'E1_1', 'F1_1', 'M1_1', 'M2_1', 'S1_2', 'SX_1', 'T1_2', 'T1_4', 'T2_1', 'X1_1', 'X2_1', 'Y1_2', 'C2_1', 'E2_1', 'F2_1', 'M1_2', 'S1_1', 'S2_1', 'T1_1', 'T1_3', 'T1_5', 'WM_1', 'X1_2', 'Y1_1', 'Y2_1']
# f_path = "/data1/wuxiushan/bossrec_train/"

# dates = ["2019-05-16"]
#
# w_path = "/home/zangruozhou/"
# w_f = open(w_path, 'w')
# bucket_num = {'SX_1': 0.02318156248501368, 'F2_1': 0.027327029178836088, 'S2_1': 0.11241390837451178,
#               'C1_1': 0.028856959575035812, 'F1_1': 0.016861639992149874, 'S1_1': 0.16806311834815577,
#               'M1_2': 0.010630781500653417, 'M1_1': 0.0067264003653597915, 'S1_2': 0.08569158411611763,
#               'X2_1': 0.05450543527271555, 'X1_1': 0.040090746760028766, 'X1_2': 0.02878909311494591,
#               'C2_1': 0.04715445407371602, 'WM_1': 0.015248292550923102, 'E2_1': 0.06463863238338254,
#               'M2_1': 0.018421950670180435, 'T1_4': 0.015561432585794174, 'T1_5': 0.013113193806253512,
#               'E1_1': 0.03098407705922442, 'T1_1': 0.012068678523306843, 'T1_2': 0.03195238417757848,
#               'T1_3': 0.014337519164036255, 'Y1_2': 0.033186493015950815, 'Y1_1': 0.018207314837245622,
#               'Y2_1': 0.0382135371309006, 'T2_1': 0.043773780937983134}

def count_bucket_rate(w_f):
    """Compute each bucket's share of the total line count.

    For every bucket in the module-level ``buckets`` list, counts the lines
    of ``<w_f><bucket>/out`` and returns the normalized fractions.

    Args:
        w_f: directory path ending with '/' that contains one sub-directory
            per bucket name, each holding an "out" file.

    Returns:
        dict mapping bucket name -> fraction of all lines (fractions sum to 1).

    Raises:
        ZeroDivisionError: if every "out" file is empty.
        IOError/OSError: if a bucket's "out" file is missing.
    """
    count_bucket = {}
    count_sum = 0
    for bucket in buckets:
        # `with` closes each file handle; the original leaked one handle per
        # bucket because the open() result was never closed.
        with open(w_f + bucket + "/out", 'r') as fh:
            count = sum(1 for _ in fh)
        count_bucket[bucket] = count
        count_sum += count
    return {k: v / float(count_sum) for k, v in count_bucket.items()}


def merge_dates_bucket(f_path, dates, save_path, bucket_num=None):
    """Merge each date's per-bucket "out" files into one shuffled file.

    For every date, repeatedly pulls a proportional batch of lines from each
    ``<f_path>/<date>/<bucket>/out`` file (about ``100000 * ratio`` lines per
    bucket), shuffles the batch, and appends it to ``<save_path><date>.merge``
    until every bucket file is exhausted.

    Args:
        f_path: root directory containing one sub-directory per date.
        dates: iterable of date strings, e.g. ["2019-06-13"].
        save_path: directory prefix where "<date>.merge" files are written.
        bucket_num: optional mapping bucket -> sampling ratio.  When falsy
            (None or {}, matching the old ``== {}`` check) it is computed once
            from the first date via count_bucket_rate() and reused for the
            remaining dates.  The default is now None instead of a mutable
            ``{}`` literal (shared-default-argument pitfall).
    """
    if f_path[-1] != '/':
        f_path += '/'
    for date in dates:
        date_path = f_path + date + "/"
        if not bucket_num:
            # Derive ratios lazily from the first date only.
            bucket_num = count_bucket_rate(date_path)

        def read_batch(files):
            # Pull ~100000 * ratio lines from each bucket; stop a bucket
            # early at EOF (readline() returns "" there).  The original's
            # first batch skipped this check and buffered empty strings.
            batch = []
            for bucket in buckets:
                take = int(100000 * bucket_num[bucket])
                for _ in range(take):
                    line = files[bucket].readline()
                    if line == "":
                        break
                    batch.append(line)
            return batch

        file_dict = {}
        try:
            for bucket in buckets:
                file_dict[bucket] = open(date_path + bucket + "/out", 'r')
            # `with` guarantees the merge file is closed even on error.
            with open(save_path + date + ".merge", 'w') as w_f:
                batch = read_batch(file_dict)
                # BUG FIX: the original condition was `len(...) > 1`, which
                # silently dropped a final batch containing exactly one line.
                while batch:
                    np.random.shuffle(batch)  # in-place shuffle of the list
                    w_f.writelines(batch)
                    batch = read_batch(file_dict)
        finally:
            # Close every reader that was successfully opened, even if a
            # later open() or a write failed midway.
            for fh in file_dict.values():
                fh.close()


if __name__ == '__main__':
    # Script entry point: merge one hard-coded date from the training-data
    # root into the output directory; bucket ratios are computed on the fly
    # because no bucket_num mapping is supplied.
    merge_dates_bucket("/data1/wuxiushan/bossrec_train/", ["2019-06-13"], "/data1/home/zangruozhou/")