# -*- coding: UTF-8 -*-
'''
@Company: TWL
@Author: xue jian
@Email: xuejian@kanzhun.com
@Date: 2020-04-09 19:10:23
'''
from multiprocessing import Process, Queue
import time
import numpy as np
from sklearn.metrics import roc_auc_score
import sys
sys.path.append('../util')
from feature_handler import get_fea_index, get_fea_group, get_fea_code

#preds_out = open("deepfm_preds_out_t", 'w')

class BaseTrain():
    """Base trainer: streams tab-separated feature files, batches rows, and
    reports AUC / mean loss over sliding windows of ``watch_num`` samples.

    Subclasses are expected to override ``init_model`` and ``batch_train``.
    Two execution modes are provided by ``train``: a multi-process
    producer/consumer pipeline (``read_data_parallel > 0``) and a
    single-process loop (``single_train``).
    """

    def __init__(self, file_path, dates, watch_num, batch_size, thread_num, read_data_parallel):
        """
        Args:
            file_path: directory holding the per-date data files.
            dates: iterable of date strings to train over, in order.
            watch_num: number of samples per AUC/loss reporting window.
            batch_size: rows per training batch.
            thread_num: consumer count used only to size the data queue.
            read_data_parallel: number of producer processes; <= 0 selects
                the single-process path in ``train``.
        """
        # Normalize the directory path so file names can be appended directly.
        if file_path[-1] != '/':
            file_path += '/'
        self.file_path = file_path
        self.dates = dates
        # Feature metadata loaded from the shared util module.
        self.fea_index = get_fea_index()
        self.fea_code = get_fea_code()
        print("fea_index = ", self.fea_index)
        self.fea_group = get_fea_group()
        self.watch_num = watch_num
        self.batch_size = batch_size
        self.thread_num = thread_num
        self.read_data_parallel = read_data_parallel
        self.aucs = []
        self.losses = []
        # NOTE(review): initialized as a list but rebound to the current
        # date string inside single_train; kept for compatibility.
        self.date = []

    def get_train_file(self, date):
        """Return the list of merged data files for ``date``."""
        return [self.file_path + date + ".merge"]

    def get_train_file_multi_part(self, date, part_num):
        """Return the path of partition ``part_num`` of ``date``'s data."""
        return self.file_path + date + "/part" + str(part_num)

    def make_label(self, l):
        """Map a raw deal_type field to a training label.

        Returns:
            -99 for 'NULL' (caller must skip the row), 0 for 'list',
            1 for any other value.
        """
        if l == 'NULL':
            return -99
        return int(l != "list")

    def single_train(self):
        """Single-process training loop over all dates.

        Reads up to ``watch_num`` cleaned rows at a time, trains in batches
        of ``batch_size`` and records AUC / mean loss per window.  Rows left
        over after a file is exhausted are carried into the next file/date;
        the final remainder is trained on without metric reporting.

        Returns:
            (aucs, losses): per-window AUC and mean-loss lists.
        """
        def read_date_data(d_f, n):
            """Read up to n cleaned rows from d_f; stops early at EOF."""
            all_d = []
            k = 0
            while k < n:
                line = d_f.readline()
                if line == "":
                    break  # EOF
                line = line[:-1].split("\t")
                tmp_label = self.make_label(line[self.fea_index['deal_type']])
                if tmp_label == -99:
                    continue  # unlabeled row, skip
                # Fix: `not s.isdigit()` instead of arithmetic on a bool.
                if not line[self.fea_index['geek_major']].isdigit():
                    print(line[self.fea_index['geek_major']])
                    continue
                tmp_line = []
                for j, l in enumerate(line):
                    if j == self.fea_index['deal_type']:
                        tmp_line.append(tmp_label)
                    elif l in ['NULL', '\\N', '']:
                        tmp_line.append(-2)  # sentinel for missing values
                    else:
                        tmp_line.append(l)
                all_d.append(tmp_line)
                k += 1
            return all_d

        self.init_model()
        left_data = []
        for date in self.dates:
            self.date = date
            print('date = ', date)
            date_paths = self.get_train_file(date)
            for date_path in date_paths:
                with open(date_path, 'r') as date_file:
                    # Top up the carried-over rows to a full window.
                    all_data = read_date_data(date_file, self.watch_num - len(left_data))
                    all_data = left_data + all_data
                    while len(all_data) >= self.watch_num:
                        all_pred = []
                        all_label = []
                        all_loss = []
                        all_data = np.asarray(all_data)
                        for i in range(len(all_data) // self.batch_size):
                            train_data = all_data[i * self.batch_size: (i + 1) * self.batch_size]
                            batch_l = train_data[:, self.fea_index['deal_type']].astype(np.int16)
                            pred, pred_loss = self.batch_train(train_data, batch_l)
                            all_label.extend(batch_l)
                            all_pred.extend(pred.tolist())
                            all_loss.append(pred_loss)
                        auc = self.cal_auc(np.asarray(all_label), np.asarray(all_pred))
                        print("auc = ", auc)
                        print('loss = ', np.asarray(all_loss).mean())
                        self.aucs.append(auc)
                        self.losses.append(np.asarray(all_loss).mean())
                        all_data = read_date_data(date_file, self.watch_num)
                    # Fewer than watch_num rows remain: carry to next file.
                    left_data = all_data

        # Train on the final remainder without reporting metrics.
        left_data = np.asarray(left_data)
        for i in range(len(left_data) // self.batch_size):
            train_data = left_data[i * self.batch_size: (i + 1) * self.batch_size]
            self.batch_train(train_data, train_data[:, self.fea_index['deal_type']].astype(np.int16))
        return self.aucs, self.losses

    def produce_data(self, data_queue, ty):
        """Producer process body: stream cleaned rows into ``data_queue``.

        Args:
            data_queue: bounded multiprocessing.Queue; one row per put.
            ty: producer index, selects which file part this process reads.
        """
        def read_date_data(d_f):
            """Read and clean one row; returns [] at EOF."""
            line = d_f.readline()
            if line == "":
                return []
            line = line[:-1].split("\t")
            tmp_line = []
            for j, l in enumerate(line):
                if j == self.fea_index['deal_type']:
                    tmp_line.append(int(l != "list"))
                elif l == 'NULL':
                    tmp_line.append(-2)
                else:
                    tmp_line.append(l)
            return tmp_line

        for date in self.dates:
            date_path = self.get_train_file_multi_part(date, ty)
            with open(date_path, 'r') as date_file:
                single_data = read_date_data(date_file)
                while len(single_data) > 0:
                    # Fix: blocking put.  The queue is bounded (maxsize set
                    # in train), so this waits for free space instead of
                    # busy-spinning on full(); the old check-then-put was
                    # also racy with multiple producers.
                    data_queue.put(single_data)
                    single_data = read_date_data(date_file)

    def init_model(self):
        """Hook for subclasses to build their model before training."""
        print('init_model')

    def consume_data(self, data_queue, aucs, loss_f):
        """Consumer process body: train on rows pulled from ``data_queue``.

        Accumulates ``batch_size`` rows per training step and reports
        AUC / mean loss every ``watch_num`` labels.  Loops forever by
        design; the parent terminates it from ``train`` once the producers
        have finished.

        Args:
            data_queue: queue of cleaned feature rows.
            aucs: queue collecting per-window AUC values.
            loss_f: queue collecting per-window mean losses.
        """
        self.init_model()
        batch_data = []
        label = []
        pred = []
        losses = []
        while True:
            tmp = data_queue.get()
            if len(tmp) > 0:
                batch_data.append(tmp)
            if len(batch_data) >= self.batch_size:
                batch_data = np.asarray(batch_data)
                batch_l = batch_data[:, self.fea_index['deal_type']].astype(np.int16)
                batch_pred, batch_loss = self.batch_train(batch_data[:self.batch_size], batch_l)
                batch_data = []
                label.extend(batch_l)
                pred.extend(batch_pred)
                losses.append(batch_loss)
                if len(label) >= self.watch_num:
                    tmp_auc = self.cal_auc(label, pred)
                    aucs.put(tmp_auc)
                    print('auc = ', tmp_auc)
                    print('loss = ', np.asarray(losses).mean())
                    loss_f.put(np.asarray(losses).mean())
                    label = []
                    pred = []
                    losses = []

    def cal_auc(self, label, pred):
        """ROC-AUC of a single prediction vector against ``label``."""
        return roc_auc_score(np.asarray(label), np.asarray(pred))

    def cal_aucs(self, label, preds):
        """ROC-AUC for each prediction vector in ``preds`` against ``label``."""
        return [roc_auc_score(np.asarray(label), np.asarray(p)) for p in preds]

    def batch_train(self, batch_data, batch_label):
        """Hook for subclasses: train on one batch.

        Returns:
            (predictions, loss); this base stub returns ([], 0).
        """
        return [], 0

    def train(self):
        """Entry point: multi-process pipeline when ``read_data_parallel``
        is positive, otherwise the single-process path.

        Returns:
            (aucs, losses) lists gathered from the reporting windows.
        """
        if self.read_data_parallel > 0:
            # Bounded queue: producers block in put() once it fills up.
            data_queue = Queue(maxsize=self.batch_size * (self.thread_num + self.read_data_parallel + 10))
            auc_queue = Queue()
            loss_queue = Queue()

            produces = [Process(target=self.produce_data, args=(data_queue, i))
                        for i in range(self.read_data_parallel)]
            # Single consumer: batch_train state cannot be shared across processes.
            processes = [Process(target=self.consume_data,
                                 args=(data_queue, auc_queue, loss_queue))]
            for p in produces:
                p.start()
            for p in processes:
                p.start()
            for p in produces:
                p.join()
            # Give the consumer a moment to drain the queue, then stop it
            # (consume_data loops forever by design).
            time.sleep(3)
            for p in processes:
                p.terminate()

            aucs = []
            losses = []
            while not auc_queue.empty():
                aucs.append(auc_queue.get())
            while not loss_queue.empty():
                losses.append(loss_queue.get())
            print("auc = ", aucs)
            print("loss = ", losses)
            return aucs, losses
        # Fix: propagate single_train's (aucs, losses) instead of dropping it,
        # so both paths of train() have the same return contract.
        return self.single_train()