# -*-coding:utf-8-*-
__author__ = 'Administrator'
import os
import json
import jieba
import random
import numpy as np
# from collections import defaultdict
import tensorflow as tf
# from tensorflow.contrib.nn import
from config import Opt
from tensorflow.contrib import rnn

# Basketball jargon, player nicknames and star names.  This list is passed to
# jieba as a user dictionary (see make_word_vector / make_training_data) so
# these terms are segmented as single tokens instead of being split apart.
basketball_word = ['拿球', '两罚', '三罚', '大帽', '顺下', '得球', '无球', '挡拆', '秒传', '吃饼', '空接', '溜底线', '空切', '造犯规', '背打', '追梦',
                   '泡椒', '老詹', '登哥', '霍乐迪',
                   '艾克萨姆', '丁翁迪', '杰伦', '路威', '维金斯', '补篮', '补扣', '字母哥', '威少', '球哥', '给力芬', '湖人', '灰熊', '迈基吉', '鲁尼',
                   '戈伯特', '庄神', '阿德托昆博', '科伦姆', '跨下运球',
                   '伯克', '击地', '弗尼耶', '三不沾', '瓦蓝', '断球', '拉人', '干拔', '后撤步', '锡伯杜', '帕克', '霍利迪', '勾手']
# NBA team names with the '队' ("team") suffix, also added to the jieba
# user dictionary.
team = ['老鹰队', '黄蜂队', '热火队', '魔术队', '奇才队', '公牛队', '骑士队', '活塞队', '步行者队', '雄鹿队', '篮网队', '凯尔特人队', '尼克斯队', '76人队', '猛龙队',
        '勇士队', '快船队', '湖人队', '太阳队', '国王队', '掘金队', '森林狼队', '雷霆队', '开拓者队', '爵士队', '小牛队', '火箭队', '灰熊队', '鹈鹕队', '马刺队']
# The same teams without the suffix, since commentary uses both forms.
team2 = ['老鹰', '黄蜂', '热火', '魔术', '奇才', '公牛', '骑士', '活塞', '步行者', '雄鹿', '篮网', '凯尔特人', '尼克斯', '76人', '猛龙', '勇士', '快船',
         '湖人', '太阳', '国王', '掘金', '森林狼', '雷霆', '开拓者', '爵士', '小牛', '火箭', '灰熊', '鹈鹕', '马刺']


def make_news():
    """Print a canned four-quarter game recap, one line per quarter."""
    quarter_reports = (
        '勇士第一波的进攻！！杜兰特妙传底角埋伏的库里！！先拔头筹！！泡椒吸引防守妙传施罗德！！空位三分出手！！杜兰特后撤步中投！！稳稳命中！！这样首节打完！！勇士领先了8分！！',
        '进入第二节的争夺！！克莱中路切入迎着诺埃尔起飞！！上篮得手！还有加罚！！阿布里内斯妙传篮下亚当斯！！轻松放篮得手！！杜兰特接球！拉开单打！！后仰中投得手！！半场打完！！勇士领先了10分！！',
        '来吧！下半场开打！！泡椒拉开！单打杜兰特！！变向！突进去小抛投！！稳稳命中！！杜兰特接应琼斯的分球！！急停跳投也有！！库里传球被亚当斯切掉！！泡椒过来再分施罗德！！追身三分！！一箭穿心！！三节打完了！！勇士领先了4分！！',
        '第四节大决战！！开始利文斯顿妙传空位的克莱！！三分拔起来就投！！之后杜兰特拉开单打阿布里内斯！！强起！造了犯规！！杜兰特两罚都有！！1分27秒！！杜兰特吸引包夹妙传卢尼！！篮下强攻得手！！最终勇士取得了胜利',
    )
    for report in quarter_reports:
        print(report)


def makeword2id():
    """Parse the '#'-separated play-by-play file and bucket lines by period.

    Each usable line looks like ``text#...#<period>#...`` where the
    second-to-last field names the game period (未开始 / 第一节 ... 已结束).
    Lines are collected raw per period, quarters are also collected as
    jieba token lists, and a word->id vocabulary is built over every
    tokenised line.

    Returns:
        tuple: (quarter1, quarter2, quarter3, quarter4, start, end,
                quarter1_cut, quarter2_cut, quarter3_cut, quarter4_cut,
                word2id)
    """
    end = []
    quarter1 = []
    quarter2 = []
    quarter3 = []
    quarter4 = []
    quarter1_cut = []
    quarter2_cut = []
    quarter3_cut = []
    quarter4_cut = []
    start = []
    word2id = {}
    # Map a period label to the raw / tokenised bucket it feeds.
    raw_buckets = {'未开始': start, '第一节': quarter1, '第二节': quarter2,
                   '第三节': quarter3, '第四节': quarter4, '已结束': end}
    cut_buckets = {'第一节': quarter1_cut, '第二节': quarter2_cut,
                   '第三节': quarter3_cut, '第四节': quarter4_cut}
    # 'with' closes the file handle that was previously leaked.
    with open(r'.\textlive\xinlang\156254', encoding='utf-8') as file:
        for l in file:
            line = l.split('#')
            if len(line) < 3:
                continue
            period = line[-2]
            if period in raw_buckets:
                raw_buckets[period].append(l)
            sentence = jieba.lcut(line[0])
            print(sentence)
            for s in sentence:
                # len(word2id) is always the next free id.
                if s not in word2id:
                    word2id[s] = len(word2id)
            if period in cut_buckets:
                cut_buckets[period].append(sentence)
    # BUG FIX: '#' used to be (re)assigned inside the loop, which both moved
    # its id around and — because the separate `index` counter was never
    # bumped for it — let the next new word collide with '#''s id.  Assign
    # the sentinel exactly once, after the vocabulary is complete.
    word2id['#'] = len(word2id)
    return quarter1, quarter2, quarter3, quarter4, start, end, quarter1_cut, \
        quarter2_cut, quarter3_cut, quarter4_cut, word2id


def make_word_vector():
    """Build a word->id vocabulary over every game log and dump it to word2id4.

    Both halves of each ``Cn-En`` style line in .\\playerInfo\\CnName, plus
    the module-level jargon/team lists, are loaded into the jieba user
    dictionary first so they tokenise as single words.  Only log files whose
    name starts with '2' (date-named game logs) are scanned.
    """
    files = os.listdir(r'.\textlive\xinlang')
    word2id = {}
    names = []
    with open(r'.\playerInfo\CnName', encoding='utf-8') as name_file:
        for n in name_file:
            parts = n.strip().split('-')
            names.append(parts[-1])
            names.append(parts[0])
    # NOTE: intentionally mutates the module-level list, as the original did.
    basketball_word.extend(names)
    basketball_word.extend(team)
    basketball_word.extend(team2)
    jieba.load_userdict(basketball_word)
    for f in files:
        if not f.startswith('2'):
            continue
        with open('.\\textlive\\xinlang\\' + f, encoding='utf-8') as live:
            for line in live:
                try:
                    data = line.split('\t')
                    if len(data) < 4 or data[2] == '':
                        continue
                    score = data[3].split('-')
                    a = score[0]
                    score[1]  # IndexError on a malformed score skips the line
                    if a == '':
                        continue
                    for w in jieba.lcut(data[1]):
                        if w not in word2id:
                            # len(word2id) is always the next free id.
                            word2id[w] = len(word2id)
                except Exception:
                    # Was a bare except; keep best-effort parsing but stop
                    # swallowing SystemExit/KeyboardInterrupt.
                    print(line)
    with open(r'.\word2id4', encoding='utf-8', mode='w') as out:
        json.dump(word2id, out, ensure_ascii=False)


def make_training_data():
    """Segment each game log into scoring runs and dump them as training data.

    Commentary lines are tokenised and accumulated until the score changes;
    each completed run becomes one training sample.  Labels are currently
    random 0/1 placeholders — NOTE(review): confirm before training on them.

    Output JSON (.\\train_data):
        {filename: {'content': [[token, ...], ...], 'label': [0|1, ...]}}
    """
    files = os.listdir(r'.\textlive\xinlang')
    all_data = {}
    names = []
    with open(r'.\playerInfo\CnName', encoding='utf-8') as name_file:
        for n in name_file:
            parts = n.strip().split('-')
            names.append(parts[-1])
            names.append(parts[0])
    # NOTE: intentionally mutates the module-level list, as the original did.
    basketball_word.extend(names)
    basketball_word.extend(team)
    basketball_word.extend(team2)
    jieba.load_userdict(basketball_word)
    for f in files:
        if not f.startswith('2'):
            continue
        content = []
        label = []
        score1 = 0
        score2 = 0
        part = []
        with open('.\\textlive\\xinlang\\' + f, encoding='utf-8') as live:
            text = live.readlines()
        text.reverse()  # logs are newest-first; replay them in game order
        for line in text:
            try:
                data = line.split('\t')
                if len(data) < 4 or data[2] == '':
                    continue
                score = data[3].split('-')
                a = score[0]
                b = score[1]
                if a == '':
                    continue
                word = jieba.lcut(data[1])
                if int(a) != score1 or int(b) != score2:
                    # Score changed: close the current run as one sample.
                    part.extend(word)
                    content.append(part)
                    label.append(random.choice([0, 1]))  # placeholder label
                    part = []
                    score1 = int(a)
                    score2 = int(b)
                else:
                    part.extend(word)
            except Exception:
                # Was a bare except; keep best-effort parsing but stop
                # swallowing SystemExit/KeyboardInterrupt.
                print(line)
        all_data[f] = {'content': content, 'label': label}
    with open(r'.\train_data', encoding='utf-8', mode='w') as out:
        json.dump(all_data, out, ensure_ascii=False)


def get_batch():
    """Sample one training batch from a random game with enough segments.

    Picks a game (from the first 100 keys of train_data) that has at least
    Opt.batch_size segments, then samples segments with replacement.

    Returns:
        train_data: list of word-id lists, each padded/truncated to
            Opt.set_size (pad id == len(word2id)).
        label: int 0/1 labels.
        train_data_length: true sequence lengths, clamped to Opt.set_size.
        label_m: one-hot labels, shape [batch_size, class_num].
        label_m_one: +1/-1 labels for the hinge-loss (type=2) decoder.
    """
    # 'with' closes the handles that json.load(open(...)) previously leaked.
    with open(r'.\word2id4', encoding='utf-8') as f:
        word2id = json.load(f)
    with open(r'.\train_data', encoding='utf-8') as f:
        data = json.load(f)
    train_data = []
    train_data_length = []
    games = list(data.keys())[:100]
    game_id = random.choice(games)  # renamed: `id` shadowed the builtin
    while len(data[game_id]['content']) < Opt.batch_size:
        game_id = random.choice(games)
    index = np.random.randint(0, len(data[game_id]['content']), Opt.batch_size)
    temp_data = np.array(data[game_id]['content'])[index]
    label = np.array(data[game_id]['label'])[index]
    label_m = np.zeros([Opt.batch_size, Opt.class_num])
    label_m_one = -np.ones([Opt.batch_size])
    for i in range(Opt.batch_size):
        label_m[i, label[i]] = 1
        if label[i] == 1:
            label_m_one[i] = 1
    pad_id = len(word2id)  # one past the last real id = padding token
    for d in temp_data.tolist():
        sentence = [word2id[word] for word in d]
        # BUG FIX: report the clamped length; the raw length could exceed
        # Opt.set_size and be fed to bidirectional_dynamic_rnn as a
        # sequence_length larger than the time dimension.
        train_data_length.append(min(len(sentence), Opt.set_size))
        sentence = sentence[:Opt.set_size]
        sentence += [pad_id] * (Opt.set_size - len(sentence))
        train_data.append(sentence)
    print(len(train_data))  # was printed twice; once is enough
    return train_data, label.tolist(), train_data_length, label_m, label_m_one.tolist()


class NewsNet(object):
    """Bi-LSTM sentence encoder with two interchangeable decoder heads.

    type=1: two-layer softmax classifier trained with cross entropy.
    type=2: linear SVM-style head trained with a hinge loss on +/-1 labels.
    Any other type builds only the encoder (decoder/loss are no-ops).
    """

    def __init__(self, type=1):
        # Build the graph in dependency order: placeholders/embedding first,
        # then encoder, decoder head, loss, metrics, optimizer.
        self.make_data()
        self.encoder()
        self.decoder(type)
        self.get_loss(type)
        self.get_accuracy(type)
        self.optimizer()
        print('新闻网络构建')

    def encoder(self):
        """Encode the embedded batch with a bidirectional LSTM.

        The final forward and backward cell states are concatenated into a
        single fixed-size sentence vector (self.sentence).
        """
        with tf.variable_scope('encoder') as scope:
            cell_fw = rnn.LSTMCell(Opt.fw_size, name='fw')
            cell_bw = rnn.LSTMCell(Opt.bw_size, name='bw')
            initial_state_fw = cell_fw.zero_state(Opt.batch_size, dtype=tf.float32)
            initial_state_bw = cell_bw.zero_state(Opt.batch_size, dtype=tf.float32)
            self.outputs, self.states = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, self.train_data_vector,
                                                                        self.train_data_length, initial_state_fw,
                                                                        initial_state_bw)
            # states[i][0] is the LSTM cell state (c) of direction i.
            self.sentence = tf.concat([self.states[0][0], self.states[1][0]], 1)

    def decoder(self, type=1):
        """Attach the classification head selected by `type`."""
        if type == 1:
            # Two-layer MLP producing per-class scores.
            with tf.variable_scope('decoder1', reuse=tf.AUTO_REUSE) as scope:
                w_1 = tf.get_variable("w_1", [Opt.unit_1, Opt.unit_2])
                b_1 = tf.get_variable("b_1", [Opt.b_1])
                r1 = tf.matmul(self.sentence, w_1) + b_1
                r1_ac = tf.nn.relu(r1)
                w_2 = tf.get_variable("w_2", [Opt.unit_2, Opt.class_num])
                b_2 = tf.get_variable("b_2", [Opt.class_num])
                self.score = tf.matmul(r1_ac, w_2) + b_2
        elif type == 2:
            # Linear SVM-style head: hinge term max(0, 2 - y * f(x)) plus an
            # L2 penalty on the weights (combined in get_loss).
            with tf.variable_scope('decoder2', reuse=tf.AUTO_REUSE) as scope:
                w_1 = tf.get_variable("w_1", [Opt.unit_1, 1])
                b_1 = tf.get_variable("b_1", [1])
                self.r1 = tf.matmul(self.sentence, w_1) + b_1
                self.l2_norm = tf.reduce_sum(tf.square(w_1))
                self.classification_term = tf.reduce_mean(
                    tf.maximum(0., tf.subtract(2., tf.multiply(self.r1, self.label_one))))
        else:
            pass  # no decoder for other types

    def make_data(self):
        """Create input placeholders and the trainable word-embedding lookup."""
        self.train_data = tf.placeholder(tf.int32, [None, Opt.set_size])
        self.label = tf.placeholder(tf.int32, [None])
        self.label_one = tf.placeholder(tf.float32, [None])      # +1/-1 labels (type=2)
        self.train_data_length = tf.placeholder(tf.int32, [None])
        self.label_m = tf.placeholder(tf.float32, [None, Opt.class_num])  # one-hot (type=1)
        with tf.variable_scope('layer_embedding', reuse=tf.AUTO_REUSE) as scope:
            self.vector = tf.get_variable('embedding', shape=[Opt.word_num, Opt.word_dim], dtype=tf.float32,
                                          initializer=tf.truncated_normal_initializer(), trainable=True)

            self.train_data_vector = tf.nn.embedding_lookup(self.vector, self.train_data)

    def get_loss(self, type=1):
        """Attach the training loss matching the chosen decoder head."""
        if type == 1:
            with tf.variable_scope('decoder1', reuse=tf.AUTO_REUSE) as scope:
                # Hand-rolled cross entropy; clip keeps log() finite.
                outputs_softmax = tf.nn.softmax(self.score, axis=1)
                outputs_softmax = tf.clip_by_value(outputs_softmax, 1e-10, 1.0 - 1e-10)
                self.loss = -tf.reduce_mean(tf.reduce_sum(self.label_m * tf.log(outputs_softmax), 1))
        elif type == 2:
            with tf.variable_scope('decoder2', reuse=tf.AUTO_REUSE) as scope:
                # Hinge term + 0.5 * L2 weight penalty.
                self.loss = tf.add(self.classification_term, tf.multiply(0.5, self.l2_norm))
        else:
            pass

    def get_accuracy(self, type=1):
        """Attach prediction and accuracy ops for the chosen head."""
        if type == 1:
            with tf.variable_scope('decoder1', reuse=tf.AUTO_REUSE) as scope:
                self.predict = tf.argmax(self.score, axis=1, output_type=tf.int32)
                self.accuracy = tf.reduce_mean(
                    tf.cast(tf.equal(tf.argmax(self.score, axis=1, output_type=tf.int32), self.label), tf.float32))
        elif type == 2:
            with tf.variable_scope('decoder2', reuse=tf.AUTO_REUSE) as scope:
                one = tf.ones_like(self.r1)
                zero = tf.zeros_like(self.r1)

                # Threshold the margin at 0: negative score -> class 0.
                self.predict = tf.where(self.r1 < 0, x=zero, y=one)

                self.accuracy = tf.reduce_mean(
                    tf.cast(tf.equal(tf.cast(self.predict, tf.int32), self.label), tf.float32))
        else:
            pass

    def optimizer(self):
        """Adam on self.loss, with a manually-triggered 1% lr decay op."""
        lr = tf.Variable(Opt.lr, trainable=False)
        self.lr_decay = lr.assign(lr * 0.99)
        self.opt = tf.train.AdamOptimizer(lr).minimize(self.loss)

    def run(self, session, train_time=Opt.train_time):
        """Train for `train_time` batches.

        Prints accuracy / precision / recall / F1 / loss per step and decays
        the learning rate when the loss keeps rising.

        Returns:
            (loss_list, total_list) where total_list holds per-step
            (accuracy, precision, recall, f1) tuples.
        """
        loss_list = []
        total_list = []
        for step in range(train_time):  # renamed: `i` was shadowed below
            train_data, label, train_data_length, label_m, label_m_one = get_batch()
            loss, _, acc, predict = session.run([self.loss, self.opt, self.accuracy, self.predict],
                                                feed_dict={self.train_data: train_data, self.label: label,
                                                           self.train_data_length: train_data_length,
                                                           self.label_m: label_m, self.label_one: label_m_one})
            TP = FP = TN = FN = 0
            for p, t in zip(predict, label):
                if p == 1 and t == 1:
                    TP += 1
                elif p == 0 and t == 0:
                    TN += 1
                elif p == 1 and t == 0:
                    FP += 1
                elif p == 0 and t == 1:
                    FN += 1
            # Add-one smoothing on precision/recall, as before.
            precision = (TP + 1) / (TP + FP + 1)
            recall = (TP + 1) / (TP + FN + 1)
            # BUG FIX: F1 raised ZeroDivisionError on an all-TN batch
            # (TP == FP == FN == 0); report 0.0 in that case instead.
            denom = TP * 2 + FP + FN
            f1 = TP * 2 / denom if denom else 0.0
            print('准确率为:%f' % (acc))
            print('精确率为:%f' % precision)
            print('召回率为:%f' % recall)
            print('F1为:%f' % f1)

            print('loss：%f' % loss)
            loss_list.append(loss)
            total_list.append((acc, precision, recall, f1))
            if len(loss_list) > 5:
                # Decay lr once the latest loss exceeds the three before it.
                if loss_list[-1] > loss_list[-2] and loss_list[-1] > loss_list[-3] and loss_list[-1] > loss_list[-4]:
                    lr = session.run(self.lr_decay)
                    print('learning_rate递减:%f' % lr)
        return loss_list, total_list


if __name__ == '__main__':
    # Build the SVM-style (type=2) network and train it on pre-built batches.
    # Prerequisites on disk: .\word2id4 and .\train_data — produce them with
    # make_word_vector() and make_training_data() first.
    net = NewsNet(type=2)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    # Use the session as a context manager so it is always closed
    # (previously the Session was leaked on exit).
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        loss_list, acc_list = net.run(sess)
