# -*- coding: utf-8 -*-
import datetime
import os
import sqlite3
import shutil

import requests
import time
from datetime import timedelta

import numpy as np
import tensorflow as tf
import tensorflow.contrib.keras as kr
from sklearn import metrics
from collections import Counter
from cnn_model import TCNNConfig, TextCNN


class Coach(object):
    """End-to-end trainer for the TextCNN text classifier.

    Workflow (see work()): load labelled samples into a local SQLite
    database, build the vocabulary and category tables, train the CNN
    with early stopping, evaluate on the held-out test split, and
    finally archive the vocabulary, category mapping and run log into
    a zip file.
    """

    base_dir = 'data/cnews'
    train_data = os.path.join(base_dir, 'train.txt')
    test_data = os.path.join(base_dir, 'test.txt')
    val_data = os.path.join(base_dir, 'val.txt')
    vocab_data = os.path.join(base_dir, 'vocab.txt')

    # A fresh output directory per run, stamped when the module is imported.
    save_dir = 'trained/%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    save_path = os.path.join(save_dir, 'best_validation')  # best-validation checkpoint path
    outputs = []  # human-readable run log; see __init__, which shadows this per instance

    def __init__(self):
        """Open (and reset) the sample database and create the run directory."""
        self.db = sqlite3.connect("cnn-classifying.db")
        self.db.execute("CREATE TABLE IF NOT EXISTS samples (id INTEGER PRIMARY KEY, label STRING (64), content TEXT;".replace(";", ");"))
        self.db.execute("DELETE FROM samples WHERE 1;")
        self.db.commit()
        # Per-instance log list so successive Coach objects do not share
        # the mutable class-level default.
        self.outputs = []
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)

    def work(self):
        """Run the full pipeline: ingest samples, build model, train, test, archive."""
        # self.prepare_samples()  # remote download path, disabled by default
        self.prepare_samples_test()
        model, cat_to_id, word_to_id, config = self.init_cnn_model()
        self.train(model, cat_to_id, word_to_id, config)
        self.test(model, cat_to_id, word_to_id, config)
        self.save_all(cat_to_id)

    def save_all(self, cat_to_id):
        """Save the vocabulary, category mapping and run log, then zip and remove the run directory."""
        shutil.copy(self.vocab_data, os.path.join(self.save_dir, 'vocab.txt'))
        with open(os.path.join(self.save_dir, 'categories.txt'), 'w', encoding='utf-8') as f:
            for cat, cat_id in cat_to_id.items():
                f.write(str(cat_id))
                f.write("\t")
                f.write(cat)
                f.write("\n")
        with open(os.path.join(self.save_dir, 'summary.txt'), 'w', encoding='utf-8') as f:
            for line in self.outputs:
                try:
                    f.write(line)
                    f.write("\n")
                except Exception:
                    # Best effort: a line that cannot be written is shown on stdout instead.
                    print(line)
        self.outputs.clear()
        packed = shutil.make_archive(self.save_dir, 'zip', self.save_dir)
        print(packed)
        shutil.rmtree(self.save_dir)

    def prepare_samples_test(self):
        """Load the local cnews text files into the samples table (offline path)."""
        def dump_to_db(name, db, index):
            # Returns the next free sample id so callers can chain files.
            with open(name, 'r', encoding='utf-8') as f:
                for line in f:
                    try:
                        label, text = line.split("\t")
                        db.execute("INSERT INTO samples VALUES (?, ?, ?);", (index, label, text))
                        index += 1
                    except ValueError:
                        # Skip malformed lines (not exactly one tab separator).
                        pass
            return index

        index = dump_to_db(r'data/cnews/val.txt', self.db, 0)
        index = dump_to_db(r'data/cnews/test.txt', self.db, index)
        index = dump_to_db(r'data/cnews/train.txt', self.db, index)
        self.db.commit()

    def prepare_samples(self):
        """Download labelled samples into SQLite and write balanced split files.

        The split sizes are balanced on the least-represented label:
        80% train / 10% validate, remainder test.
        """
        sample_count_by_label = dict()
        page = 0
        page_size = 100
        pages = 1
        while page < pages:
            ret = self.download_samples(page, page_size)
            page += 1
            # Ceiling division: total pages implied by the reported count.
            pages = int((ret['count'] + page_size - 1) / page_size)
            for row in ret['result']:
                label = row['label']
                text = ';'.join((row['msg_type'], row['title'], row['sender'], row['summary'], row['content']))
                self.db.execute("INSERT INTO samples VALUES (?, ?, ?);", (row['id'], label, text))
                sample_count_by_label[label] = sample_count_by_label.get(label, 0) + 1
        self.db.commit()
        # Balance every split on the rarest label.
        valid_count = min(sample_count_by_label.values())
        to_train = int(valid_count * 0.8)
        to_validate = int(valid_count * 0.1)
        to_test = valid_count - to_train - to_validate
        with open("train.txt", "w", encoding="utf-8") as file_to_train, \
            open("validate.txt", "w", encoding="utf-8") as file_to_validate, \
            open("test.txt", "w", encoding="utf-8") as file_to_test:
            for label in sample_count_by_label.keys():
                # BUG FIX: use consecutive, non-overlapping windows per label.
                # The original offsets (validate at to_test, train at to_validate)
                # overlapped, leaking validation rows into the training split.
                self.save_data_file(label, file_to_test, 0, to_test)
                self.save_data_file(label, file_to_validate, to_test, to_validate)
                self.save_data_file(label, file_to_train, to_test + to_validate, to_train)

    def save_data_file(self, label, writer, offset, count):
        """Write `count` samples of `label` (skipping the first `offset`) as 'label<TAB>text' lines."""
        cursor = self.db.execute("SELECT id, content FROM samples WHERE label=? ORDER BY id DESC LIMIT ?,?;",
                                 (label, offset, count))
        for row in cursor.fetchall():
            writer.write(label)
            writer.write("\t")
            # Strip characters that would break the one-sample-per-line format.
            writer.write(row[1].replace('\n', '').replace('\t', '').replace('\u3000', ''))
            writer.write("\n")

    def init_cnn_model(self):
        """Build the TextCNN model plus the vocabulary/category lookup tables."""
        print('Configuring CNN model...')
        config = TCNNConfig()
        if not os.path.exists(self.vocab_data):
            # Rebuild the vocabulary file if it does not exist yet.
            self.build_vocab(config.vocab_size)
        categories, cat_to_id = self.read_category()
        words, word_to_id = self.read_vocab()
        config.vocab_size = len(words)
        model = TextCNN(config)
        self.outputs.append("目标类别：" + ",".join(categories))
        self.outputs.append("词汇表大小：%d" % config.vocab_size)
        return model, cat_to_id, word_to_id, config

    def train(self, model, cat_to_id, word_to_id, config):
        """Train the model with periodic validation, checkpointing and early stopping."""
        print("Configuring TensorBoard and Saver...")
        # TensorBoard: delete the tensorboard folder before retraining,
        # otherwise the new graph overlays the old one.
        tensorboard_dir = 'tensorboard/textcnn'
        if not os.path.exists(tensorboard_dir):
            os.makedirs(tensorboard_dir)

        tf.summary.scalar("loss", model.loss)
        tf.summary.scalar("accuracy", model.acc)
        merged_summary = tf.summary.merge_all()
        writer = tf.summary.FileWriter(tensorboard_dir)

        # Saver for the best-validation checkpoint.
        saver = tf.train.Saver()
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)

        print("Loading training and validation data...")
        self.outputs.append("Loading training and validation data...")
        # Load the training and validation sets.
        start_time = time.time()
        x_train, y_train = Coach.process_file(self.train_data, word_to_id, cat_to_id, config.seq_length)
        x_val, y_val = Coach.process_file(self.val_data, word_to_id, cat_to_id, config.seq_length)
        time_dif = Coach.get_time_dif(start_time)
        print("Time usage:", time_dif)
        self.outputs.append("Time usage:%s" % str(time_dif))

        # Create the session.
        session = tf.Session()
        session.run(tf.global_variables_initializer())
        writer.add_graph(session.graph)

        print('Training and evaluating...')
        self.outputs.append('Training and evaluating...')
        start_time = time.time()
        total_batch = 0              # total batches trained so far
        best_acc_val = 0.0           # best validation accuracy seen
        last_improved = 0            # batch index of the last improvement
        require_improvement = 1000   # stop early after this many batches without improvement

        flag = False
        for epoch in range(config.num_epochs):
            print('Epoch:', epoch + 1)
            self.outputs.append('Epoch:%d' % (epoch + 1))
            batch_train = Coach.batch_iter(x_train, y_train, config.batch_size)
            for x_batch, y_batch in batch_train:
                feed_dict = Coach.feed_data(model, x_batch, y_batch, config.dropout_keep_prob)

                if total_batch % config.save_per_batch == 0:
                    # Periodically write training metrics to TensorBoard.
                    s = session.run(merged_summary, feed_dict=feed_dict)
                    writer.add_summary(s, total_batch)

                if total_batch % config.print_per_batch == 0:
                    # Periodically report performance on the train and validation sets.
                    feed_dict[model.keep_prob] = 1.0  # disable dropout for evaluation
                    loss_train, acc_train = session.run([model.loss, model.acc], feed_dict=feed_dict)
                    loss_val, acc_val = Coach.evaluate(model, session, x_val, y_val)

                    if acc_val > best_acc_val:
                        # New best validation accuracy: checkpoint the model.
                        best_acc_val = acc_val
                        last_improved = total_batch
                        saver.save(sess=session, save_path=self.save_path)
                        improved_str = '*'
                    else:
                        improved_str = ''

                    time_dif = Coach.get_time_dif(start_time)
                    msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \
                          + ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'
                    out = msg.format(total_batch, loss_train, acc_train, loss_val, acc_val, time_dif, improved_str)
                    print(out)
                    self.outputs.append(out)

                session.run(model.optim, feed_dict=feed_dict)  # one optimization step
                total_batch += 1

                if total_batch - last_improved > require_improvement:
                    # Validation accuracy has stalled for too long: stop early.
                    print("No optimization for a long time, auto-stopping...")
                    self.outputs.append("No optimization for a long time, auto-stopping...")
                    flag = True
                    break  # leave the batch loop
            if flag:  # propagate the early stop out of the epoch loop
                break
        time_dif = Coach.get_time_dif(start_time)
        print("Time usage:", time_dif)
        self.outputs.append("Time usage:%s" % str(time_dif))

    def test(self, model, cat_to_id, word_to_id, config):
        """Restore the best checkpoint and report loss/accuracy, per-class metrics and the confusion matrix."""
        print("Loading test data...")
        start_time = time.time()
        x_test, y_test = Coach.process_file(self.test_data, word_to_id, cat_to_id, config.seq_length)

        session = tf.Session()
        session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess=session, save_path=self.save_path)  # load the saved best model

        print('Testing...')
        self.outputs.append('Testing...')
        loss_test, acc_test = Coach.evaluate(model, session, x_test, y_test)
        msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'
        out = msg.format(loss_test, acc_test)
        print(out)
        self.outputs.append(out)

        batch_size = 128
        data_len = len(x_test)
        num_batch = int((data_len - 1) / batch_size) + 1

        y_test_cls = np.argmax(y_test, 1)
        y_pred_cls = np.zeros(shape=len(x_test), dtype=np.int32)  # predicted class per sample
        for i in range(num_batch):  # predict batch by batch
            start_id = i * batch_size
            end_id = min((i + 1) * batch_size, data_len)
            feed_dict = {
                model.input_x: x_test[start_id:end_id],
                model.keep_prob: 1.0
            }
            y_pred_cls[start_id:end_id] = session.run(model.y_pred_cls, feed_dict=feed_dict)

        # Per-class precision/recall/F1.
        print("Precision, Recall and F1-Score...")
        self.outputs.append("Precision, Recall and F1-Score...")
        # target_names must be an ordered sequence; dict insertion order matches
        # the ids assigned in read_category().
        out = metrics.classification_report(y_test_cls, y_pred_cls, target_names=list(cat_to_id.keys()))
        print(out)
        self.outputs.append(out)

        # Confusion matrix.
        print("Confusion Matrix...")
        self.outputs.append("Confusion Matrix...")
        cm = metrics.confusion_matrix(y_test_cls, y_pred_cls)
        print(cm)
        self.outputs.append(str(cm))

        time_dif = Coach.get_time_dif(start_time)
        print("Time usage:", time_dif)
        self.outputs.append("Time usage:%s" % str(time_dif))

    def download_samples(self, page, page_size=100):
        """Fetch one page of samples from the remote service, retrying up to 3 times."""
        for attempt in range(3):
            try:
                r = requests.get(
                    "http://47.94.112.105:8050/content/samples/read?page=%d&page_size=%d" % (page, page_size),
                    timeout=10)
                return r.json()
            except Exception as e:
                if attempt == 2:
                    # Out of retries: surface the last error to the caller.
                    raise e

    def build_vocab(self, vocab_size=5000):
        """Build the character vocabulary from the training set and store it to vocab_data."""
        data_train, _ = Coach.read_data_file(self.train_data)

        all_data = []
        for content in data_train:
            all_data.extend(content)

        counter = Counter(all_data)
        count_pairs = counter.most_common(vocab_size - 1)
        words, _ = list(zip(*count_pairs))

        # BUG FIX: open for writing ('w'); the original opened the file
        # read-only, so every write() raised io.UnsupportedOperation.
        with open(self.vocab_data, 'w', encoding='utf-8') as writer:
            # <PAD> takes id 0 so every text can be padded to one fixed length.
            writer.write('<PAD>\n')
            for w in words:
                writer.write(w)
                writer.write('\n')

    def read_vocab(self):
        """Read the vocabulary file; return (words, word->id dict)."""
        with open(self.vocab_data, 'r', encoding='utf-8', errors='ignore') as fp:
            words = [line.strip() for line in fp.readlines()]
        word_to_id = dict(zip(words, range(len(words))))
        return words, word_to_id

    def read_category(self):
        """Read the distinct labels from the samples table; return (categories, cat->id dict)."""
        categories = []
        cursor = self.db.execute("SELECT distinct(label) FROM samples WHERE 1;")
        for row in cursor.fetchall():
            categories.append(row[0])

        # Ids follow the order the labels were returned in.
        cat_to_id = dict(zip(categories, range(len(categories))))

        return categories, cat_to_id

    @staticmethod
    def process_file(filename, word_to_id, cat_to_id, max_length=600):
        """Convert a 'label<TAB>text' file into padded id sequences and one-hot labels."""
        contents, labels = Coach.read_data_file(filename)

        data_id, label_id = [], []
        for i in range(len(contents)):
            # Characters missing from the vocabulary are silently dropped.
            data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id])
            label_id.append(cat_to_id[labels[i]])

        # Pad/truncate every sequence to max_length with keras utilities.
        x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)
        y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id))  # one-hot labels

        return x_pad, y_pad

    @staticmethod
    def batch_iter(x, y, batch_size=64):
        """Yield shuffled (x, y) mini-batches; the last batch may be smaller."""
        data_len = len(x)
        num_batch = int((data_len - 1) / batch_size) + 1

        # One shared permutation keeps x and y aligned.
        indices = np.random.permutation(np.arange(data_len))
        x_shuffle = x[indices]
        y_shuffle = y[indices]

        for i in range(num_batch):
            start_id = i * batch_size
            end_id = min((i + 1) * batch_size, data_len)
            yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]

    @staticmethod
    def feed_data(model, x_batch, y_batch, keep_prob):
        """Build the feed dict for one training/evaluation step."""
        feed_dict = {
            model.input_x: x_batch,
            model.input_y: y_batch,
            model.keep_prob: keep_prob
        }
        return feed_dict

    @staticmethod
    def evaluate(model, sess, x_, y_):
        """Return (mean loss, mean accuracy) of the model over a whole dataset."""
        data_len = len(x_)
        batch_eval = Coach.batch_iter(x_, y_, 128)
        total_loss = 0.0
        total_acc = 0.0
        for x_batch, y_batch in batch_eval:
            batch_len = len(x_batch)
            feed_dict = Coach.feed_data(model, x_batch, y_batch, 1.0)
            loss, acc = sess.run([model.loss, model.acc], feed_dict=feed_dict)
            # Weight each batch by its size so the smaller final batch is not over-counted.
            total_loss += loss * batch_len
            total_acc += acc * batch_len

        return total_loss / data_len, total_acc / data_len

    @staticmethod
    def read_data_file(filename):
        """Read a 'label<TAB>text' file; return (list of char lists, list of labels)."""
        contents, labels = [], []
        with open(filename, 'r', encoding='utf-8', errors='ignore') as f:
            for line in f:
                try:
                    label, content = line.strip().split('\t')
                    if content:
                        contents.append(list(content))
                        labels.append(label)
                except ValueError:
                    # Skip lines without exactly one tab separator.
                    pass
        return contents, labels

    @staticmethod
    def get_time_dif(start_time):
        """Return the wall time elapsed since start_time as a timedelta (rounded to seconds)."""
        end_time = time.time()
        time_dif = end_time - start_time
        return timedelta(seconds=int(round(time_dif)))

    @staticmethod
    def str_time_diff(diff):
        """Format a timedelta as a Chinese human-readable string, e.g. '2小时5分钟1秒'."""
        seconds = diff.seconds

        out = []
        if diff.days:
            out.append("%d天 " % diff.days)
        hours = int(seconds / 3600)  # BUG FIX: was 36000, which always produced 0 hours
        if hours or diff.days:
            out.append("%d小时" % hours)
            seconds = seconds % 3600
        minutes = int(seconds / 60)
        if minutes or hours or diff.days:
            out.append("%d分钟" % minutes)
            seconds = seconds % 60
        out.append("%d秒" % seconds)
        return ''.join(out)



if __name__ == '__main__':
    # Script entry point: run the complete training pipeline.
    Coach().work()