import os
import re

import gensim
import  pandas as pd
import  numpy as np
from config import  *
import tensorflow as tf
import  jieba
import tqdm
from sklearn.model_selection import  train_test_split
from gensim.models import  word2vec
from scipy.sparse import csr_matrix

# NOTE(review): the three globals below are not referenced anywhere in this
# chunk — presumably consumed by other modules that import this one; confirm
# before removing.
sequence_length = 10
max_words = 300000
model  = None

def load_stopwords(path):
    """Load a stop-word list, one word per line.

    Args:
        path: Path to a UTF-8 text file with one stop word per line.

    Returns:
        list[str]: The stop words with newlines removed.  Blank lines are
        kept as empty strings, matching the original readline loop.
    """
    with open(path, encoding='utf-8') as f:
        # Iterating the file object yields each line including its newline;
        # replace('\n', '') strips it exactly as the original loop did.
        return [line.replace('\n', '') for line in f]

# NOTE(review): appears unused in this chunk — verify against other modules.
refine_sentence = []


# 去掉文本中的停用词
# Remove stop words from tokenized text.
def drop_stopwords(contents, stopwords):
    """Filter stop words out of every tokenized sentence.

    Args:
        contents: Iterable of tokenized sentences (each a list of words).
        stopwords: Collection of words to drop.

    Returns:
        list[list[str]]: One cleaned token list per input sentence.
    """
    # Build the set once: O(1) membership per word instead of scanning the
    # stop-word list for every token.
    stop_set = set(stopwords)
    return [[word for word in line if word not in stop_set]
            for line in contents]

# print(drop_stopwords(fenci_list,stopwords))

# Compiled once at import time instead of on every call.
# NOTE(review): the trailing '^' inside the character class also preserves
# literal '^' characters — looks like a typo, but kept for compatibility.
_NON_CHINESE = re.compile(r'[^\u4e00-\u9fa5^]')

def delete_symbol(sentences):
    """Strip every character that is not a CJK ideograph (or a literal '^').

    Args:
        sentences: Iterable of strings.

    Returns:
        list[str]: Each input string with non-Chinese characters removed.
    """
    return [_NON_CHINESE.sub('', sentence) for sentence in sentences]

def splitwords(datas):
    """Segment each sentence with jieba (full mode) and log the tokens.

    Args:
        datas: Iterable of raw sentence strings.

    Returns:
        list[list[str]]: The segmented word list for each sentence.

    Side effect: appends every token, space-separated, to ./sentences.txt
    with an extra space between sentences.  The file is opened in append
    mode, so it grows on every run — presumably intentional for building a
    word2vec corpus; confirm.
    """
    # Segment once and reuse the result — the original ran jieba.cut twice
    # per line (once for the file, once for the return value).
    cut_words = [list(jieba.cut(line, cut_all=True)) for line in datas]
    with open('./sentences.txt', 'a', encoding='utf-8') as ff:
        for seg_list in cut_words:
            for word in seg_list:
                ff.write(word + ' ')  # tokens separated by spaces
            ff.write(' ')  # extra space marks the sentence boundary
    return cut_words

# Accumulates every token seen while loading the CSV (filled by load_split).
words_list = []

# labels = []

# Column names expected in the input CSV.
tabitems = ['ID', 'Text', 'Labels']
# NOTE(review): appears unused in this chunk — verify before removing.
prior_targets = []
def load_split(path):
    """Load the dialogue CSV and split it into per-utterance samples.

    Each CSV row holds a dialogue whose utterances are joined by '__eou__'
    and a label string with one digit per utterance.  Every utterance
    becomes one sample; its characters are also appended to the
    module-level words_list.

    Args:
        path: Path to a CSV with ID / Text / Labels columns.

    Returns:
        tuple: (datas, targets, prior_targets_weights) — cleaned utterances,
        zero-based labels, and the label-history prior per utterance.
    """
    datas = []
    targets = []
    prior_targets_weights = []
    df = pd.read_csv(path, dtype=str)
    total_ID = df[tabitems[0]]
    total_sentences = df[tabitems[1]]
    total_sequences = df[tabitems[2]].astype(dtype=str)
    rows = len(total_ID)
    print(rows)
    samplenum = 0
    # NOTE(review): range(rows - 1) skips the last CSV row — confirm whether
    # that is intentional; kept for compatibility.
    for row_idx in range(rows - 1):
        sentence = total_sentences[row_idx]
        sequence = total_sequences[row_idx]
        samplenum += 1
        # pandas represents a missing Text cell as float('nan').  The
        # original `sentence is float` was always False, so NaN rows were
        # never actually skipped and crashed on .split below.
        if isinstance(sentence, float):
            print("读取到nan")
            continue
        splits = sentence.split('__eou__')
        seq_len = len(sequence)
        rs = delete_symbol(splits)
        if len(splits) != seq_len:
            print("读取时，序列与实际分段不吻合")
            exit(1)  # exit non-zero on inconsistent data (was exit(0))
        # Avoid shadowing the outer loop index (the original reused `i`).
        for j in range(seq_len):
            targets.append(int(sequence[j]) - 1)  # labels are 1-based in CSV
            prior_targets_weights.append(
                gen_prior_targets_weight(sequence[0:j], categories=class_num))
            for word in rs[j]:
                words_list.append(word)
            datas.append(rs[j])

    print('共', str(samplenum) + '条')
    return datas, targets, prior_targets_weights


def lsivec2array(lsi_vec):
    """Convert gensim-style sparse LSI vectors into a dense 2-D array.

    Args:
        lsi_vec: Iterable of documents, each a list of (index, value) pairs.

    Returns:
        numpy.ndarray: Dense matrix with one row per document.
    """
    values = []
    row_idx = []
    col_idx = []
    for doc_no, doc in enumerate(lsi_vec):
        for dim, weight in doc:
            row_idx.append(doc_no)
            col_idx.append(dim)
            values.append(weight)
    sparse = csr_matrix((values, (row_idx, col_idx)))  # sparse form
    return sparse.toarray()  # densify
    # print(lsi_matrix)



# NOTE(review): this two-argument definition is shadowed by the
# three-argument write_to_tfrecords defined later in this file, so it is
# dead code at import time; kept byte-identical pending removal.
def write_to_tfrecords(x,y):
    """Split (x, y) 80/20 and serialize both parts as TFRecord files.

    Writes ..\\tf_records\\train.tfrecords and ..\\tf_records\\test.tfrecords.
    Each record stores float32 'tokens' and 'targets' as raw bytes.
    Uses the TF1 tf.python_io API.
    """
    train_set_writer = tf.python_io.TFRecordWriter('..\\tf_records\\train.tfrecords')  # output file
    # validation_set_writer = tf.python_io.TFRecordWriter('..\\tf_records\\valid')
    test_set_writer = tf.python_io.TFRecordWriter('..\\tf_records\\test.tfrecords')  # output file

    # y = np.reshape(y,(-1,1))
    # x = np.reshape(x, (-1,20))
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,random_state=2)
    # zip() below stops at the shortest input anyway, so this cap never
    # actually truncates; it only drives the break guard.
    limit_epoch = x_train.shape[0]
    current_epoch = 0
    train_generator = zip(x_train, y_train)
    for sample_slice, label_slice in train_generator:

        sample_slice = np.asarray(a=sample_slice, dtype=np.float32)
        label_slice = np.asarray(a=label_slice, dtype=np.float32)
        # print(label_slice.shape)
        example = tf.train.Example(features=tf.train.Features(feature={
            'tokens': tf.train.Feature(bytes_list=tf.train.BytesList(value=[sample_slice.tobytes()])),
            'targets': tf.train.Feature(bytes_list=tf.train.BytesList(value=[label_slice.tobytes()]))
        }))  # one Example wraps the tokens and targets of one sample
        train_set_writer.write(example.SerializeToString())  # serialize to string
        current_epoch = current_epoch + 1
        print(current_epoch)
        if (current_epoch >= limit_epoch):
            break;
    train_set_writer.close()
    print("Done train_set writing")

    for sample_slice, label_slice in zip(x_test, y_test):
        sample_slice = np.asarray(a=sample_slice, dtype=np.float32)
        label_slice = np.asarray(a=label_slice, dtype=np.float32)
        example = tf.train.Example(features=tf.train.Features(feature={
            'tokens': tf.train.Feature(bytes_list=tf.train.BytesList(value=[sample_slice.tobytes()])),
            'targets': tf.train.Feature(bytes_list=tf.train.BytesList(value=[label_slice.tobytes()]))
        }))  # one Example wraps the tokens and targets of one sample
        test_set_writer.write(example.SerializeToString())  # serialize to string
    test_set_writer.close()
    print("Done test_set writing")
    print("train_size %d test size %d"%(x_train.shape[0],x_test.shape[0]))
    print(x_train.shape)
    print(x_test.shape)


def _write_example_set(writer, samples, labels, priors):
    """Serialize aligned (tokens, targets, prior_weights) triples to writer.

    Every field is cast to float32 and stored as raw bytes; returns the
    number of records written.
    """
    written = 0
    for sample_slice, label_slice, prior_weight in zip(samples, labels, priors):
        sample_slice = np.asarray(a=sample_slice, dtype=np.float32)
        label_slice = np.asarray(a=label_slice, dtype=np.float32)
        prior_weight = np.asarray(a=prior_weight, dtype=np.float32)
        example = tf.train.Example(features=tf.train.Features(feature={
            'tokens': tf.train.Feature(bytes_list=tf.train.BytesList(value=[sample_slice.tobytes()])),
            'targets': tf.train.Feature(bytes_list=tf.train.BytesList(value=[label_slice.tobytes()])),
            'prior_weights': tf.train.Feature(bytes_list=tf.train.BytesList(value=[prior_weight.tobytes()]))
        }))  # one Example wraps tokens, targets and the label-history prior
        writer.write(example.SerializeToString())
        written += 1
    return written


def write_to_tfrecords(x,y,prior_weights):
    """Stratified 80/20 split of (x, y, prior_weights) written as TFRecords.

    Args:
        x: Sample features, indexable like an ndarray.
        y: Labels, also used for stratification.
        prior_weights: Per-sample prior weight vectors aligned with x/y.

    Side effects: writes ..\\tf_records\\train.tfrecords and
    ..\\tf_records\\test.tfrecords (TF1 tf.python_io API).

    NOTE(review): this definition shadows the earlier two-argument
    write_to_tfrecords; all callers must pass prior_weights.
    """
    train_set_writer = tf.python_io.TFRecordWriter('..\\tf_records\\train.tfrecords')
    test_set_writer = tf.python_io.TFRecordWriter('..\\tf_records\\test.tfrecords')

    x_train, x_test, y_train, y_test, prior_weight_train, prior_weight_test = train_test_split(
        x, y, prior_weights, test_size=0.2, random_state=2, stratify=y)

    # zip() already stops at the shortest sequence, so the original
    # current_epoch/limit_epoch break guard (and per-record print) was
    # redundant and has been dropped.  The two near-identical serialization
    # loops are now one helper.
    _write_example_set(train_set_writer, x_train, y_train, prior_weight_train)
    train_set_writer.close()
    print("Done train_set writing")

    _write_example_set(test_set_writer, x_test, y_test, prior_weight_test)
    test_set_writer.close()
    print("Done test_set writing")
    print("train_size %d test size %d"%(x_train.shape[0],x_test.shape[0]))
    print(x_train.shape)
    print(x_test.shape)


# Path to the raw training CSV (Windows-style relative path).
csv_path = '.\\raw_data\\train_data.csv'

# NOTE(review): not referenced in this chunk — presumably the word2vec
# embedding dimensionality used elsewhere; confirm.
embedding_size = 100

def gen_dict(class_num):
    """Return {0: [], 1: [], ..., class_num-1: []} with independent lists.

    The original seeded every key with 0 via dict.fromkeys and then
    immediately overwrote each value with a list; a dict comprehension
    builds the same mapping in one pass.
    """
    return {i: [] for i in range(class_num)}
def trans_dict(class_num,sentences,targets):
    """Group sentences by their class label.

    Args:
        class_num: Number of classes; every key 0..class_num-1 is present
            in the result even when its list is empty.
        sentences: Iterable of samples.
        targets: Iterable of integer labels aligned with sentences.

    Returns:
        dict[int, list]: label -> list of sentences carrying that label.
    """
    # The original converted both inputs with np.array first; for ragged
    # token lists that raises in NumPy >= 1.24 and was never needed — zip
    # works on the raw sequences directly.
    dic = {i: [] for i in range(class_num)}
    for sentence, target in zip(sentences, targets):
        # int() also accepts numpy integer labels, matching the old path.
        dic[int(target)].append(sentence)
    return dic


# Number of target classes (load_split maps the CSV's 1-based labels to 0..5).
class_num = 6

def gen_prior_targets_weight(sequence,categories):
    """Build a label-history prior from the labels preceding an utterance.

    Each class starts with a pseudo-count of 1 (add-one smoothing); every
    label in `sequence` adds 1 to its class, and the counts are normalized
    to sum to 1.

    Args:
        sequence: String/iterable of 1-based label digits seen so far.
        categories: Number of classes.

    Returns:
        numpy.ndarray: Length-`categories` weight vector.

    NOTE(review): for an empty history the original returned the raw
    all-ones vector (unnormalized, inconsistent with the other branch);
    that behavior is preserved for compatibility — confirm intent.
    """
    # float64 instead of the original uint8: per-class counts above 255
    # would have silently wrapped around.
    rs = np.ones(categories, dtype=np.float64)
    if len(sequence) > 0:
        for seq in sequence:
            rs[int(seq) - 1] += 1  # labels are 1-based
        rs = rs / np.sum(rs)
    # (per-call debug print removed)
    return rs



if __name__ =="__main__":

    # stopwords = load_stopwords('hit_stopwords.txt')
    datas,targets,prior_targets_weights = load_split(csv_path)
    # # 统计词汇
    # np.save('.\\restore\wordslist',words_list)
    # np.save('.\\restore\\targets', targets)
    # np.save('.\\restore\\prior_targets_weights', prior_targets_weights)
    # np.save('.\\restore\sentences', datas)
    datas = np.array(datas)
    targets = np.array(targets)
    print(datas.shape)
    print(targets.shape)
    x_train, x_test, y_train, y_test = train_test_split(datas, targets, test_size=0.2,random_state=2,stratify=targets)
    np.save('.\\restore\\x_train',x_train)
    np.save('.\\restore\\x_test', x_test)
    np.save('.\\restore\\y_train', y_train)
    np.save('.\\restore\\y_test', y_test)








