# -*- coding: utf-8 -*-
# @Time    : 2019/4/25 20:31
# @Author  : DrMa
import json
from collections import Counter

import jieba
import numpy as np
from tqdm import tqdm

# jieba.load_userdict('./data_CAIL/user_dict.txt')
def jieba_user_dict(file):
    """Write a jieba user-dictionary file listing every charge (罪名) name.

    Loads the charge dictionary via the sibling ``load_zm_dic`` helper and
    writes one charge name per line to *file*, followed by the two extra
    placeholder tokens 'AA0' and 'AA1' used elsewhere in the pipeline.

    :param file: path of the user-dictionary file to create.
    """
    # Original path literal was '.\data\\zm_dic.json': the '\d' is an
    # invalid escape (SyntaxWarning on modern Python). Forward slashes work
    # on every platform and denote the same relative path.
    zm_dic = load_zm_dic('./data/zm_dic.json')
    with open(file, 'w', encoding='utf-8') as f:
        for zm in zm_dic.keys():
            f.write(zm + '\n')
        # NOTE(review): no trailing newline after 'AA1', matching the
        # original output byte-for-byte.
        f.write('AA0' + '\n' + 'AA1')
def get_word_dict_and_save(train, test, valid, filename, low_count=2):
    """Build a word-frequency dictionary from three fact files and save it as JSON.

    Each input file holds blank-line-separated records; the second line of
    every record is the fact text, which is segmented with jieba.  Words
    occurring *low_count* times or fewer are dropped; the sentinel 'unk'
    is stored with the value -1.

    :param train: path of the training fact file.
    :param test: path of the test fact file.
    :param valid: path of the validation fact file.
    :param filename: output path for the JSON word dictionary.
    :param low_count: frequency threshold; only words with count > low_count are kept.
    """
    # Counter replaces the original pattern of accumulating every token of
    # the whole corpus into one huge list and counting it afterwards.
    counts = Counter()
    for name in (train, test, valid):
        with open(name, 'r', encoding='utf-8') as f:
            records = f.read().strip('\n\n').split('\n\n')
        for record in tqdm(records):
            fact = record.split('\n')[1]  # line 0 = labels, line 1 = fact text
            counts.update(jieba.cut(fact))

    kept = {word: c for word, c in counts.items() if c > low_count}
    kept['unk'] = -1  # sentinel for out-of-vocabulary words
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(json.dumps(kept, ensure_ascii=False))
# get_word_dict_and_save('./data_CAIL/train_fact.txt','./data_CAIL/test_fact.txt','./data_CAIL/valid_fact.txt',filename='./data_CAIL/wd_dic.json')

def load_word_dict(filename):
    """Read the JSON word dictionary stored at *filename* and return it as a dict."""
    with open(filename, encoding='utf-8') as fp:
        raw = fp.read()
    return json.loads(raw)

def get_zm_dict(train, test, valid):
    """Map each charge (罪名) name to an integer id ordered by frequency.

    Scans the three label files (one case per line, charge names separated
    by '#') and assigns id 0 to the most frequent charge, 1 to the next,
    and so on.  The corpus holds roughly 200 distinct charges.

    :param train: path of the training label file.
    :param test: path of the test label file.
    :param valid: path of the validation label file.
    :return: dict mapping charge name -> rank index (0 = most frequent).
    """
    counts = Counter()
    for path in (train, test, valid):
        # 'r' instead of the original 'r+': these files are only read.
        with open(path, 'r', encoding='utf-8') as f:
            lines = f.read().split('\n')
        for line in tqdm(lines):
            zm_list = line.split('#')
            if zm_list != ['']:  # skip completely empty lines
                counts.update(zm_list)
    # most_common sorts by count descending with a stable sort over
    # first-seen insertion order, matching the original zip+sort logic.
    return {zm: idx for idx, (zm, _) in enumerate(counts.most_common())}
# zm_dic=get_zm_dict('.\data_CAIL\\train_fact.txt','.\data_CAIL\\test_fact.txt','.\data_CAIL\\valid_fact.txt')
def save_zm_dic(zm_dic, zm_dic_file):
    """Serialize the charge dictionary *zm_dic* to *zm_dic_file* as UTF-8 JSON.

    Non-ASCII charge names are written as-is (ensure_ascii=False).
    """
    serialized = json.dumps(zm_dic, ensure_ascii=False)
    with open(zm_dic_file, 'w+', encoding='utf-8') as out:
        out.write(serialized)
# save_zm_dic(zm_dic,'.\data_CAIL\\zm_dic.json')
def load_zm_dic(zm_dic_file):
    """Load the charge-name -> id mapping written by ``save_zm_dic``.

    :param zm_dic_file: path of the JSON charge dictionary.
    :return: dict mapping charge name -> integer id.
    """
    # 'r' instead of the original 'r+': the file is only read, and
    # read-only mode also works on write-protected files.
    with open(zm_dic_file, 'r', encoding='utf-8') as f:
        return json.loads(f.read())


def get_word2id_and_embedding(filename, embedding_dim=100):
    """Parse a word2vec-style text file and return (word2id, embeddings).

    The first line of the file is a header and is skipped.  Every other
    line must contain a word followed by *embedding_dim* floats; lines
    with the wrong field count or a duplicate word are ignored.  Two
    all-zero rows are appended for the sentinels 'UNK' and 'BLANK'
    ('BLANK' exists mainly for padding).

    :param filename: path of the embedding file.
    :param embedding_dim: dimensionality of each vector (default 100,
        matching the previously hard-coded 101-field line format).
    :return: (word2id dict, float32 numpy array of shape (V, embedding_dim)).
    """
    print('word_embedding loading....')
    word2id = {}
    word_embeddings = []
    with open(filename, 'rb') as f:  # binary read; decode manually below
        contents = f.readlines()
    # contents[1:] skips the header, replacing the original manual index counter.
    for line in tqdm(contents[1:]):
        # Some lines contain bytes that are not valid UTF-8; errors='ignore'
        # drops them instead of raising (original workaround, kept).
        fields = line.decode(encoding='utf-8', errors='ignore').strip().split()
        if len(fields) == embedding_dim + 1 and fields[0] not in word2id:
            word2id[fields[0]] = len(word2id)
            word_embeddings.append([float(v) for v in fields[1:]])
    word2id["UNK"] = len(word2id)
    word2id["BLANK"] = len(word2id)  # padding token
    # Zero vectors for the two sentinels.
    word_embeddings.append([0.0] * embedding_dim)
    word_embeddings.append([0.0] * embedding_dim)
    word_embeddings = np.asarray(word_embeddings, dtype=np.float32)
    print('word_embedding loaded')
    return word2id, word_embeddings

# zm_dict=load_zm_dic('./zm_dic.json')


#分词,并index化,然后保存到本地
# Segment facts, convert words/charges to ids, and save the result locally.
def process_data_segment_index(path, path4write, word2id, zm_dic):
    """Convert each record of *path* to id sequences and write them to *path4write*.

    Records in *path* are blank-line separated: the first line holds the
    '#'-separated charge names, the second the fact text.  Each output line
    is "word ids<4 spaces>charge ids"; unknown words fall back to
    word2id['UNK'] and charge ids are written in ascending order.

    :param path: input fact file.
    :param path4write: output file of space-joined id sequences.
    :param word2id: word -> id mapping.
    :param zm_dic: charge name -> id mapping.
    """
    with open(path, 'r', encoding='utf-8') as f:
        records = f.read().strip('\n\n').split('\n\n')
    print(len(records))
    unk_id = word2id['UNK']  # hoisted out of the per-word lookup loop
    with open(path4write, 'w', encoding='utf-8') as out:
        for record in tqdm(records):
            parts = record.split('\n')  # split once per record, not twice
            zms = parts[0].split('#')
            fact = parts[1]
            # strip('\r') guards against Windows line endings in labels;
            # ids are emitted in ascending order by convention.
            zm_ids = sorted(zm_dic[zm.strip('\r')] for zm in zms)
            zm_ids = [str(i) for i in zm_ids]
            word_ids = [str(word2id.get(w, unk_id)) for w in jieba.cut(fact)]
            out.write(' '.join(word_ids) + '    ' + ' '.join(zm_ids) + '\n')
# process_data_segment_index('./train_fact.txt','./train',word2id,zm_dict)
# process_data_segment_index('./test_fact.txt','./test',word2id,zm_dict)
# process_data_segment_index('./valid_fact.txt','./valid',word2id,zm_dict)

def load_data(train_file, test_file, valid_file, word2id, max_doc_len=400, num_zm=106):
    """Load the indexed files produced by ``process_data_segment_index``.

    Each input line looks like "w1 w2 ...<4 spaces>z1 z2 ...".  Facts are
    truncated or right-padded with word2id['BLANK'] to *max_doc_len*;
    labels become multi-hot vectors of length *num_zm*
    (e.g. charges {0, 3} -> [1,0,0,1,0,...]).

    :return: (xs_train, ys_train, xs_test, ys_test, xs_valid, ys_valid)
        where xs_* are int32 arrays of shape (N, max_doc_len) and ys_* are
        float32 arrays of shape (N, num_zm).
    """
    blank_id = word2id['BLANK']  # padding id, hoisted out of the loops

    def _process(file):
        # Parse one indexed file into (int32 facts, float32 multi-hot labels).
        xs, ys = [], []
        with open(file, 'r', encoding='utf-8') as f:
            lines = f.readlines()
        for line in tqdm(lines):
            parts = line.strip('\n').split('    ')  # split once, not twice
            fact = parts[0].split(' ')  # e.g. ['1', '34', '56', ...]
            if len(fact) > max_doc_len:
                fact = fact[:max_doc_len]
            else:
                fact = fact + [blank_id] * (max_doc_len - len(fact))
            xs.append(fact)
            one_hot = np.zeros(shape=num_zm, dtype=np.float32)
            one_hot[[int(z) for z in parts[1].split()]] = 1.0
            ys.append(one_hot)
        return np.asarray(xs, dtype=np.int32), np.asarray(ys, dtype=np.float32)

    xs_train, ys_train = _process(train_file)
    xs_test, ys_test = _process(test_file)
    xs_valid, ys_valid = _process(valid_file)
    return xs_train, ys_train, xs_test, ys_test, xs_valid, ys_valid



def batch_iter(data, batch_size, num_epochs, shuffle=True):  # batch generator
    """Yield mini-batches of *data* for *num_epochs* epochs.

    When *shuffle* is True the order is re-permuted at the start of every
    epoch.  The final batch of an epoch may be smaller than *batch_size*.

    :param data: indexable sequence (converted to a numpy array).
    :param batch_size: number of items per batch.
    :param num_epochs: number of passes over the data.
    :param shuffle: whether to shuffle before each epoch.
    """
    data = np.asarray(data)
    data_size = len(data)
    # Ceiling division. The original `int(round(data_size / batch_size))`
    # used banker's rounding and could drop the final partial batch
    # (e.g. 5 items with batch_size 2 yielded only 2 batches, losing data).
    num_batches_per_epoch = (data_size + batch_size - 1) // batch_size
    for _ in range(num_epochs):
        if shuffle:
            # Random permutation of indices 0..data_size-1.
            epoch_data = data[np.random.permutation(data_size)]
        else:
            epoch_data = data
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min(start_index + batch_size, data_size)
            yield epoch_data[start_index:end_index]


# word2id,word_embeddings=get_word2id_and_embedding('../data_CAIL/words.vec')
# xs_train,ys_train, xs_test,ys_test, xs_valid,ys_valid=load_data('train','test','valid',word2id)
# batches_test=batch_iter(list(zip(xs_valid, ys_valid)),2,1)
# for i in range(10):
#     batch_valid=batches_test.__next__()
#     print(batch_valid)