#!/usr/bin/env python  
#-*- coding:utf-8 _*-  
""" 
@author:hello_life 
@license: Apache Licence 
@file: data_utils.py 
@time: 2022/05/02
@software: PyCharm 
description:
"""

import os

import pandas as pd
import numpy as np
from torchtext.data import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
import pickle

from utils.parameters import Config

def load_csv(config):
    """
    Read the raw dataset CSV located at ``config.data_path``.

    params:
    config: object exposing a ``data_path`` attribute (path to the CSV)

    returns: pandas.DataFrame with the file contents
    """
    return pd.read_csv(config.data_path)

# Vocabulary-building process:
# 1. create the tokenizer
# 2. tokenize every sentence in the corpus
def build_tokenizer():
    """Return the torchtext "basic_english" tokenizer used throughout this module."""
    return get_tokenizer("basic_english")


# Apply the tokenizer to every sentence
def yeild_tokens(config, tokenizer):
    """
    Yield the token list of every review in the dataset, one review at a time.

    params:
    config: provides ``data_path`` consumed by load_csv
    tokenizer: callable mapping a string to a list of tokens

    yields: list[str] tokens for each entry in the "review" column
    """
    # NOTE: the function name keeps the original "yeild" spelling because
    # build_vocab() calls it by this exact name.
    data = load_csv(config)
    # Iterate the column directly instead of range(len(...)) + .iloc[i].
    for comment in data["review"]:
        yield tokenizer(comment)

# Build and cache the vocabulary
def build_vocab(tokenizer, config):
    """
    Build the vocabulary from the tokenized corpus and cache it to disk.

    params:
    tokenizer: tokenizer produced by build_tokenizer()
    config: provides ``data_path`` (corpus) and ``vocab_path`` (cache file)
    """
    # Check the cache first: building the vocab iterates the whole corpus,
    # so skip that work entirely when the vocab file already exists
    # (the original built it unconditionally and then discarded it).
    if os.path.exists(config.vocab_path):
        print(f"词表已存在：{config.vocab_path}")
        return

    vocab = build_vocab_from_iterator(
        yeild_tokens(config, tokenizer),
        min_freq=20,
        specials=["<unk>", "<pad>"],
    )
    # Persist only the plain str -> id dict, not the torchtext Vocab object.
    stoi = vocab.get_stoi()
    with open(config.vocab_path, "wb") as f:
        pickle.dump(stoi, f)
    print(f"词表建立成功:{config.vocab_path},单词表大小: {len(stoi)}")


################################################################
def content_to_id(content, config, tokenizer=None):
    """
    Convert a raw text string into a fixed-length list of token ids.

    params:
    content: the text to encode
    config: provides ``max_length`` (int) and ``vocab`` (dict token -> id
            containing "<unk>" and "<pad>" entries)
    tokenizer: optional callable ``str -> list[str]``; when None, the
               module's basic_english tokenizer is built (original behavior)

    returns: list[int] of exactly ``config.max_length`` ids
    """
    if tokenizer is None:
        tokenizer = build_tokenizer()
    tokens = tokenizer(content)

    # Pad short sequences with "<pad>"; truncate long ones to max_length.
    if len(tokens) < config.max_length:
        tokens = tokens + ["<pad>"] * (config.max_length - len(tokens))
    else:
        tokens = tokens[:config.max_length]

    # Hoist the <unk> id lookup out of the loop; unknown tokens map to it.
    unk_id = config.vocab.get("<unk>")
    return [config.vocab.get(token, unk_id) for token in tokens]

def label_to_id(label):
    """
    Map a sentiment label string to its integer class id.

    params:
    label: label string; "positive" maps to 1, anything else to 0

    returns: int class id
    """
    return 1 if label == "positive" else 0


################################################################
def vocab_word2vec(config):
    """
    Build an embedding matrix aligned with the model vocabulary from a
    pretrained word2vec text file and save it compressed to disk.

    params:
    config: provides ``vocab`` (dict token -> row id), ``pretrain_dir``
            (path to a text word2vec file, one "word v1 ... v300" line
            each — assumes 300-dim vectors) and ``pretrain_save_dir``
            (output .npz path)
    """
    word_2_id = config.vocab
    # Rows default to random values; rows for known words are overwritten.
    embeddings = np.random.rand(len(word_2_id), 300)
    with open(config.pretrain_dir, "r", encoding="utf-8") as f:
        # Stream the file line by line instead of readlines() — pretrained
        # embedding files are large and need not be held in memory at once.
        for line in f:
            result = line.strip().split(" ")
            if result[0] in word_2_id:
                idx = word_2_id[result[0]]
                # np.asarray parses the string fields directly to floats.
                embeddings[idx] = np.asarray(result[1:], dtype="float")
    np.savez_compressed(config.pretrain_save_dir, embeddings=embeddings)
    print(f"词向量生成完成：{config.pretrain_save_dir}")


################################################################
# def collate_fn(batch):
#     """
#     Post-process the mini-batches produced by a DataLoader.
#     NOTE(review): dead code — the label mapping here (positive -> 0) is
#     inverted relative to label_to_id (positive -> 1), the call to
#     content_to_id passes arguments in the wrong order, and the return
#     statement is missing a closing parenthesis; fix before reviving.
#     """
#     target = []
#     token_index = []
#
#     for i,(comment,label) in enumerate(batch):
#         token_index.append(content_to_id(config.vocab,comment))
#         if label == "positive":
#             target.append(0)
#         else:
#             target.append(1)
#
#     return (torch.tensor(token_index).to(torch.int32).to(config.device),torch.tensor(target).to(torch.long).to(config.device)