# -*- coding:utf-8 -*-

from config import parsers
from transformers import BertTokenizer
import pandas as pd
import numpy as np
import ast
import re
from gensim.models import Word2Vec
import torch
from sklearn.preprocessing import MultiLabelBinarizer


def read_file(file_path, encoding='utf-8', start=1000, end=2000):
    """Load lyric texts and their multi-label tags from a CSV file.

    The label column stores string-encoded lists such as "['tag1', 'tag2']",
    so ``ast.literal_eval`` is used to parse them back into Python lists.

    Args:
        file_path: path to a CSV file with '歌词' (lyrics) and '标签' (labels) columns.
        encoding: text encoding of the CSV file.
        start: first row (inclusive) of the slice to keep.
        end: last row (exclusive) of the slice to keep. The defaults preserve
            the previously hard-coded 1000:2000 window.

    Returns:
        A tuple ``(texts, labels)`` of pandas Series: raw lyric strings and
        parsed label lists.
    """
    data = pd.read_csv(file_path, encoding=encoding)
    # Keep only the requested row window (was hard-coded to 1000:2000).
    data = data.iloc[start:end]
    # Parse the stringified list in the label column, e.g. "['tag1', 'tag2']".
    data['标签'] = data['标签'].apply(ast.literal_eval)
    return data['歌词'], data['标签']

class data_processing():
    """Prepare lyric texts and multi-hot labels for model training.

    Two text pipelines are supported:
      * ``bert=True``  -- raw texts are returned for later BERT tokenization.
      * ``bert=False`` -- texts are mapped to word2vec vocabulary ids and
        truncated/padded to ``max_len`` (random-textcnn / word2vec-textcnn).
    """

    def __init__(self, texts, labels, bert=True):
        """Tokenize texts, train a char-level word2vec model and build its
        embedding matrix.

        Args:
            texts: iterable of raw lyric strings.
            labels: iterable of label lists (one list of tags per text).
            bert: if True, ``texts_processing`` returns raw texts for BERT;
                otherwise it returns padded vocabulary-id tensors.
        """
        self.texts = texts
        self.labels = labels
        cfg = parsers()  # build the config object once instead of three times
        self.max_len = cfg.max_len
        self.bert = bert
        self.tokenizer = BertTokenizer.from_pretrained(cfg.bert_pred)
        # Character-level tokenization: strip punctuation (anything that is
        # not a word character or whitespace), then split into characters.
        self.sentences = [[ch for ch in re.sub(r'[^\w\s]', '', text)] for text in self.texts]
        # Skip-gram (sg=1) word2vec with hierarchical softmax; min_count=1
        # guarantees every character in self.sentences gets a vocabulary id.
        self.model_w2v = Word2Vec(self.sentences, sg=1, hs=1, vector_size=cfg.hidden_size, window=5, min_count=1)
        self.vocab_dict = self.model_w2v.wv.key_to_index  # token -> id mapping
        # Row i holds the embedding of token id i; the extra all-zero last row
        # is the padding vector, addressed by id len(vocab_dict).
        embedding_matrix = np.zeros((len(self.vocab_dict) + 1, self.model_w2v.vector_size))
        for word, idx in self.vocab_dict.items():
            embedding_matrix[idx] = self.model_w2v.wv[word]
        self.embedding_matrix = torch.tensor(embedding_matrix, dtype=torch.float32)

    def texts_processing(self):
        """Return model-ready text input.

        Returns:
            If ``self.bert`` is True: a plain list of the raw texts
            (tokenization is done later by ``self.tokenizer``).
            Otherwise: a LongTensor of shape (n_texts, max_len) holding
            vocabulary ids, truncated or right-padded to ``max_len``.
        """
        if self.bert:
            # bert-textcnn / bert
            return list(self.texts)
        # random-textcnn / word2vec-textcnn
        # Pad id matches the zero row appended to embedding_matrix.
        pad_id = len(self.vocab_dict)
        processed = []
        for words in self.sentences:
            # Truncate to max_len, then right-pad up to max_len; one code path
            # covers both the short and the long case.
            ids = [self.vocab_dict[w] for w in words[:self.max_len]]
            ids.extend([pad_id] * (self.max_len - len(ids)))
            processed.append(ids)
        return torch.tensor(processed)

    def labels_processing(self):
        """Binarize the label lists into a (n_samples, n_classes) 0/1 array."""
        mlb = MultiLabelBinarizer()
        return mlb.fit_transform(self.labels)