import json
import torch
from torch.utils.data import DataLoader, Dataset
from keywordpredic import kwinference
from transformers import BertTokenizer, ErnieForSequenceClassification
from sklearn.cluster import KMeans

# Load the label -> id mapping (produced at training time) from a JSON file.
# NOTE(review): hard-coded absolute path — presumably valid only on the author's machine.
with open('/Users/xietongxue/code/python/nlp/backend/label2id.json', 'r', encoding='utf-8') as f:
    label2id_dict = json.load(f)


class TempDataset(Dataset):
    """Dataset that splits each raw text into sentence-aligned chunks and
    encodes every chunk with the supplied tokenizer.

    Each item is a dict of stacked ``input_ids`` / ``attention_mask``
    tensors, one row per chunk of that text.
    """

    def __init__(self, texts, tokenizer, label2id, max_length):
        self.texts = texts
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.label2id = label2id  # kept for parity with training code; not read here

    def __len__(self):
        return len(self.texts)

    def find_split_point(self, text, start, max_length):
        """Scan backwards from ``start + max_length`` looking for a Chinese
        sentence terminator; return the index just past it so the chunk ends
        on a sentence boundary. Falls back to a hard cut at
        ``start + max_length`` when no terminator fits.
        """
        candidate = min(start + max_length, len(text) - 1)
        for pos in range(candidate, start, -1):
            hit = text[pos] in "。！？：；"
            if hit and (pos - start + 1) <= max_length:
                return pos + 1
        return start + max_length

    def segment_and_tokenize(self, text):
        """Chop ``text`` into chunks of at most ``max_length - 2`` characters
        (leaving room for special tokens) and encode each chunk."""
        all_ids, all_masks = [], []
        budget = self.max_length - 2
        cursor = 0
        while cursor < len(text):
            remaining = len(text) - cursor
            cut = self.find_split_point(text, cursor, budget) if remaining > budget else len(text)
            # NOTE(review): encode_plus is given a plain str while
            # is_split_into_words=True expects pre-tokenized input —
            # confirm this is intended for the tokenizer version in use.
            encoded = self.tokenizer.encode_plus(
                text[cursor:cut],
                is_split_into_words=True,
                max_length=self.max_length,
                padding='max_length',
                truncation=True,
                return_attention_mask=True
            )
            all_ids.append(encoded['input_ids'])
            all_masks.append(encoded['attention_mask'])
            cursor = cut

        return {
            'input_ids': torch.tensor(all_ids, dtype=torch.long),
            'attention_mask': torch.tensor(all_masks, dtype=torch.long)
        }

    def __getitem__(self, idx):
        return self.segment_and_tokenize(self.texts[idx])

    def collate_fn(self, batch):
        """Flatten the variable-count chunk tensors of a batch into single
        (total_chunks, max_length) tensors."""
        return {
            'input_ids': torch.cat([item['input_ids'] for item in batch], dim=0),
            'attention_mask': torch.cat([item['attention_mask'] for item in batch], dim=0)
        }


def find_sentence_boundaries(text, start, end):
    """Expand the character span [start, end] outward to the sentence that
    contains it, using Chinese terminators 。！？：； as boundaries.

    Returns an inclusive (left, right) pair; the trailing terminator is
    included in the span when one exists before the end of the text.
    """
    marks = "。！？：；"

    # Walk left until the preceding character is a terminator (or text start).
    left = start
    while left > 0 and text[left - 1] not in marks:
        left -= 1

    # Walk right until the following character is a terminator (or text end).
    right = end
    while right < len(text) - 1 and text[right + 1] not in marks:
        right += 1

    # Pull the terminator itself into the span.
    if right < len(text) - 1:
        right += 1

    return left, right


def split_texts_by_positions(text, positions):
    """Return the full text followed by, for each (start, end) span in
    ``positions``, the complete sentence that contains that span."""
    pieces = [text]
    for begin, finish in positions:
        left, right = find_sentence_boundaries(text, begin, finish)
        pieces.append(text[left:right + 1])
    return pieces

def predict(text):
    """Run the full pipeline on a review string.

    Steps: extract attributes/viewpoints with the keyword predictor,
    cluster the viewpoint embeddings with KMeans, split the text into
    the sentences around each extracted span, then classify the whole
    review plus each sentence with the fine-tuned ERNIE model.

    Returns:
        attr: attribute names, with '整体' (overall) prepended.
        predicted_labels: one label per segment (parallel to ``attr``
            when each text yields a single chunk).
        view: viewpoint strings.
        clusters: KMeans cluster id per viewpoint.
    """
    predictor = kwinference()
    attr, view, positions = predictor(text)
    print(attr, view, positions)

    # Cluster the viewpoint embeddings (dict values are stacked into a tensor).
    view_embed = torch.stack(tuple(view.values()))
    attr, view = list(attr.keys()), list(view.keys())
    kmeans = KMeans(n_clusters=3, random_state=0)
    clusters = kmeans.fit_predict(view_embed.numpy())

    attr.insert(0, '整体')
    segments = split_texts_by_positions(text, positions)

    # Load the fine-tuned ERNIE classifier and its tokenizer.
    # NOTE(review): hard-coded absolute paths — valid only on the author's machine.
    model_name = "/Users/xietongxue/code/python/nlp/backend/ernie-3.0-mini-zh"
    tokenizer = BertTokenizer.from_pretrained(model_name)
    model = ErnieForSequenceClassification.from_pretrained(model_name, num_labels=len(label2id_dict))
    model.load_state_dict(torch.load('/Users/xietongxue/code/python/nlp/backend/model.pth', map_location=torch.device('cpu')))
    # Bug fix: switch to inference mode so dropout is disabled — torch.no_grad()
    # only turns off gradient tracking, it does NOT put the model in eval mode.
    model.eval()

    # Build a throwaway dataset/loader over the text segments.
    temp_dataset = TempDataset(segments, tokenizer, label2id_dict, max_length=128)
    temp_loader = DataLoader(temp_dataset, batch_size=8, collate_fn=temp_dataset.collate_fn)

    predictions = []
    with torch.no_grad():
        for batch in temp_loader:
            outputs = model(batch['input_ids'], attention_mask=batch['attention_mask'])
            preds = torch.argmax(outputs.logits, dim=-1)
            # .tolist() yields plain Python ints, matching the id2label keys.
            predictions.extend(preds.cpu().tolist())

    id2label = {v: k for k, v in label2id_dict.items()}
    predicted_labels = [id2label[pred] for pred in predictions]

    return attr, predicted_labels, view, clusters


if __name__=='__main__':

    # Smoke test: run the full pipeline on a sample cosmetics (face mask) review.
    texts = ('成分很安全的一款面膜，对香精敏感的美眉慎入，作为日常补水款足够了。滋润，不油腻，味道也喜欢。'
             '一直在用，感觉还是不错，这次还有小礼品，谢谢啦！还有快递小哥，今天下了雪如期送达，'
             '感谢！不敢相信这么便宜的面膜一点不比八块十块的差。面膜纸丝薄服帖我很喜欢，补水用也不会心疼。回回购')
    attr,predicted_labels,view,clusters=predict(texts)

