import pandas as pd
import jieba
import random
import numpy as np
import torch

from no_attention.config import Config


class DataGenerator:
    """Generates a synthetic Chinese aspect-sentiment dataset.

    Each sample is a ``(text, subject, sentiment)`` triple where ``subject``
    is the aspect/feature string mentioned inside ``text`` and ``sentiment``
    is either ``"positive"`` or ``"negative"``.
    """

    def __init__(self):
        # Aspect vocabulary grouped by product/service category.
        self.subjects = {
            "快递服务": ["送货速度", "服务态度", "包装质量", "配送范围", "快递员素质", "物流速度", "配送时效",
                         "快递安全性", "取件便捷度", "客服响应"],
            "电子产品": ["电池续航", "屏幕显示", "摄像头性能", "散热系统", "处理器性能", "音质表现", "操作系统",
                         "充电速度", "信号稳定性", "外观设计", "手感舒适度", "系统流畅度"],
            "餐饮服务": ["菜品质量", "卫生状况", "服务响应", "就餐环境", "价格水平", "口味特色", "食材新鲜度",
                         "上菜速度", "分量足够度", "装修氛围", "停车便利性", "位置交通"],
            "家用电器": ["能效等级", "噪音水平", "外观设计", "操作便捷性", "安全性", "耐用性", "清洁效果", "加热速度",
                         "制冷效果", "容量大小"],
            "服装鞋帽": ["面料舒适度", "款式设计", "尺码准确性", "做工精细度", "耐磨性", "透气性", "保暖性", "色牢度",
                         "防水性", "易清洗性"],
            "汽车服务": ["动力性能", "油耗水平", "内饰设计", "空间舒适度", "安全配置", "操控性", "售后服务",
                         "维修便捷性", "保值率", "品牌影响力"]
        }

        # Sentiment label -> pool of adjectives used to fill templates.
        self.adj_map = {
            "positive": ["惊艳", "卓越", "流畅", "专业", "稳定", "迅捷", "舒适", "出众", "完美", "无与伦比",
                         "优质", "高效", "精致", "可靠", "满意", "出色", "优秀", "一流", "超预期", "令人满意"],
            "negative": ["糟糕", "低劣", "迟钝", "差劲", "延迟", "缓慢", "刺耳", "敷衍", "失望", "不堪",
                         "劣质", "不稳定", "令人失望", "难以接受", "不合格", "粗糙", "不专业", "令人不满"]
        }

        # Sentence templates; placeholders: {product}, {feature}, {adverb}, {adj}.
        self.templates = [
            "这款{product}的{feature}实在{adj}",
            "必须说这个{product}的{feature}非常{adj}",
            "使用后感觉{product}的{feature}{adverb}{adj}",
            "我认为这个{product}的{feature}相当{adj}",
            "体验了一段时间，{product}的{feature}{adverb}{adj}",
            "相比其他产品，这个{product}的{feature}{adverb}更{adj}",
            "和之前用过的比起来，这款{product}的{feature}真是{adj}",
            "在众多同类产品中，这个{product}的{feature}算是{adj}的了",
            "强烈推荐这款{product}，{feature}非常{adj}",
            "不推荐购买，{product}的{feature}太{adj}了",
            "值得入手，尤其是{product}的{feature}特别{adj}",
            "用了一个月，{product}的{feature}依然{adj}",
            "刚收到就感觉{product}的{feature}非常{adj}",
            "长期使用后发现{product}的{feature}越来越{adj}",
            "太{adj}了，这个{product}的{feature}简直让人无法接受",
            "好{adj}啊，{product}的{feature}真的超出预期",
            "简直{adj}到家了，{product}的{feature}让我很满意",
            "从细节可以看出{product}的{feature}非常{adj}",
            "仔细体验后发现{product}的{feature}做得相当{adj}",
            "就这个价位来说，{product}的{feature}算是{adj}的了",
            "这个{product}的{feature}让我感到{adj}",
            "我对{product}的{feature}感到{adj}",
            "这个{product}的{feature}让我觉得{adj}",
            "我对{product}的{feature}非常{adj}",
            "这个{product}的{feature}让我非常{adj}"
        ]

        # Degree adverbs; the empty string means "no adverb".
        self.adverbs = ["", "真的", "确实", "实在", "非常", "极其", "特别", "相当", "十分", "格外", "异常", "尤其"]

    def generate(self, num_samples=50000):
        """Generate up to ``num_samples`` unique samples.

        Args:
            num_samples: target number of unique rows.

        Returns:
            pd.DataFrame with columns ``["text", "subject", "sentiment"]``.
        """
        # Seed the pool with hand-written samples so the dataset is never
        # empty, even for a tiny num_samples.
        samples_set = set([
            ("这款手机的电池续航表现非常出色", "电池续航", "positive"),
            ("快递员送货速度快得惊人", "送货速度", "positive"),
            ("餐厅的菜品质量令人惊艳", "菜品质量", "positive"),
            ("服务态度恶劣到让人生气", "服务态度", "negative"),
            ("电池续航能力差到无法接受", "电池续航", "negative"),
            ("屏幕显示效果令人惊艳", "屏幕显示", "positive")
        ])

        attempts = 0
        # Cap the total work in case duplicates dominate near saturation.
        max_attempts = num_samples * 20

        while len(samples_set) < num_samples and attempts < max_attempts:
            attempts += 1
            category = random.choice(list(self.subjects.keys()))
            subject_list = self.subjects[category]
            feature = random.choice(subject_list)
            sentiment = random.choice(["positive", "negative"])
            template = random.choice(self.templates)

            # Pick a product different from the feature for diversity
            # (only possible when the category has more than one aspect).
            product = feature
            while product == feature and len(subject_list) > 1:
                product = random.choice(subject_list)

            text = template.format(
                product=product,
                feature=feature,
                adverb=random.choice(self.adverbs),
                adj=random.choice(self.adj_map[sentiment])
            )

            samples_set.add((text, feature, sentiment))

            # Light augmentation every ~100 unique samples.
            # BUGFIX: the previous code split the text on whitespace, which
            # never fires for Chinese text (no spaces -> a single token), so
            # the swap augmentation was dead code.  We now swap two random
            # characters and keep the result only if the labelled subject
            # string survives intact, so (text, subject) stays consistent.
            if len(samples_set) % 100 == 0:
                base_text, base_feature, base_sentiment = random.choice(list(samples_set))
                chars = list(base_text)
                if len(chars) > 3:
                    i, j = random.sample(range(len(chars)), 2)
                    chars[i], chars[j] = chars[j], chars[i]
                    new_text = "".join(chars)
                    if base_feature in new_text:
                        samples_set.add((new_text, base_feature, base_sentiment))

        # Sort before trimming: plain list(set) order depends on string-hash
        # randomization and would be irreproducible even under a fixed seed.
        samples = sorted(samples_set)[:num_samples]
        print(f"成功生成 {len(samples)} 条不重复样本，尝试次数: {attempts}")
        return pd.DataFrame(samples, columns=["text", "subject", "sentiment"])

    def is_valid_sample(self, text):
        # Kept for interface compatibility: every generated sample is
        # considered valid.
        return True

class TextDataset:
    """Torch-style dataset yielding encoded text, subject tags and labels.

    Args:
        df: DataFrame with columns ``"text"``, ``"subject"``, ``"sentiment"``.
        word2idx: vocabulary mapping; unknown words fall back to index 1 and
            index 0 is used for padding (see ``__getitem__``).
    """

    def __init__(self, df, word2idx):
        self.df = df.reset_index(drop=True)
        self.word2idx = word2idx
        # Label encoding for the sentiment classification target.
        self.sentiment_map = {"positive": 0, "negative": 1}

    def __len__(self):
        return len(self.df)

    def _tag_subject(self, words, subject, original_text):
        """Return a 0/1 tag per token marking tokens overlapping the subject.

        First tries an exact character-span match against the raw text and
        maps that span onto the (possibly truncated) jieba tokens; if that
        yields no tags, falls back to fuzzy token-level matching against the
        segmented subject.
        """
        tags = [0] * len(words)

        if subject in original_text:
            start_pos = original_text.find(subject)
            end_pos = start_pos + len(subject)

            # Map the character span onto token boundaries and mark every
            # token whose span overlaps [start_pos, end_pos).
            char_pos = 0
            for i, word in enumerate(words):
                word_start, word_end = char_pos, char_pos + len(word)
                if max(word_start, start_pos) < min(word_end, end_pos):
                    tags[i] = 1
                char_pos = word_end

        # BUGFIX: the fuzzy fallback previously ran only when the subject was
        # absent from the raw text.  If the subject occurs past the MAX_LEN
        # truncation point, the span match above tags nothing and the sample
        # used to get an all-zero mask.  Now fall back whenever the span
        # match produced no tags.
        if not any(tags):
            subject_words = jieba.lcut(subject)
            for i, word in enumerate(words):
                # Exact token match.
                if word in subject_words:
                    tags[i] = 1
                    continue
                # Partial match: a multi-char subject piece contained in the
                # token, or the token contained in a subject piece.
                for sw in subject_words:
                    if len(sw) > 1 and (sw in word or word in sw):
                        tags[i] = 1
                        break
        return tags

    def __getitem__(self, idx):
        """Return input ids, per-token subject tags and the sentiment label."""
        row = self.df.iloc[idx]
        # Tokenize and truncate to the model's maximum sequence length.
        words = jieba.lcut(row["text"])[:Config.MAX_LEN]

        # Encode tokens: OOV words map to index 1, index 0 pads to MAX_LEN.
        text_ids = [self.word2idx.get(word, 1) for word in words]
        pad_len = Config.MAX_LEN - len(text_ids)
        text_ids += [0] * pad_len

        subject_tags = self._tag_subject(words, row["subject"], row["text"])
        subject_tags += [0] * pad_len

        return {
            "input_ids": torch.tensor(text_ids, dtype=torch.long),
            "subject_tags": torch.tensor(subject_tags, dtype=torch.float),
            "sentiment": torch.tensor(self.sentiment_map[row["sentiment"]], dtype=torch.long)
        }