import pandas as pd
import jieba
import random
import numpy as np
import torch
from collections import defaultdict
from typing import List, Dict, Tuple
from no_attention.config import Config

class DataGenerator:
    """Builds a synthetic Chinese product-review corpus labelled with
    (subject/aspect, sentiment, intent) for training downstream models.

    The public entry point is :meth:`generate`, which returns a pandas
    DataFrame with columns ``["text", "subject", "sentiment", "intent"]``.
    """

    def __init__(self):
        # Aspect (feature) vocabulary per product category.  Note that both
        # the {product} and {feature} template slots are filled from this
        # same list — an intentional quirk of the original data generator.
        self.subjects = {
            "快递服务": ["送货速度", "服务态度", "包装质量", "配送范围", "快递员素质", 
                      "物流速度", "配送时效", "快递安全性", "取件便捷度", "客服响应"],
            "电子产品": ["电池续航", "屏幕显示", "摄像头性能", "散热系统", "处理器性能",
                      "音质表现", "操作系统", "充电速度", "信号稳定性", "外观设计"],
            "餐饮服务": ["菜品质量", "卫生状况", "服务响应", "就餐环境", "价格水平",
                      "口味特色", "食材新鲜度", "上菜速度", "分量足够度", "装修氛围"],
            "家用电器": ["能效等级", "噪音水平", "外观设计", "操作便捷性", "安全性",
                      "耐用性", "清洁效果", "加热速度", "制冷效果", "容量大小"],
            "服装鞋帽": ["面料舒适度", "款式设计", "尺码准确性", "做工精细度", "耐磨性",
                      "透气性", "保暖性", "色牢度", "防水性", "易清洗性"],
            "汽车服务": ["动力性能", "油耗水平", "内饰设计", "空间舒适度", "安全配置",
                      "操控性", "售后服务", "维修便捷性", "保值率", "品牌影响力"]
        }

        # Degree adverbs; the leading empty string lets templates omit one.
        self.adverbs = ["", "真的", "确实", "实在", "非常", "极其", "特别", "相当", "十分"]

        # Sentiment-polarised adjectives used to fill the {adj} slot.
        self.adj_map = {
            "positive": ["惊艳", "卓越", "流畅", "专业", "稳定", "迅捷", "舒适", "出众", 
                       "完美", "无与伦比", "优质", "高效", "精致", "可靠", "满意", "出色",
                       "优秀", "一流", "超预期", "令人满意"],
            "negative": ["糟糕", "低劣", "迟钝", "差劲", "延迟", "缓慢", "刺耳", "敷衍",
                       "失望", "不堪", "劣质", "不稳定", "令人失望", "难以接受", "不合格",
                       "粗糙", "不专业", "令人不满"]
        }

        # Synonym table for light lexical augmentation in _augment_sample.
        self.synonym_map = {
            "非常": ["极其", "特别", "相当", "十分", "格外"],
            "出色": ["优秀", "杰出", "卓越", "出众", "精彩"],
            "糟糕": ["差劲", "低劣", "拙劣", "不堪", "恶劣"]
        }

        # Hand-written (text, subject, sentiment) triples that MUST appear
        # in every generated dataset; generate() guarantees this.
        self.must_include_samples = [
            ("这款手机的电池续航表现非常出色", "电池续航", "positive"),
            ("快递员送货速度快得惊人", "送货速度", "positive"),
            ("餐厅的菜品质量令人惊艳", "菜品质量", "positive"),
            ("服务态度恶劣到让人生气", "服务态度", "negative"),
            ("电池续航能力差到无法接受", "电池续航", "negative"),
            ("屏幕显示效果令人惊艳", "屏幕显示", "positive")
        ]

        # Intent label -> surface verbs used to realise the {intent} slot.
        self.intents = {
            "咨询": ["询问", "了解", "咨询", "想知道"],
            "投诉": ["投诉", "抱怨", "不满", "抗议"],
            "表扬": ["表扬", "称赞", "夸奖", "赞赏"],
            "建议": ["建议", "提议", "推荐", "意见"],
            "求助": ["求助", "请求帮助", "需要帮助", "求救"]
        }

        # (template, sampling weight) pairs; weights sum to 1.0.
        self.templates = [
            ("我想{intent}一下{product}的{feature}，{adverb}{adj}", 0.3),
            ("{product}的{feature}让我{intent}，{adverb}{adj}", 0.2),
            ("关于{product}的{feature}，我{intent}，{adverb}{adj}", 0.2),
            ("{intent}：{product}的{feature}{adverb}{adj}", 0.2),
            ("我{intent}{product}的{feature}，{adverb}{adj}", 0.1)
        ]

    def _generate_sample(self, category: str) -> Tuple[str, str, str, str]:
        """Generate one intent-annotated sample for ``category``.

        Returns a ``(text, subject, sentiment, intent)`` tuple where
        ``sentiment`` is "positive"/"negative" and ``intent`` is one of the
        keys of ``self.intents``.
        """
        subject = random.choice(self.subjects[category])
        sentiment = random.choice(["positive", "negative"])
        intent = random.choice(list(self.intents.keys()))
        intent_word = random.choice(self.intents[intent])

        # Weighted draw of a template; random.choices returns a one-element
        # list, so [0] yields the (template, weight) pair.
        template, _ = random.choices(
            self.templates,
            weights=[w for _, w in self.templates]
        )[0]

        # Pick a distinct "product" slot filler from the same feature list
        # (see note on self.subjects above) when more than one is available.
        product = random.choice(self.subjects[category])
        while product == subject and len(self.subjects[category]) > 1:
            product = random.choice(self.subjects[category])

        text = template.format(
            product=product,
            feature=subject,
            intent=intent_word,
            adverb=random.choice(self.adverbs),
            adj=random.choice(self.adj_map[sentiment])
        )

        return (text, subject, sentiment, intent)

    def generate(self, num_samples=50000):
        """Build a labelled DataFrame of at most ``num_samples`` rows.

        The mandatory ``must_include_samples`` are placed at the FRONT of
        the result before truncation.  (The previous implementation
        truncated an unordered set, so the truncation could silently drop
        the very samples the class promises to include.)
        """
        # Mandatory samples, given a heuristic intent label.
        must_haves = [
            (text, subject, sentiment,
             "表扬" if sentiment == "positive" else "投诉")
            for text, subject, sentiment in self.must_include_samples
        ]
        must_set = set(must_haves)
        samples = set(must_set)  # set used only for de-duplication

        category_dist = self._get_category_distribution(num_samples)

        for category, count in category_dist.items():
            for _ in range(count):
                sample = self._generate_sample(category)
                samples.add(sample)

                # Add 0-2 lightly augmented variants per base sample.
                for _ in range(random.randint(0, 2)):
                    samples.add(self._augment_sample(sample))

        # Mandatory samples first so truncation can never remove them.
        ordered = must_haves + [s for s in samples if s not in must_set]
        return pd.DataFrame(ordered[:num_samples],
                            columns=["text", "subject", "sentiment", "intent"])

    def _get_category_distribution(self, total: int) -> Dict[str, int]:
        """Split ``total`` (minus the mandatory samples) evenly across
        categories, distributing the remainder one-by-one from the front.
        """
        dist = {}
        # Clamp at zero so a pathologically small `total` cannot produce
        # negative per-category counts.
        remaining = max(0, total - len(self.must_include_samples))
        categories = list(self.subjects.keys())
        base_count = remaining // len(categories)

        for cat in categories:
            dist[cat] = base_count

        for i in range(remaining % len(categories)):
            dist[categories[i]] += 1

        return dist

    def _augment_sample(self, sample: Tuple[str, str, str, str]) -> Tuple[str, str, str, str]:
        """Return a lexically perturbed copy of ``sample``.

        Applies synonym substitution, optional adverb insertion (30%) and
        an optional swap of two random tokens (20%).  Requires jieba for
        word segmentation; only the text field is modified.
        """
        text, subject, sentiment, intent = sample
        words = jieba.lcut(text)

        for i in range(len(words)):
            if words[i] in self.synonym_map:
                words[i] = random.choice(self.synonym_map[words[i]])

        if random.random() < 0.3 and len(words) > 3:
            # May insert "" (a no-op) since adverbs includes the empty string.
            words.insert(random.randint(0, len(words)), random.choice(self.adverbs))

        if len(words) > 4 and random.random() < 0.2:
            i, j = random.sample(range(len(words)), 2)
            words[i], words[j] = words[j], words[i]

        return ("".join(words), subject, sentiment, intent)

class TextDataset:
    """Map-style dataset over the generated review DataFrame.

    Each item is a dict of tensors: padded token ids, per-token subject
    tags, and integer sentiment/intent labels.
    """

    def __init__(self, df, word2idx):
        # Re-index positionally so __getitem__'s iloc lookups stay aligned.
        self.df = df.reset_index(drop=True)
        self.word2idx = word2idx
        # Fixed label-to-id mappings; ids must stay stable across runs.
        self.sentiment_map = {"positive": 0, "negative": 1}
        self.intent_map = {"咨询": 0, "投诉": 1, "表扬": 2, "建议": 3, "求助": 4}

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, idx):
        record = self.df.iloc[idx]
        tokens = jieba.lcut(record["text"])[:Config.MAX_LEN]

        # 1 presumably denotes the OOV/UNK id, 0 the padding id — matches
        # how the ids are padded below; confirm against vocab construction.
        ids = [self.word2idx.get(tok, 1) for tok in tokens]
        padding = [0] * (Config.MAX_LEN - len(ids))

        tags = self._mark_subject(tokens, record["subject"])

        return {
            "input_ids": torch.tensor(ids + padding, dtype=torch.long),
            "subject_tags": torch.tensor(tags + padding, dtype=torch.float),
            "sentiment": torch.tensor(self.sentiment_map[record["sentiment"]], dtype=torch.long),
            # Unknown intents fall back to 0 ("咨询") — deliberate best-effort.
            "intent": torch.tensor(self.intent_map.get(record["intent"], 0), dtype=torch.long),
        }

    def _mark_subject(self, words: List[str], subject: str) -> List[int]:
        """Return a 0/1 tag per token marking the subject-phrase tokens.

        First pass requires exact token equality with the segmented subject;
        if nothing matched (segmentation mismatch between text and subject),
        fall back to substring containment in either direction.
        """
        pieces = jieba.lcut(subject)
        tags = [1 if w in pieces else 0 for w in words]

        if not any(tags):
            tags = [
                1 if any(p in w or w in p for p in pieces) else 0
                for w in words
            ]

        return tags