"""
旅游景点标签生成系统 v3.0
功能：
1. 多线程并行处理
2. 安全类型转换
3. 存储优化（Parquet + Snappy压缩）
4. 完善的日志和进度监控
"""

import ast
import logging
import os
import re
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Dict, List, Set

import numpy as np
import pandas as pd
import pyarrow.parquet as pq
import requests
import spacy
from tqdm.auto import tqdm

# Configure the logging system: INFO level, timestamped messages,
# mirrored to both a log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(message)s',
    handlers=[
        logging.FileHandler('tag_generator.log'),
        logging.StreamHandler()
    ]
)

class EnhancedTagGenerator:
    """Enhanced tag generator.

    Produces multi-dimensional Chinese tags for tourist-site names by
    combining regex rules, spaCy named-entity recognition and a Wikidata
    description lookup.
    """

    def __init__(self):
        # Load the NLP model up front; fail fast with a clear hint if the
        # model package has not been downloaded.
        try:
            self.nlp = spacy.load("en_core_web_sm")
        except OSError:
            logging.error("缺少SpaCy模型，请先执行：python -m spacy download en_core_web_sm")
            raise

        # Tagging rules: regex -> architecture tag, known heritage names,
        # and keyword -> activity tags.
        self.tag_rules = {
            'architecture': {
                r'\bTower\b': '塔式建筑',
                r'\bWall\b': '防御工事',
                r'\bPalace\b': '宫殿',
                r'\bTemple\b': '宗教建筑'
            },
            'world_heritage': [
                'Great Wall of China',
                'Taj Mahal',
                'Colosseum'
            ],
            'activity': {
                'Tower': ['摄影热点', '城市景观'],
                'Wall': ['徒步路线', '历史探索'],
                'Palace': ['文化体验', '皇家历史']
            }
        }

        # Caches: per-site Wikidata descriptions and all geo entities seen.
        self.wiki_cache = {}
        self.geo_cache = set()

    def _safe_wiki_query(self, site_name: str) -> str:
        """Look up *site_name* on Wikidata and return its description.

        Results are cached per site name. Network or parsing failures are
        logged and yield '' so tag generation can continue.
        """
        if site_name in self.wiki_cache:
            return self.wiki_cache[site_name]

        try:
            url = "https://www.wikidata.org/w/api.php"
            params = {
                "action": "wbsearchentities",
                "search": site_name,
                "language": "en",
                "format": "json"
            }
            response = requests.get(url, params=params, timeout=5)
            response.raise_for_status()
            result = response.json()

            # `search` may be present but empty (no hits), and a hit may
            # lack a `description` field (or carry null) — treat both as
            # "no description" instead of raising.
            hits = result.get('search') or [{}]
            description = hits[0].get('description') or ''
            self.wiki_cache[site_name] = description
            return description

        except Exception as e:
            logging.warning(f"维基查询失败 [{site_name}]: {str(e)}")
            return ''

    def _extract_geo_entities(self, text: str) -> Set[str]:
        """Extract geographic entities (GPE/LOC) from *text* via spaCy."""
        doc = self.nlp(text)
        return {ent.text for ent in doc.ents if ent.label_ in ['GPE', 'LOC']}

    def generate_tags(self, site_name: str) -> Dict[str, List[str]]:
        """Generate multi-dimensional tags for one site name.

        Returns a dict mapping tag category to a sorted, de-duplicated
        list of tags. On internal failure the dict carries an 'error' key.
        """
        tags = defaultdict(list)

        try:
            # Rule matching: architecture type (case-insensitive regex).
            for pattern, tag in self.tag_rules['architecture'].items():
                if re.search(pattern, site_name, flags=re.IGNORECASE):
                    tags['architecture'].append(tag)

            # World-heritage status (exact name match).
            if site_name in self.tag_rules['world_heritage']:
                tags['status'].append('世界遗产')

            # Geographic entities from NER.
            geo_entities = self._extract_geo_entities(site_name)
            tags['geo_location'].extend(geo_entities)
            self.geo_cache.update(geo_entities)

            # Wikidata description enrichment.
            wiki_desc = self._safe_wiki_query(site_name)
            if 'UNESCO' in wiki_desc:
                tags['status'].append('UNESCO遗产')
            if 'historical' in wiki_desc.lower():
                tags['time_period'].append('历史遗迹')

            # Activity tags (case-sensitive keyword containment).
            for keyword, activities in self.tag_rules['activity'].items():
                if keyword in site_name:
                    tags['activities'].extend(activities)

        except Exception as e:
            logging.error(f"标签生成错误 [{site_name}]: {str(e)}")
            tags['error'].append('生成失败')

        # De-duplicate and sort each category for stable output.
        return {k: sorted(list(set(v))) for k, v in tags.items()}

def parallel_tag_generation(sites: List[str], max_workers: int = 4) -> pd.DataFrame:
    """Generate tags for every site concurrently.

    Args:
        sites: Unique site names to tag.
        max_workers: Thread-pool size.

    Returns:
        DataFrame with one row per site: ``site_name`` plus tag columns
        (or an ``error`` column for sites whose future raised).
    """
    tagger = EnhancedTagGenerator()
    rows = []

    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        pending = {pool.submit(tagger.generate_tags, name): name for name in sites}

        with tqdm(total=len(sites), desc="生成景点标签", unit="site") as progress:
            for done in as_completed(pending):
                name = pending[done]
                try:
                    rows.append({"site_name": name, **done.result()})
                except Exception as exc:
                    logging.error(f"处理失败 [{name}]: {str(exc)}")
                    rows.append({"site_name": name, "error": [str(exc)]})
                finally:
                    progress.update(1)

    return pd.DataFrame(rows)


def safe_feature_expansion(df: pd.DataFrame) -> pd.DataFrame:
    """Safely expand tag-list columns into one-hot indicator columns.

    Every column other than ``site_name`` is expected to hold lists of tag
    strings; any non-list cell is treated as an empty list. Each tag becomes
    a ``<column>_<tag>`` uint8 indicator column, so identical tag text in
    different categories stays distinct (the original code dropped the
    category prefix, silently merging such columns).

    Args:
        df: Tag DataFrame containing a ``site_name`` column. Not modified —
            the function works on a copy instead of mutating the caller's frame.

    Returns:
        DataFrame with ``site_name`` (category dtype) plus uint8 indicator
        columns; all-zero indicator columns are dropped.
    """
    # Work on a copy so the caller's frame is left untouched.
    df = df.copy()
    tag_cols = df.columns.difference(['site_name'])

    # Normalize: every tag cell must be a list (bad/missing cells -> []).
    for col in tag_cols:
        df[col] = df[col].apply(
            lambda x: x if isinstance(x, list) else []
        )

    # Expand each tag column into indicator columns.
    expanded_dfs = []
    for col in tag_cols:
        s = df[col].explode()
        s_filtered = s[s.apply(lambda x: isinstance(x, str) and x.strip() != '')]

        if not s_filtered.empty:
            # Prefix with the source column name so e.g. 'status' and
            # 'architecture' tags with identical text get separate columns.
            dummies = pd.get_dummies(
                s_filtered,
                prefix=col,
                prefix_sep='_'
            ).groupby(level=0).max()
            expanded_dfs.append(dummies)

    # Merge; rows without a given tag become NaN -> 0.
    final_df = pd.concat([df[['site_name']], *expanded_dfs], axis=1).fillna(0)

    # Type optimization: names as category, indicators as uint8.
    final_df['site_name'] = final_df['site_name'].astype('category')
    indicator_cols = final_df.columns.difference(['site_name'])
    final_df[indicator_cols] = final_df[indicator_cols].astype('uint8')

    # Drop indicator columns that never fire.
    final_df = final_df.loc[:, (final_df != 0).any(axis=0)]

    return final_df

def optimize_data_storage(df: pd.DataFrame, output_path: str) -> None:
    """Persist *df* as a snappy-compressed Parquet file.

    Bool columns are written as uint8 for compactness. The caller's frame
    is not modified (the original converted dtypes in place).

    Args:
        df: Frame to store.
        output_path: Destination Parquet file path.
    """
    # Convert on a copy so the caller keeps the original dtypes.
    bool_cols = df.select_dtypes(include='bool').columns
    if len(bool_cols):
        df = df.copy()
        df[bool_cols] = df[bool_cols].astype('uint8')

    # Save as Parquet (pyarrow engine, snappy compression).
    df.to_parquet(
        output_path,
        engine='pyarrow',
        compression='snappy',
        index=False
    )
    logging.info(f"数据已保存至 {output_path} (大小: {os.path.getsize(output_path)/1024**2:.2f} MB)")

def main(input_file: str, output_file: str):
    """Run the full pipeline: load sites, tag, expand features, store.

    Args:
        input_file: CSV path with a 'Sites Visited' column containing
            Python list literals.
        output_file: Destination Parquet path.

    Returns:
        The final feature DataFrame.

    Raises:
        Re-raises any pipeline exception after logging it as critical.
    """
    try:
        # Load data. ast.literal_eval parses only Python literals, unlike
        # eval, which would execute arbitrary code from an untrusted CSV.
        logging.info("开始加载数据...")
        raw_df = pd.read_csv(
            input_file,
            converters={'Sites Visited': ast.literal_eval},
            usecols=['Sites Visited']
        )
        sites = list(set(site for sublist in raw_df['Sites Visited'] for site in sublist))
        logging.info(f"发现 {len(sites)} 个唯一景点")

        # Generate tags in parallel.
        logging.info("开始生成标签...")
        tag_df = parallel_tag_generation(sites)

        # Expand tag lists into the feature matrix.
        logging.info("处理特征矩阵...")
        final_df = safe_feature_expansion(tag_df)

        # Persist with optimized dtypes/compression.
        logging.info("优化存储格式...")
        optimize_data_storage(final_df, output_file)

        # Summary report (compression_ratio = file size / in-memory size).
        report = {
            "total_sites": len(sites),
            "generated_tags": final_df.shape[1] - 1,
            "memory_usage": final_df.memory_usage().sum() / 1024**2,
            "compression_ratio": os.path.getsize(output_file) / (final_df.memory_usage().sum())
        }
        logging.info(
            f"处理完成\n"
            f"生成标签数: {report['generated_tags']}\n"
            f"内存使用: {report['memory_usage']:.2f} MB\n"
            f"压缩率: {report['compression_ratio']:.1%}"
        )

        return final_df

    except Exception as e:
        logging.critical(f"主流程错误: {str(e)}")
        raise

if __name__ == "__main__":
    # 配置参数
    INPUT_CSV = "tourism_dataset_5000.csv"
    OUTPUT_PARQUET = "optimized_sites.parquet"

    # 执行主程序
    result_df = main(INPUT_CSV, OUTPUT_PARQUET)

    # 打印示例
    print("\n生成数据示例:")
    print(result_df.iloc[:3, :5].to_markdown(index=False))