import json
import logging
import os
import re
from datetime import datetime

import numpy as np
import pandas as pd
from dotenv import load_dotenv
from sqlalchemy import (
    Column,
    DateTime,
    Float,
    Integer,
    MetaData,
    String,
    Table,
    Text,
    create_engine,
    text,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

# 加载环境变量
load_dotenv()

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler("data_processor.log"), logging.StreamHandler()]
)
logger = logging.getLogger("data_processor")

# 获取数据库配置
DB_HOST = os.getenv('DB_HOST', 'localhost')
DB_PORT = os.getenv('DB_PORT', '3306')
DB_USER = os.getenv('DB_USER', 'root')
DB_PASSWORD = os.getenv('DB_PASSWORD', 'root')
DB_NAME = os.getenv('DB_NAME', 'brand_crisis')

# 声明基类
Base = declarative_base()

class DataProcessor:
    """Clean, structure, and persist crawled brand-crisis data.

    Raw news / social-media / official-statement exports (Excel) are loaded,
    normalized, and written to MySQL; an integrated ``crisis_data`` table is
    then derived from them for downstream crisis assessment.
    """

    def __init__(self, db_connect=True):
        """Initialize the data processor.

        Args:
            db_connect: whether to connect to the database immediately.
                When False, ``engine``/``session`` stay None and all DB
                methods degrade to warnings.
        """
        self.engine = None
        self.session = None

        if db_connect:
            self._connect_db()

    def _connect_db(self):
        """Connect to MySQL and ensure the schema exists.

        Returns:
            True on success, False on failure (the error is logged and the
            partially-initialized engine/session are discarded so that the
            ``if self.engine`` guards elsewhere remain reliable).
        """
        try:
            # Build the connection URL from the module-level env settings.
            db_url = f"mysql+pymysql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
            self.engine = create_engine(db_url)

            # Session kept for API compatibility; the bulk operations below
            # use Core connections directly.
            Session = sessionmaker(bind=self.engine)
            self.session = Session()

            # Create the tables if they do not exist yet.
            self._create_tables()

            logger.info("数据库连接成功")
            return True

        except Exception as e:
            logger.error(f"数据库连接失败: {str(e)}")
            # Discard partial state: previously a broken engine stayed set,
            # so later `if self.engine` guards would use a dead connection.
            if self.engine is not None:
                self.engine.dispose()
            self.engine = None
            self.session = None
            return False

    def _create_tables(self):
        """Define and create the database schema (idempotent)."""
        metadata = MetaData()

        # Raw crawled news articles.
        self.news_table = Table(
            'news', metadata,
            Column('id', Integer, primary_key=True, autoincrement=True),
            Column('title', String(255)),
            Column('url', String(255)),
            Column('source', String(100)),
            Column('published_time', String(50)),
            Column('summary', Text),
            Column('content', Text),
            Column('crawl_time', DateTime),
            Column('platform', String(50)),
            Column('keyword', String(100))
        )

        # Raw social-media posts with engagement counters.
        self.social_media_table = Table(
            'social_media', metadata,
            Column('id', Integer, primary_key=True, autoincrement=True),
            Column('platform', String(50)),
            Column('content_type', String(50)),
            Column('title', String(255)),
            Column('content', Text),
            Column('user', String(100)),
            Column('published_time', String(50)),
            Column('likes', Integer),
            Column('reposts', Integer),
            Column('comments', Integer),
            Column('url', String(255)),
            Column('keyword', String(100)),
            Column('crawl_time', DateTime)
        )

        # Official brand statements.
        self.official_statement_table = Table(
            'official_statement', metadata,
            Column('id', Integer, primary_key=True, autoincrement=True),
            Column('title', String(255)),
            Column('content', Text),
            Column('publish_date', String(50)),
            Column('url', String(255)),
            Column('source', String(100)),
            Column('platform', String(50)),
            Column('reposts', Integer),
            Column('comments', Integer),
            Column('likes', Integer),
            Column('crawl_time', DateTime)
        )

        # Cleaned, merged table used for crisis assessment.
        self.crisis_data_table = Table(
            'crisis_data', metadata,
            Column('id', Integer, primary_key=True, autoincrement=True),
            Column('data_type', String(50)),  # news, social, official
            Column('title', String(255)),
            Column('content', Text),
            Column('source', String(100)),
            Column('published_time', DateTime),
            Column('url', String(255)),
            Column('platform', String(50)),
            Column('engagement', Integer),  # comments + reposts + likes
            Column('sentiment_score', Float),  # sentiment in [-1, 1]
            Column('crisis_related', Integer),  # 0/1 flag
            Column('crisis_keyword', String(100)),  # keyword that triggered the flag
            Column('brand_name', String(100)),
            Column('processed_time', DateTime)
        )

        metadata.create_all(self.engine)
        logger.info("数据库表创建成功")

    def _bulk_insert(self, table, records):
        """Insert *records* into *table* inside a managed transaction.

        ``engine.begin()`` commits on success and always releases the
        connection — the previous connect()/close() pattern leaked the
        connection whenever execute() raised.

        Returns:
            The driver-reported inserted row count.
        """
        with self.engine.begin() as conn:
            result = conn.execute(table.insert(), records)
            return result.rowcount

    def process_news_data(self, news_file):
        """Process news data from an Excel export and load it into MySQL.

        Args:
            news_file: path to the Excel file produced by the news crawler.

        Returns:
            Number of records inserted (or read, if the DB is unavailable);
            0 on error.
        """
        try:
            df = pd.read_excel(news_file)
            logger.info(f"从 {news_file} 读取了 {len(df)} 条新闻数据")

            # Normalize crawl timestamps and blank out remaining NaNs.
            df['crawl_time'] = pd.to_datetime(df['crawl_time'])
            df.fillna('', inplace=True)

            records = df.to_dict('records')

            if self.engine:
                inserted = self._bulk_insert(self.news_table, records)
                logger.info(f"成功导入 {inserted} 条新闻数据到数据库")
                return inserted
            else:
                logger.warning("数据库未连接，无法导入数据")

            return len(records)

        except Exception as e:
            logger.error(f"处理新闻数据出错: {str(e)}")
            return 0

    def process_social_media_data(self, social_file):
        """Process social-media data from an Excel export and load it into MySQL.

        Args:
            social_file: path to the Excel file produced by the social crawler.

        Returns:
            Number of records inserted (or read, if the DB is unavailable);
            0 on error.
        """
        try:
            df = pd.read_excel(social_file)
            logger.info(f"从 {social_file} 读取了 {len(df)} 条社交媒体数据")

            df['crawl_time'] = pd.to_datetime(df['crawl_time'])

            # Guarantee every schema column exists, then backfill NaNs with
            # the same defaults (replaces the previous seven-branch chain).
            defaults = {
                'content_type': '', 'title': '', 'user': '', 'url': '',
                'likes': 0, 'reposts': 0, 'comments': 0,
            }
            for column, default in defaults.items():
                if column not in df.columns:
                    df[column] = default
            df.fillna(defaults, inplace=True)

            records = df.to_dict('records')

            if self.engine:
                inserted = self._bulk_insert(self.social_media_table, records)
                logger.info(f"成功导入 {inserted} 条社交媒体数据到数据库")
                return inserted
            else:
                logger.warning("数据库未连接，无法导入数据")

            return len(records)

        except Exception as e:
            logger.error(f"处理社交媒体数据出错: {str(e)}")
            return 0

    def process_official_statement_data(self, statement_file):
        """Process official-statement data from an Excel export and load it into MySQL.

        Args:
            statement_file: path to the Excel file of official statements.

        Returns:
            Number of records inserted (or read, if the DB is unavailable);
            0 on error.
        """
        try:
            df = pd.read_excel(statement_file)
            logger.info(f"从 {statement_file} 读取了 {len(df)} 条官方声明数据")

            df['crawl_time'] = pd.to_datetime(df['crawl_time'])

            # Ensure engagement counters exist and default missing values to 0.
            defaults = {'reposts': 0, 'comments': 0, 'likes': 0}
            for column, default in defaults.items():
                if column not in df.columns:
                    df[column] = default
            df.fillna(defaults, inplace=True)

            records = df.to_dict('records')

            if self.engine:
                inserted = self._bulk_insert(self.official_statement_table, records)
                logger.info(f"成功导入 {inserted} 条官方声明数据到数据库")
                return inserted
            else:
                logger.warning("数据库未连接，无法导入数据")

            return len(records)

        except Exception as e:
            logger.error(f"处理官方声明数据出错: {str(e)}")
            return 0

    def generate_integrated_data(self, brand_name, crisis_keywords=None):
        """Build the integrated ``crisis_data`` rows for one brand.

        Pulls matching rows from the news, social-media and official-statement
        tables, merges them, flags crisis-related rows by keyword, and inserts
        the result into ``crisis_data``.

        Args:
            brand_name: brand name matched against keyword/title/content.
            crisis_keywords: optional list of keywords that mark a row as
                crisis-related (the first matching keyword per row is kept).

        Returns:
            Number of records inserted; 0 on failure or when nothing matched.
        """
        if not self.engine:
            logger.warning("数据库未连接，无法生成集成数据")
            return 0

        try:
            crisis_keywords = crisis_keywords or []
            # Bound parameter instead of f-string interpolation: the previous
            # queries were vulnerable to SQL injection via brand_name.
            pattern = f"%{brand_name}%"

            # One transaction covers the reads and the final insert, and the
            # connection is released on every exit path (the old code leaked
            # it when no records were found or an error was raised).
            with self.engine.begin() as conn:
                news_query = text("""
                SELECT 'news' as data_type, title, content, source, published_time, url, platform,
                       0 as engagement, 0 as sentiment_score
                FROM news
                WHERE keyword LIKE :pattern
                """)
                news_df = pd.read_sql(news_query, conn, params={"pattern": pattern})

                social_query = text("""
                SELECT 'social' as data_type, title, content, user as source, published_time, url, platform,
                       (IFNULL(likes, 0) + IFNULL(reposts, 0) + IFNULL(comments, 0)) as engagement, 0 as sentiment_score
                FROM social_media
                WHERE keyword LIKE :pattern
                """)
                social_df = pd.read_sql(social_query, conn, params={"pattern": pattern})

                official_query = text("""
                SELECT 'official' as data_type, title, content, source, publish_date as published_time, url, platform,
                       (IFNULL(likes, 0) + IFNULL(reposts, 0) + IFNULL(comments, 0)) as engagement, 0 as sentiment_score
                FROM official_statement
                WHERE title LIKE :pattern OR content LIKE :pattern
                """)
                official_df = pd.read_sql(official_query, conn, params={"pattern": pattern})

                combined_df = pd.concat([news_df, social_df, official_df], ignore_index=True)

                # Parse timestamps; unparseable ones fall back to "now".
                # Plain assignment replaces the chained Series.fillna(inplace=True),
                # which operates on a copy and is deprecated in pandas 2.x.
                combined_df['published_time'] = pd.to_datetime(
                    combined_df['published_time'], errors='coerce'
                )
                combined_df['published_time'] = combined_df['published_time'].fillna(datetime.now())
                combined_df['processed_time'] = datetime.now()
                combined_df['brand_name'] = brand_name

                # Flag crisis-related rows by case-insensitive keyword match.
                combined_df['crisis_related'] = 0
                combined_df['crisis_keyword'] = ''
                for keyword in crisis_keywords:
                    mask = (
                        combined_df['title'].str.contains(keyword, case=False, na=False)
                        | combined_df['content'].str.contains(keyword, case=False, na=False)
                    )
                    combined_df.loc[mask, 'crisis_related'] = 1
                    # Only record the first keyword that matched each row.
                    combined_df.loc[mask & (combined_df['crisis_keyword'] == ''), 'crisis_keyword'] = keyword

                records = combined_df.to_dict('records')

                if not records:
                    logger.warning("没有找到匹配的数据")
                    return 0

                result = conn.execute(self.crisis_data_table.insert(), records)
                inserted = result.rowcount

            logger.info(f"成功生成 {inserted} 条集成危机数据")
            return inserted

        except Exception as e:
            logger.error(f"生成集成数据出错: {str(e)}")
            return 0

    def export_integrated_data(self, brand_name, output_file='integrated_crisis_data.xlsx'):
        """Export a brand's integrated crisis data to an Excel file.

        Args:
            brand_name: brand whose ``crisis_data`` rows should be exported.
            output_file: destination Excel file name.

        Returns:
            True if at least one row was exported, False otherwise.
        """
        if not self.engine:
            logger.warning("数据库未连接，无法导出数据")
            return False

        try:
            # Parameterized query (was f-string interpolated → SQL injection);
            # the context manager also closes the connection on error.
            query = text("""
            SELECT * FROM crisis_data
            WHERE brand_name = :brand
            ORDER BY published_time DESC
            """)
            with self.engine.connect() as conn:
                df = pd.read_sql(query, conn, params={"brand": brand_name})

            if len(df) == 0:
                logger.warning(f"没有找到品牌 '{brand_name}' 的集成数据")
                return False

            df.to_excel(output_file, index=False, engine='openpyxl')
            logger.info(f"成功导出 {len(df)} 条集成数据到 {output_file}")
            return True

        except Exception as e:
            logger.error(f"导出集成数据出错: {str(e)}")
            return False

    def close(self):
        """Close the session and dispose of the engine's connection pool."""
        if self.session:
            self.session.close()
        # Previously the pooled connections were never released.
        if self.engine:
            self.engine.dispose()
        logger.info("数据库连接已关闭")

if __name__ == "__main__":
    # Manual smoke test: ingest the sample crawler exports, then build and
    # dump the integrated crisis dataset for the Xiaomi brand.
    processor = DataProcessor(db_connect=True)

    try:
        # (file path, ingestion handler) pairs — each runs only if the file exists.
        ingest_plan = [
            ("小米手机_news_data.xlsx", processor.process_news_data),
            ("小米手机_social_data.xlsx", processor.process_social_media_data),
            ("小米_official_statements.xlsx", processor.process_official_statement_data),
        ]
        for path, handler in ingest_plan:
            if os.path.exists(path):
                handler(path)

        # Merge all sources into crisis_data, flagging crisis-related rows.
        processor.generate_integrated_data(
            brand_name="小米",
            crisis_keywords=["质量问题", "爆炸", "起火", "召回", "投诉", "用户不满", "退款"],
        )

        # Dump the merged rows to Excel for review.
        processor.export_integrated_data("小米", "小米_integrated_crisis_data.xlsx")

    finally:
        # Always release database resources.
        processor.close()