import os
import re
import requests
import json
import time
import random
import jieba
import jieba.analyse
from snownlp import SnowNLP
from datetime import datetime, timedelta
from urllib.parse import quote  # 添加导入语句
import logging
import pandas as pd
import csv 
# ----------------------
# 1. 数据采集模块
# ----------------------
class DataCollector:
    """Collect Weibo posts through the m.weibo.cn mobile search API.

    Handles request throttling, optional cookie authentication and proxy
    rotation, and persists both processed and raw post data into local
    SQLite databases (with a CSV fallback for processed data).
    """

    def __init__(self, cookie=None, proxy_pool=None):
        """Initialise the Weibo data collector.

        :param cookie: optional cookie string attached to every request for
            authenticated access
        :param proxy_pool: optional list of https proxy addresses; one is
            picked at random per request when provided
        """
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Connection': 'keep-alive',
            'Referer': 'https://m.weibo.cn/search?containerid=100103type=1',
            'X-Requested-With': 'XMLHttpRequest'
        }

        if cookie:
            self.headers['Cookie'] = cookie

        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.proxy_pool = proxy_pool or []
        # Timestamp of the most recent HTTP request; used for rate limiting.
        self.last_request_time = 0

    def save_to_database(self, data):
        """Store processed posts (incl. police-related metadata) in SQLite.

        Falls back to a UTF-8-BOM CSV backup when the database write fails,
        so no collected batch is ever silently lost.

        :param data: list of post dicts as produced by ``crawl_weibo``,
            optionally enriched with 'sentiment', 'keywords' and
            'alert_type' fields
        """
        import sqlite3
        conn = sqlite3.connect('public_opinion.db')
        try:
            c = conn.cursor()
            c.execute('''CREATE TABLE IF NOT EXISTS posts
                        (id TEXT PRIMARY KEY, text TEXT, user TEXT, 
                        reposts INTEGER, comments INTEGER, likes INTEGER,
                        timestamp TEXT, platform TEXT, sentiment REAL,
                        keywords TEXT, alert_type TEXT, risk_label INTEGER,
                        police_terms TEXT, context_snippet TEXT)''')

            for post in data:
                # Normalise 'alert_type': callers may supply either a plain
                # string or a dict with type/risk_level/keywords/context.
                alert_info = post.get('alert_type', {})
                if isinstance(alert_info, str):
                    alert_info = {'type': alert_info, 'risk_level': 0}

                # Coerce every field to its column type so one malformed
                # post cannot abort the whole batch.
                safe_post = {
                    'id': str(post.get('id', '')),
                    'text': str(post.get('text', '')),
                    'user': str(post.get('user', '')),
                    'reposts': int(post.get('reposts', 0)),
                    'comments': int(post.get('comments', 0)),
                    'likes': int(post.get('likes', 0)),
                    'timestamp': str(post.get('timestamp', '')),
                    'platform': str(post.get('platform', 'weibo')),
                    'sentiment': float(post.get('sentiment', 0.5)),
                    'keywords': ','.join(map(str, post.get('keywords', []))),
                    'alert_type': str(alert_info.get('type', '其他')),
                    'risk_label': int(alert_info.get('risk_level', 0)),
                    # map(str, ...) so non-string terms cannot break join()
                    'police_terms': ','.join(map(str, alert_info.get('keywords', []))),
                    'context_snippet': str(alert_info.get('context', ''))[:200]
                }

                # Name the columns explicitly instead of relying on the dict
                # insertion order above happening to match the table schema.
                c.execute('''INSERT OR REPLACE INTO posts
                            (id, text, user, reposts, comments, likes,
                             timestamp, platform, sentiment, keywords,
                             alert_type, risk_label, police_terms,
                             context_snippet)
                            VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)''',
                        tuple(safe_post.values()))
            conn.commit()
            logging.info(f"成功存储 {len(data)} 条数据（含 {sum(1 for p in data if p.get('alert_type', {}).get('type') == '涉警事件')} 条涉警舆情）")
        except Exception as e:
            logging.error(f"数据库存储失败: {str(e)}")
            # Best-effort CSV backup so the batch is not lost.
            pd.DataFrame(data).to_csv(
                "backup_data.csv",
                index=False,
                encoding='utf_8_sig',   # BOM so Excel opens it correctly
                quoting=csv.QUOTE_ALL,  # quote every field
                escapechar='\\',        # escape character
                errors='replace'        # replace characters that cannot encode
            )
            logging.info("备份数据已保存为 backup_data.csv (UTF-8 BOM编码)")
        finally:
            conn.close()

    def save_raw_data(self, raw_posts):
        """Persist the unmodified post payloads as JSON into raw_data.db.

        :param raw_posts: list of post dicts to archive verbatim
        """
        import sqlite3
        raw_conn = None  # keeps the finally clause safe if connect() fails
        try:
            raw_conn = sqlite3.connect('raw_data.db')
            raw_cursor = raw_conn.cursor()

            # Create the raw-data table on first use.
            raw_cursor.execute('''
                CREATE TABLE IF NOT EXISTS raw_weibo_data (
                    id TEXT PRIMARY KEY,
                    raw_json TEXT NOT NULL,
                    stored_at DATETIME DEFAULT CURRENT_TIMESTAMP
                )
            ''')

            # Insert each post as serialised JSON, skipping duplicates.
            for post in raw_posts:
                post_id = post.get('id', '')
                raw_json = json.dumps(post, ensure_ascii=False)
                raw_cursor.execute(
                    "INSERT OR IGNORE INTO raw_weibo_data (id, raw_json) VALUES (?, ?)",
                    (post_id, raw_json)
                )

            raw_conn.commit()
            logging.info(f"成功存储{len(raw_posts)}条原始数据到raw_data.db")
        except Exception as e:
            logging.error(f"原始数据存储失败: {str(e)}")
        finally:
            if raw_conn is not None:
                raw_conn.close()

    def get_containerid(self, keyword):
        """Resolve the container id used by the realtime-search endpoint.

        Tries the known response layouts in order and falls back to the
        generic search container when none of them matches.
        """
        search_url = f"https://m.weibo.cn/api/container/getIndex?type=all&q={keyword}"
        response = None  # stays None when the request itself fails
        try:
            response = self.session.get(search_url, headers=self.headers)
            data = response.json()

            if 'data' in data:
                # Layout 1: top-level cardlistInfo.
                if 'cardlistInfo' in data['data']:
                    return data['data']['cardlistInfo']['containerid']
                # Layout 2: tab list (realtime / hot tabs).
                if 'tabsInfo' in data['data']:
                    for tab in data['data']['tabsInfo']['tabs']:
                        if tab.get('title') in ('实时', '热门'):
                            return tab['containerid']
                # Layout 3: scheme of the first card in the card group.
                if 'cards' in data['data'] and len(data['data']['cards']) > 0:
                    return data['data']['cards'][0]['card_group'][0]['scheme'].split('=')[-1]

        except Exception as e:
            logging.error(f"获取容器ID失败: {str(e)}")
            # Only log the body when a response actually came back.
            if response is not None:
                logging.debug(f"API响应内容: {response.text}")
        return f"100103type=1&q={keyword}"  # final fallback

    def crawl_weibo(self, keyword, pages=20, start_date=None, end_date=None):
        """Crawl search results for *keyword* from the mobile Weibo API.

        :param keyword: search keyword
        :param pages: maximum number of result pages to fetch (capped at 50,
            the most Weibo will serve)
        :param start_date: 'YYYY-MM-DD-H' lower bound; defaults to 30 days ago
        :param end_date: 'YYYY-MM-DD-H' upper bound; defaults to today, 23h
        :return: list of normalised post dicts
        """
        data = []
        containerid = self.get_containerid(keyword)

        # Default time window: the last 30 days.
        today = datetime.now()
        if not start_date:
            start_date = (today - timedelta(days=30)).strftime('%Y-%m-%d-0')
        if not end_date:
            end_date = today.strftime('%Y-%m-%d-23')

        max_pages = min(pages, 50)  # Weibo serves at most 50 result pages

        for page in range(1, max_pages + 1):
            try:
                # 1. Longer random pause on every 3rd page to avoid bans.
                current_time = time.time()
                if page % 3 == 0 and current_time - self.last_request_time < 10:
                    sleep_time = random.uniform(5, 10)
                    logging.info(f"关键页延迟 {sleep_time:.1f}秒 防封禁")
                    time.sleep(sleep_time)

                # 2. Build the request URL (with time-range filter).
                url = (
                    f"https://m.weibo.cn/api/container/getIndex?"
                    f"containerid={containerid}&page={page}&"
                    f"q={quote(keyword)}&"
                    f"starttime={start_date}&endtime={end_date}"
                )

                # 3. Request timeout plus optional proxy rotation.
                request_params = {'timeout': 15}
                if self.proxy_pool:
                    proxy = random.choice(self.proxy_pool)
                    request_params['proxies'] = {"https": proxy}
                    logging.debug(f"使用代理IP: {proxy}")

                # 4. Enforce a minimum interval of 1 second between requests.
                elapsed = current_time - self.last_request_time
                if elapsed < 1.0:
                    time.sleep(1.0 - elapsed)

                response = self.session.get(url, **request_params)
                self.last_request_time = time.time()

                # 5. Skip pages that return no usable payload.
                json_data = response.json()
                if not json_data.get('ok') or not json_data.get('data'):
                    logging.warning(f"第{page}页无数据，跳过")
                    continue

                # 6. Parse the card list of this page.
                cards = json_data['data'].get('cards', [])
                page_data_count = 0

                for card in cards:
                    if 'card_group' not in card:
                        continue

                    for item in card['card_group']:
                        if 'mblog' in item:
                            blog = item['mblog']

                            # 7. Normalise the raw blog entry.
                            blog_data = {
                                'id': blog['id'],
                                'text': self.clean_text(blog.get('text', '')),
                                'user': blog['user']['screen_name'],
                                'reposts': blog['reposts_count'],
                                'comments': blog['comments_count'],
                                'likes': blog['attitudes_count'],
                                'timestamp': self.format_time(blog['created_at']),
                                'platform': 'weibo',
                                'page': page
                            }

                            data.append(blog_data)
                            page_data_count += 1

                # 8. Progress log.
                logging.info(f"已采集第{page}/{max_pages}页, 本页数据: {page_data_count}条, 累计: {len(data)}条")

                # 9. Stop early once a page comes back empty.
                if page_data_count == 0:
                    logging.info(f"第{page}页无数据，提前终止采集")
                    break

                # 10. Short random pause between ordinary pages.
                time.sleep(random.uniform(0, 1))
            except Exception as e:
                # One failed page (network error, malformed JSON, missing
                # field) must not abort the whole crawl.
                logging.error(f"第{page}页采集失败: {str(e)}")
                continue

        # 11. Final statistics.
        logging.info(f"采集完成! 总页数: {max_pages}, 有效数据: {len(data)}条")
        return data

    def clean_text(self, text):
        """Clean a Weibo post body for downstream NLP processing."""
        # Strip HTML tags.
        text = re.sub(r'<.*?>', '', text)
        # Strip #topic# hashtags.
        text = re.sub(r'#\w+#', '', text)
        # Strip @user mentions (ASCII word chars or CJK).
        text = re.sub(r'@[\w\u4e00-\u9fa5]+', '', text)
        # Strip URLs.
        text = re.sub(r'https?://\S+', '', text)
        # Remove zero-width spaces and HTML non-breaking spaces.
        text = text.replace('\u200b', '').replace('&nbsp;', ' ')
        return text.strip()

    def format_time(self, weibo_time):
        """Normalise Weibo's 'Tue May 07 10:30:00 +0800 2024' timestamps to
        'YYYY-MM-DD HH:MM:SS'; other formats are returned unchanged."""
        try:
            if re.match(r'\w{3} \w{3} \d{2} \d{2}:\d{2}:\d{2} \+\d{4} \d{4}', weibo_time):
                return datetime.strptime(weibo_time, '%a %b %d %H:%M:%S %z %Y').strftime('%Y-%m-%d %H:%M:%S')
            else:
                return weibo_time
        except (ValueError, TypeError):
            # Narrow except: a bare one would also swallow KeyboardInterrupt.
            return weibo_time
# ----------------------
# 2. NLP处理模块
# ----------------------
# 增强警察术语识别
class NLPProcessor:
    """NLP utilities for Weibo posts: keyword extraction, police-event
    classification, sentiment analysis and text augmentation."""

    # Candidate locations for the police-term dictionary, tried in order.
    _POLICE_TERM_FILES = (
        r"D:\Users\Lenovo\Desktop\作业\python作业\警务信息分析基础\police_terms.txt",
        "police_terms.txt",
    )
    # Minimal built-in fallback so classification keeps working when no
    # dictionary file is available at either location.
    _DEFAULT_POLICE_TERMS = ("警察", "公安", "民警", "交警", "辅警", "袭警", "执法")

    def __init__(self):
        """Load the Chinese stop-word list and initialise model weights."""
        self.stopwords = self._load_stopwords("Stopwords_Chinese.txt")

        # Model parameters (weights/coefficients used by the risk model).
        self.alpha = 0.8    # sentiment weight coefficient
        self.beta = 1.2     # keyword risk coefficient
        self.gamma = 0.5    # time decay coefficient
        self.theta = 0.7    # social-network influence coefficient
        self.epsilon = 0.6  # context-clue weight
        self.phi = 0.9      # information propagation speed
        self.chi = 0.8      # risk prediction threshold
        self.omega = 0.7    # risk update speed
        self.zeta = 0.5     # risk propagation speed
        self.kappa = 0.6    # risk decay coefficient

    @staticmethod
    def _load_stopwords(path):
        """Read one stop word per line from *path*, trying UTF-8 then GBK.

        Returns an empty set (with an error logged) when the file cannot be
        read at all, so the processor stays usable without it.  The original
        GBK fallback opened "" (empty filename) and an absent file raised an
        uncaught FileNotFoundError — both fixed here.
        """
        for encoding in ('utf-8', 'gbk'):
            try:
                with open(path, "r", encoding=encoding) as f:
                    return set(line.strip() for line in f if line.strip())
            except UnicodeDecodeError:
                continue  # wrong encoding — try the next one
            except OSError as e:
                logging.error(f"无法读取停用词文件: {str(e)}")
                return set()
        logging.error(f"无法读取停用词文件: {path}")
        return set()

    def extract_keywords(self, text, topK=10):
        """Extract keywords with jieba's TextRank (with stop-word filtering).

        NOTE: the original docstring claimed TF-IDF, but the implementation
        uses ``jieba.analyse.textrank``.

        :param text: input text
        :param topK: number of keywords to return
        :return: list of up to *topK* keyword strings (empty on failure)
        """
        try:
            # 1. Extract candidate keywords with weights (over-fetch 2x so
            #    stop-word filtering still leaves enough candidates).
            keywords_with_weight = jieba.analyse.textrank(
                text,
                topK=topK * 2,
                withWeight=True,
                allowPOS=('n', 'vn', 'v')
            )

            # 2. Drop stop words.
            filtered_keywords = [
                (word, weight)
                for word, weight in keywords_with_weight
                if word not in self.stopwords
            ]

            # 3. Keep the top-K survivors.
            return [kw[0] for kw in filtered_keywords[:topK]]

        except Exception as e:
            logging.error(f"关键词提取失败: {str(e)}")
            return []

    def classify_alert_type(self, text):
        """Classify *text* as a police-related event or ordinary opinion.

        :return: dict with 'type', 'risk_level' (number of matched police
            terms) and 'keywords' (the matched terms)
        """
        police_terms = self._load_police_terms()

        found_terms = [term for term in police_terms if term in text]
        if found_terms:
            return {
                'type': '涉警事件',
                'risk_level': len(found_terms),  # risk grows with match count
                'keywords': found_terms
            }
        return {'type': '普通舆情', 'risk_level': 0, 'keywords': []}

    @classmethod
    def _load_police_terms(cls):
        """Load police terms from the first readable dictionary file.

        The original hard-coded an absolute desktop path and crashed when it
        was missing; now a relative path and a built-in list act as fallbacks.
        """
        for path in cls._POLICE_TERM_FILES:
            try:
                with open(path, "r", encoding='utf-8') as f:
                    terms = [line.strip() for line in f if line.strip()]
                if terms:
                    return terms
            except OSError:
                continue  # try the next candidate location
        return list(cls._DEFAULT_POLICE_TERMS)

    def sentiment_analysis(self, text):
        """Sentiment analysis backed by SnowNLP.

        :return: dict with 'score' (0-1, rounded to 2 decimals) and 'label'
            ('positive' above 0.6, otherwise 'negative'; 'neutral' default
            on failure)
        """
        try:
            s = SnowNLP(text)
            return {
                'score': round(s.sentiments, 2),
                'label': 'positive' if s.sentiments > 0.6 else 'negative'
            }
        except Exception as e:
            # Use logging (not print) for consistency with the rest of the file.
            logging.error(f"情感分析失败: {str(e)}")
            return {'score': 0.5, 'label': 'neutral'}  # neutral default

    def random_insertion(self, text, keywords):
        """Police-context-aware random-insertion augmentation.

        :param text: original text
        :param keywords: extracted keywords used to pick an insertion anchor
        :return: augmented text, or the original text when no anchor applies
        """
        # High-frequency co-occurring terms per police keyword (keeps the
        # inserted word contextually plausible).
        police_related_terms = {
            "警察": ["执勤", "巡逻", "盘查", "出警"],
            "公安": ["行动", "通告", "联合执法", "专项治理"],
            "袭警": ["暴力抗法", "持械袭击", "围攻"],
            "执法": ["现场", "录像", "全过程", "文明"],
            "举报": ["实名", "匿名", "线索", "核查"],
            "舆情": ["发酵", "管控", "引导", "通报"]
        }

        # Only keywords with a known association list can serve as anchors.
        valid_keywords = [word for word in keywords if word in police_related_terms]
        if not valid_keywords:
            return text  # nothing usable — return the original text

        # Pick exactly one anchor to avoid over-augmenting.
        anchor_word = random.choice(valid_keywords)

        # Pick one associated term to insert.
        related_terms = police_related_terms.get(anchor_word, [])
        if not related_terms:
            return text
        insert_word = random.choice(related_terms)

        # Locate the anchor's first occurrence via word segmentation so the
        # insertion point respects token boundaries.
        word_list = list(jieba.cut(text))
        try:
            anchor_index = word_list.index(anchor_word)
        except ValueError:
            return text  # anchor absent from segmentation (tokeniser差异)

        # Insert right after the anchor (natural Chinese word order).
        word_list.insert(anchor_index + 1, insert_word)
        return "".join(word_list)