# Standard library
import json
import logging
import os
import re
import sys
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from typing import Any, Dict, List, Optional

# Third-party
import pandas as pd
import requests
from bs4 import BeautifulSoup

# Add the project root to the import path
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))

from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class DataCrawler:
    """Crawler service that builds a knowledge base for ride-hailing drivers.

    Fetches policy/regulation pages and Didi platform knowledge-base
    articles over HTTP, cleans the extracted text, and stores the chunked
    results in a FAISS vector database for downstream retrieval.
    """

    def __init__(self):
        # Embedding model used when building the FAISS index.
        self.embeddings = DashScopeEmbeddings(
            dashscope_api_key=os.getenv("DASHSCOPE_API_KEY"),
            model="text-embedding-v4"
        )
        # Splits long articles into overlapping chunks for embedding.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
            length_function=len,
        )
        # Shared session with a browser-like User-Agent; some target
        # sites reject the default python-requests UA.
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        })

        # On-disk location of the vector database.
        self.data_dir = "Didi/ai/data"
        os.makedirs(self.data_dir, exist_ok=True)
        self.vector_db_path = f"{self.data_dir}/vector_db"

    def _crawl_urls(self, url_infos: List[Dict[str, str]],
                    max_workers: int) -> List[Dict[str, Any]]:
        """Fetch a batch of pages concurrently and collect the successes.

        Shared worker for crawl_policy_documents() and
        crawl_didi_knowledge(), which previously duplicated this
        thread-pool loop verbatim. Per-page failures are logged and
        skipped rather than aborting the whole batch.

        Args:
            url_infos: dicts with "url", "title" and "category" keys.
            max_workers: thread-pool size for concurrent fetching.

        Returns:
            List of page records produced by _crawl_single_page().
        """
        results: List[Dict[str, Any]] = []
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_url = {
                executor.submit(self._crawl_single_page, url_info): url_info
                for url_info in url_infos
            }

            for future in as_completed(future_to_url):
                try:
                    result = future.result()
                    if result:
                        results.append(result)
                        logger.info(f"成功爬取: {result['title']}")
                except Exception as e:
                    logger.error(f"爬取失败: {e}")

        return results

    def crawl_policy_documents(self) -> List[Dict[str, Any]]:
        """Crawl policy/regulation documents for ride-hailing drivers.

        Returns:
            List of page records (title/url/category/content/...).
        """
        policy_urls = [
            {
                "url": "https://www.gov.cn/zhengce/2022-12/06/content_5730384.htm",
                "title": "网络预约出租汽车经营服务管理暂行办法",
                "category": "政策法规"
            },
            {
                "url": "https://www.gov.cn/zhengce/zhengceku/202504/content_7017689.htm", 
                "title": "交通强国建设试点工作管理办法",
                "category": "政策法规"
            },
            # Additional documents aimed at professional ride-hailing drivers
            {
                "url": "https://jtys.foshan.gov.cn/gkmlpt/content/6/6160/mpost_6160256.html",
                "title": "佛山市网络预约出租汽车驾驶员证申请指引",
                "category": "法规与资质"
            },
            {
                "url": "http://m.toutiao.com/group/7521729703007322651/",
                "title": "昆明市网络预约出租汽车经营服务管理实施细则",
                "category": "法规与资质"
            },
            {
                "url": "https://m.renrendoc.com/paper/444913980.html",
                "title": "道路客运汽车驾驶员基础技能培训手册",
                "category": "基础技能与培训"
            },
            {
                "url": "https://m.renrendoc.com/paper/443659066.html",
                "title": "网络预约出租汽车司机基础技能培训手册",
                "category": "基础技能与培训"
            },
            {
                "url": "http://m.163.com/dy/article/K2EB17U30556C0VE.html",
                "title": "喜行约车网约车安全行车指南",
                "category": "安全与规范"
            },
            {
                "url": "http://m.toutiao.com/group/7530588332549276200/",
                "title": "东莞交警网约车驾驶员安全宣教手册",
                "category": "安全与规范"
            },
            {
                "url": "https://m.hexun.com/auto/2024-08-29/214227479.html",
                "title": "网约车司机收入构成与市场趋势分析",
                "category": "运营与收入"
            },
            {
                "url": "https://blog.51cto.com/u_11956468/13690910",
                "title": "滴滴派单算法原理与优化策略",
                "category": "运营与收入"
            },
            {
                "url": "https://www.pcauto.com.cn/ask/1304703.html",
                "title": "网约车车险购买指南",
                "category": "车辆与保险"
            },
            {
                "url": "https://www.autohome.com.cn/ask/8958395.html",
                "title": "电动车跑滴滴电池保养指南",
                "category": "车辆与保险"
            },
            {
                "url": "http://m.toutiao.com/group/7525761711941370387/",
                "title": "网约车司机肠胃病防治手册",
                "category": "健康与权益"
            },
            {
                "url": "https://www.wuhan.gov.cn/sy/whyw/202312/t20231218_2322332.shtml",
                "title": "武汉市网约车驾驶员合同风险提示",
                "category": "健康与权益"
            },
            {
                "url": "https://m.sohu.com/a/901636429_122129912/",
                "title": "新手司机服务分提升全攻略",
                "category": "工具与策略"
            },
            {
                "url": "https://cloud.tencent.com/developer/news/2372308",
                "title": "多平台接单神器使用指南",
                "category": "工具与策略"
            },
            {
                "url": "https://www.autohome.com.cn/ask/15568784.html",
                "title": "滴滴网约车司机社群加入指南",
                "category": "社区与动态"
            },
            {
                "url": "https://m.36kr.com/p/1722163412993",
                "title": "Uber司机论坛运营启示录",
                "category": "社区与动态"
            },
            {
                "url": "https://www.chinatax.gov.cn/chinatax/n810219/n810724/c5237287/content.html",
                "title": "网约车司机涉税信息报送政策解读",
                "category": "税务与合规"
            },
            {
                "url": "https://m.lawtime.cn/wenda/q_51597845.html",
                "title": "网约车技术故障处理流程",
                "category": "税务与合规"
            },
            {
                "url": "https://www.lsz.gov.cn/zwfw/jcfw/jtcx/wyc/",
                "title": "网约车行业技术创新白皮书",
                "category": "行业与趋势"
            },
            {
                "url": "https://www.putian.gov.cn/znhdwdk/sjtysj/dlys/202312/t20231207_1886367.htm",
                "title": "2025年网约车市场趋势报告",
                "category": "行业与趋势"
            }
        ]

        return self._crawl_urls(policy_urls, max_workers=3)

    def crawl_didi_knowledge(self) -> List[Dict[str, Any]]:
        """Crawl Didi platform knowledge-base articles.

        Returns:
            List of page records (title/url/category/content/...).
        """
        didi_urls = [
            {
                "url": "http://www.9show.net/didi/zhuce/2022/0908/47.html",
                "title": "滴滴车主注册流程",
                "category": "注册流程"
            },
            {
                "url": "http://www.9show.net/didi/dayi/2022/0218/44.html",
                "title": "实时单与预约单的区别",
                "category": "订单类型"
            },
            {
                "url": "http://www.9show.net/didi/xinsiji/2022/0217/43.html",
                "title": "公司车辆是否可以加入滴滴",
                "category": "车辆要求"
            },
            {
                "url": "http://www.9show.net/didi/dayi/2022/0210/41.html",
                "title": "滴滴登录手机号更换",
                "category": "账号管理"
            },
            {
                "url": "http://www.9show.net/didi/xinsiji/2021/1027/22.html",
                "title": "跑滴滴如何能接到更多订单",
                "category": "接单技巧"
            },
            {
                "url": "http://www.didibj.com/didi/dayi/2022/0106/36.html",
                "title": "滴滴网约车司机管理和派单规则",
                "category": "派单规则"
            }
        ]

        return self._crawl_urls(didi_urls, max_workers=4)

    def _crawl_single_page(self, url_info: Dict[str, str]) -> Optional[Dict[str, Any]]:
        """Fetch one page and extract its main text.

        Args:
            url_info: dict with "url", "title" and "category" keys.

        Returns:
            A record dict on success, or None when the request fails or
            no usable content could be extracted.
        """
        try:
            response = self.session.get(url_info["url"], timeout=10)
            response.raise_for_status()
            # Fix: honour the detected page encoding instead of forcing
            # UTF-8 — several target portals serve GBK-encoded HTML and
            # would be mojibake under a hard-coded 'utf-8'.
            response.encoding = response.apparent_encoding or 'utf-8'

            soup = BeautifulSoup(response.text, 'html.parser')

            # Extract the main body text.
            content = self._extract_content(soup)

            if content:
                return {
                    "title": url_info["title"],
                    "url": url_info["url"],
                    "category": url_info["category"],
                    "content": content,
                    "crawl_time": datetime.now().isoformat(),
                    "source": "web_crawler"
                }
        except Exception as e:
            logger.error(f"爬取页面失败 {url_info['url']}: {e}")
        # Explicit None on both the failure and the empty-content path
        # (the original fell through implicitly when content was empty).
        return None

    def _extract_content(self, soup: "BeautifulSoup") -> str:
        """Extract the main body text from a parsed page.

        Tries common content-container selectors from most to least
        specific, falling back to the whole <body>. A match of 100
        characters or fewer is treated as boilerplate and the next
        selector is tried.
        """
        # Drop script/style nodes so they don't pollute get_text().
        for script in soup(["script", "style"]):
            script.decompose()

        content_selectors = [
            'div.content',
            'div.article-content',
            'div.main-content',
            'article',
            'div.post-content',
            'div.entry-content',
            'body'
        ]

        content = ""
        for selector in content_selectors:
            elements = soup.select(selector)
            if elements:
                content = elements[0].get_text(strip=True)
                if len(content) > 100:  # long enough to be real content
                    break

        return content

    def process_data(self, raw_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Clean crawled records and drop empty/near-empty ones.

        Args:
            raw_data: records from the crawl methods (None entries and
                records without content are silently skipped).

        Returns:
            Cleaned records enriched with word_count and processed_time.
        """
        processed_data = []

        for item in raw_data:
            if not item or not item.get('content'):
                continue

            cleaned_content = self._clean_content(item['content'])

            # Drop pages whose cleaned text is too short to be useful.
            if len(cleaned_content) > 50:
                processed_data.append({
                    "title": item['title'],
                    "url": item['url'],
                    "category": item['category'],
                    "content": cleaned_content,
                    "crawl_time": item['crawl_time'],
                    "source": item['source'],
                    "word_count": len(cleaned_content),
                    "processed_time": datetime.now().isoformat()
                })

        return processed_data

    def _clean_content(self, content: str) -> str:
        """Normalise crawled text: collapse whitespace, strip noise."""
        # Collapse all runs of whitespace into single spaces.
        content = re.sub(r'\s+', ' ', content)

        # Keep word characters, whitespace, CJK ideographs and common
        # Chinese punctuation; drop everything else.
        content = re.sub(r'[^\w\s\u4e00-\u9fff。，！？；：""''（）【】]', '', content)

        # Remove boilerplate/advertising markers. Substituted one pattern
        # at a time on purpose: the removal order matters for inputs where
        # the markers overlap, so a single alternation is NOT equivalent.
        ad_patterns = [
            r'广告',
            r'推广',
            r'点击',
            r'关注我们',
            r'版权所有'
        ]

        for pattern in ad_patterns:
            content = re.sub(pattern, '', content)

        return content.strip()

    def create_vector_database(self, processed_data: List[Dict[str, Any]]) -> Optional[str]:
        """Chunk the processed records and build/persist a FAISS index.

        Args:
            processed_data: cleaned records from process_data().

        Returns:
            Path of the saved vector database, or None on failure (or
            when there is nothing to index).
        """
        try:
            # Split each record into overlapping chunks, carrying the
            # record's provenance along as per-chunk metadata.
            documents = []
            for item in processed_data:
                chunks = self.text_splitter.split_text(item['content'])

                for i, chunk in enumerate(chunks):
                    documents.append({
                        "page_content": chunk,
                        "metadata": {
                            "title": item['title'],
                            "category": item['category'],
                            "url": item['url'],
                            "chunk_id": i,
                            "source": item['source'],
                            "crawl_time": item['crawl_time']
                        }
                    })

            # Guard: with zero documents the original code crashed into
            # the except via `None.save_local`; fail cleanly instead.
            if not documents:
                logger.error("创建向量数据库失败: 没有可用的文档块")
                return None

            # Embed in small batches to stay under the embedding API's
            # per-request batch-size limit.
            batch_size = 5
            vectorstore = None

            for i in range(0, len(documents), batch_size):
                batch = documents[i:i + batch_size]
                texts = [doc["page_content"] for doc in batch]
                metadatas = [doc["metadata"] for doc in batch]

                if vectorstore is None:
                    # First batch creates the store...
                    vectorstore = FAISS.from_texts(
                        texts=texts,
                        embedding=self.embeddings,
                        metadatas=metadatas
                    )
                else:
                    # ...subsequent batches are appended to it.
                    vectorstore.add_texts(texts=texts, metadatas=metadatas)

            # Persist the index to disk.
            vectorstore.save_local(self.vector_db_path)

            logger.info(f"向量数据库创建成功，共 {len(documents)} 个文档块")
            return self.vector_db_path

        except Exception as e:
            logger.error(f"创建向量数据库失败: {e}")
            return None

    def crawl_all_sources(self) -> List[Dict[str, Any]]:
        """Crawl every data source and return the cleaned records.

        Each source is attempted independently so a total failure of one
        source does not abort the other.
        """
        logger.info("开始爬取所有数据源...")

        all_data = []

        # 1. Policy/regulation documents
        try:
            policy_data = self.crawl_policy_documents()
            all_data.extend(policy_data)
            logger.info(f"政策法规数据爬取完成，共 {len(policy_data)} 条")
        except Exception as e:
            logger.error(f"政策法规数据爬取失败: {e}")

        # 2. Didi knowledge base
        try:
            didi_data = self.crawl_didi_knowledge()
            all_data.extend(didi_data)
            logger.info(f"滴滴知识库数据爬取完成，共 {len(didi_data)} 条")
        except Exception as e:
            logger.error(f"滴滴知识库数据爬取失败: {e}")

        # 3. Clean and filter
        processed_data = self.process_data(all_data)
        logger.info(f"数据处理完成，共 {len(processed_data)} 条有效数据")

        return processed_data

    def run_full_crawl(self) -> Dict[str, Any]:
        """Run the full pipeline: crawl, clean, index, persist.

        Returns:
            Summary dict with record counts, output paths and status.
        """
        logger.info("开始数据收集流程...")

        # 1. Policy/regulation documents
        logger.info("开始爬取政策法规...")
        policy_data = self.crawl_policy_documents()

        # 2. Didi knowledge base
        logger.info("开始爬取滴滴知识库...")
        didi_data = self.crawl_didi_knowledge()

        # 3. Merge the raw results
        all_raw_data = policy_data + didi_data
        logger.info(f"共收集到 {len(all_raw_data)} 个原始数据")

        # 4. Clean and filter
        logger.info("开始数据处理...")
        processed_data = self.process_data(all_raw_data)
        logger.info(f"处理完成，共 {len(processed_data)} 个有效数据")

        # 5. Build the vector database
        logger.info("开始创建向量数据库...")
        vector_db_path = self.create_vector_database(processed_data)

        # 6. Persist the processed records as JSON, alongside the vector
        # DB (derived from self.data_dir for consistency; same path as
        # the previous hard-coded literal).
        data_save_path = f"{self.data_dir}/raw_data.json"
        os.makedirs(os.path.dirname(data_save_path), exist_ok=True)

        with open(data_save_path, 'w', encoding='utf-8') as f:
            json.dump(processed_data, f, ensure_ascii=False, indent=2)

        return {
            "raw_data_count": len(all_raw_data),
            "processed_data_count": len(processed_data),
            "vector_db_path": vector_db_path,
            "raw_data_path": data_save_path,
            "status": "success"
        }

 