import requests
from bs4 import BeautifulSoup
import pandas as pd
from src.models.database import db, IndustryData
from src.services.llm_service import LLMService
import threading
import time
import uuid
from datetime import datetime, date
import json
import re
from urllib.parse import urljoin, urlparse
import os

class CrawlerService:
    """Industry-data crawling service (行业数据爬取服务).

    Crawls a fixed set of Chinese statistics / pharma websites for articles
    whose titles match user-supplied keywords, extracts key metrics via the
    LLM service, and persists results as ``IndustryData`` rows.  Crawl jobs
    run in daemon threads and report progress through ``self.running_tasks``.
    """

    def __init__(self):
        self.llm_service = LLMService()
        # task_id -> task metadata dict.  NOTE(review): mutated from worker
        # threads without a lock; assumed low-contention — confirm if tasks
        # can be started concurrently from multiple request handlers.
        self.running_tasks = {}
        # Shared HTTP session with a browser-like User-Agent to reduce
        # trivial bot blocking.
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        })

        # Pre-configured data sources, keyed by hostname.
        self.data_sources = {
            'stats.gov.cn': {
                'name': '国家统计局',
                'base_url': 'http://www.stats.gov.cn',
                'search_patterns': [
                    '/tjsj/zxfb/',  # latest releases
                    '/tjsj/sjjd/',  # data interpretation
                ]
            },
            'pharmnet.com.cn': {
                'name': '医药网',
                'base_url': 'http://www.pharmnet.com.cn',
                'search_patterns': [
                    '/news/',       # news
                    '/market/',     # market analysis
                ]
            },
            'cde.org.cn': {
                'name': '国家药监局药审中心',
                'base_url': 'https://www.cde.org.cn',
                'search_patterns': [
                    '/news/',       # news updates
                    '/main/xxgk/',  # information disclosure
                ]
            }
        }

    @staticmethod
    def _matches_keywords(title, keywords):
        """Return True if *title* contains any of *keywords* (substring match).

        Note: an empty *keywords* list matches nothing, so no articles are
        collected — presumably intentional; confirm against callers.
        """
        return any(keyword in title for keyword in keywords)

    @staticmethod
    def _first_number(text):
        """Extract the first numeric token from *text* as a float.

        Returns None when no parseable number is present.  Tolerates tokens
        like ``"..."`` that match ``[\\d.]+`` but are not valid floats, so a
        malformed metric no longer aborts saving the whole record.
        """
        numbers = re.findall(r'[\d.]+', str(text))
        if not numbers:
            return None
        try:
            return float(numbers[0])
        except ValueError:
            return None

    def start_crawl_task(self, sources, keywords):
        """Start an asynchronous crawl task over *sources* for *keywords*.

        Returns a task_id (UUID string) that can be polled via
        :meth:`get_crawl_task_status`.
        """
        task_id = str(uuid.uuid4())

        def run_crawl():
            try:
                self._execute_crawl_task(task_id, sources, keywords)
            except Exception as e:
                print(f"爬取任务失败 {task_id}: {str(e)}")

        thread = threading.Thread(target=run_crawl)
        thread.daemon = True

        # BUGFIX: register the task entry *before* starting the thread.
        # Previously the thread was started first, so a fast-finishing (or
        # fast-failing) worker could look up running_tasks[task_id] before
        # the entry existed, silently dropping its completed/failed status.
        self.running_tasks[task_id] = {
            'thread': thread,
            'status': 'running',
            'sources': sources,
            'keywords': keywords,
            'started_at': datetime.now()
        }
        thread.start()

        return task_id

    def _execute_crawl_task(self, task_id, sources, keywords):
        """Worker body: crawl each source, then record the final status."""
        try:
            crawled_count = 0

            for source in sources:
                if source not in self.data_sources:
                    print(f"不支持的数据源: {source}")
                    continue

                print(f"开始爬取数据源: {source}")

                # Dispatch to the source-specific crawl strategy.
                if source == 'stats.gov.cn':
                    count = self._crawl_stats_gov(keywords)
                elif source == 'pharmnet.com.cn':
                    count = self._crawl_pharmnet(keywords)
                elif source == 'cde.org.cn':
                    count = self._crawl_cde(keywords)
                else:
                    count = self._crawl_generic_source(source, keywords)

                crawled_count += count
                print(f"从 {source} 爬取了 {count} 条数据")

                # Throttle between sources to be polite to the servers.
                time.sleep(2)

            # Record completion so status polling sees the final state.
            if task_id in self.running_tasks:
                self.running_tasks[task_id]['status'] = 'completed'
                self.running_tasks[task_id]['crawled_count'] = crawled_count
                self.running_tasks[task_id]['completed_at'] = datetime.now()

            print(f"爬取任务完成 {task_id}: 总共爬取 {crawled_count} 条数据")

        except Exception as e:
            if task_id in self.running_tasks:
                self.running_tasks[task_id]['status'] = 'failed'
                self.running_tasks[task_id]['error'] = str(e)
            # Bare raise preserves the original traceback (was `raise e`).
            raise

    def _crawl_stats_gov(self, keywords):
        """Crawl the National Bureau of Statistics latest-releases page.

        Returns the number of articles saved.
        """
        crawled_count = 0

        try:
            # Latest-releases listing page.
            url = "http://www.stats.gov.cn/tjsj/zxfb/"
            response = self.session.get(url, timeout=10)
            response.encoding = 'utf-8'

            soup = BeautifulSoup(response.content, 'html.parser')

            news_links = soup.find_all('a', href=True)

            for link in news_links[:20]:  # cap the number of links processed
                try:
                    href = link.get('href')
                    title = link.get_text().strip()

                    # Skip nav/anchor links with no real headline text.
                    if not title or len(title) < 10:
                        continue

                    if not self._matches_keywords(title, keywords):
                        continue

                    # Resolve relative links against the listing page URL.
                    full_url = urljoin(url, href)

                    content = self._fetch_article_content(full_url)

                    if content:
                        self._save_industry_data(
                            source='国家统计局',
                            title=title,
                            content=content,
                            url=full_url,
                            industry='医药',
                            data_type='统计数据'
                        )
                        crawled_count += 1

                    time.sleep(1)  # per-article throttle

                except Exception as e:
                    # Best-effort: one bad article must not abort the crawl.
                    print(f"爬取统计局文章失败: {str(e)}")
                    continue

        except Exception as e:
            print(f"爬取统计局失败: {str(e)}")

        return crawled_count

    def _crawl_pharmnet(self, keywords):
        """Crawl pharmnet.com.cn news listing pages.

        Returns the number of articles saved.  The CSS selector below is a
        placeholder — NOTE(review): ``a.news-title`` must be verified against
        the live site's markup.
        """
        crawled_count = 0

        try:
            base_url = "http://www.pharmnet.com.cn"

            # Listing pages to scan (industry / market / policy news).
            news_urls = [
                "/news/industry/",
                "/news/market/",
                "/news/policy/"
            ]

            for news_url in news_urls:
                try:
                    full_url = base_url + news_url
                    response = self.session.get(full_url, timeout=10)
                    response.encoding = 'utf-8'

                    soup = BeautifulSoup(response.content, 'html.parser')

                    # Placeholder selector — adjust to the real page structure.
                    articles = soup.find_all('a', class_='news-title')

                    for article in articles[:10]:
                        try:
                            title = article.get_text().strip()
                            href = article.get('href')

                            if not self._matches_keywords(title, keywords):
                                continue

                            article_url = urljoin(full_url, href)
                            content = self._fetch_article_content(article_url)

                            if content:
                                self._save_industry_data(
                                    source='医药网',
                                    title=title,
                                    content=content,
                                    url=article_url,
                                    industry='医药',
                                    data_type='行业新闻'
                                )
                                crawled_count += 1

                            time.sleep(1)  # per-article throttle

                        except Exception as e:
                            print(f"爬取医药网文章失败: {str(e)}")
                            continue

                except Exception as e:
                    print(f"爬取医药网页面失败: {str(e)}")
                    continue

        except Exception as e:
            print(f"爬取医药网失败: {str(e)}")

        return crawled_count

    def _crawl_cde(self, keywords):
        """Crawl the CDE (drug evaluation center) site — not yet implemented.

        Returns 0; kept as a stub so the dispatcher in
        :meth:`_execute_crawl_task` stays uniform.
        """
        crawled_count = 0

        try:
            base_url = "https://www.cde.org.cn"

            # The real implementation must handle the site's structure and
            # anti-crawling measures; only the skeleton exists for now.
            print("药审中心数据爬取功能开发中...")

        except Exception as e:
            print(f"爬取药审中心失败: {str(e)}")

        return crawled_count

    def _crawl_generic_source(self, source, keywords):
        """Fallback crawl for sources without a dedicated strategy.

        Currently a stub returning 0; a sitemap/RSS based approach could be
        implemented here.
        """
        crawled_count = 0

        try:
            print(f"通用爬取逻辑处理数据源: {source}")

        except Exception as e:
            print(f"通用爬取失败: {str(e)}")

        return crawled_count

    def _fetch_article_content(self, url):
        """Fetch *url* and return its main text content, or None.

        Tries a list of common content-container selectors, falling back to
        the whole <body>.  Whitespace is collapsed and the text truncated to
        5000 chars; texts shorter than 100 chars are treated as noise.
        """
        try:
            response = self.session.get(url, timeout=10)
            response.encoding = 'utf-8'

            soup = BeautifulSoup(response.content, 'html.parser')

            # Drop script/style nodes so they don't pollute get_text().
            for script in soup(["script", "style"]):
                script.decompose()

            # Common article-container selectors, most specific first.
            content_selectors = [
                '.article-content',
                '.content',
                '.main-content',
                '#content',
                '.post-content',
                'article',
                '.news-content'
            ]

            content = ""
            for selector in content_selectors:
                content_elem = soup.select_one(selector)
                if content_elem:
                    content = content_elem.get_text().strip()
                    break

            # Fall back to the whole body when no known container matched.
            if not content:
                body = soup.find('body')
                if body:
                    content = body.get_text().strip()

            # Normalize whitespace and bound the stored size.
            content = re.sub(r'\s+', ' ', content)
            content = content[:5000]

            # Very short texts are almost certainly boilerplate, not articles.
            return content if len(content) > 100 else None

        except Exception as e:
            print(f"获取文章内容失败 {url}: {str(e)}")
            return None

    def _save_industry_data(self, source, title, content, url, industry, data_type):
        """Persist one crawled article as an IndustryData row (deduped by URL).

        Errors are logged and swallowed: a failed save must not abort the
        surrounding crawl loop.
        """
        try:
            # Dedupe on URL: skip articles we have already stored.
            existing = IndustryData.query.filter_by(url=url).first()
            if existing:
                return

            # Let the LLM extract structured key information from the text.
            key_info = self.llm_service.extract_key_information(content)

            industry_data = IndustryData(
                source=source,
                title=title,
                content=content,
                url=url,
                industry=industry,
                data_type=data_type,
                key_metrics=json.dumps(key_info.get('key_metrics', {}), ensure_ascii=False),
                # Simplification: the real publish date should be parsed from
                # the article — TODO.
                publish_date=date.today(),
                status='active'
            )

            # Best-effort numeric extraction; a malformed value no longer
            # raises and aborts the whole save (was a bare float() call).
            if key_info.get('market_size'):
                market_size = self._first_number(key_info['market_size'])
                if market_size is not None:
                    industry_data.market_size = market_size

            if key_info.get('growth_rate'):
                growth_rate = self._first_number(key_info['growth_rate'])
                if growth_rate is not None:
                    industry_data.growth_rate = growth_rate

            db.session.add(industry_data)
            db.session.commit()

        except Exception as e:
            db.session.rollback()
            print(f"保存行业数据失败: {str(e)}")

    def process_uploaded_document(self, file, source, industry, title):
        """Ingest an uploaded document (txt/pdf/doc/docx) as IndustryData.

        Returns a summary dict with the new record id, title, content length
        and the LLM-extracted key info.  Raises ValueError for unsupported
        formats or near-empty documents; DB errors are rolled back and
        re-raised.
        """
        try:
            content = ""
            filename = file.filename.lower()

            if filename.endswith('.txt'):
                content = file.read().decode('utf-8')
            elif filename.endswith('.pdf'):
                # PDF extraction (e.g. via PyPDF2) is not wired up yet.
                content = "PDF文档内容提取功能开发中"
            elif filename.endswith(('.doc', '.docx')):
                # Word extraction (e.g. via python-docx) is not wired up yet.
                content = "Word文档内容提取功能开发中"
            else:
                raise ValueError("不支持的文件格式")

            if len(content) < 50:
                raise ValueError("文档内容过短")

            key_info = self.llm_service.extract_key_information(content)

            industry_data = IndustryData(
                source=source,
                title=title,
                content=content[:5000],  # bound stored size
                # BUGFIX: record the actual filename; previously a hard-coded
                # placeholder gave every upload the identical pseudo-URL.
                url=f"upload://{file.filename}",
                industry=industry,
                data_type='上传文档',
                key_metrics=json.dumps(key_info.get('key_metrics', {}), ensure_ascii=False),
                publish_date=date.today(),
                status='active'
            )

            db.session.add(industry_data)
            db.session.commit()

            return {
                'id': industry_data.id,
                'title': title,
                'content_length': len(content),
                'key_info': key_info
            }

        except Exception:
            db.session.rollback()
            # Bare raise preserves the original traceback (was `raise e`).
            raise

    def get_industry_data_statistics(self):
        """Return aggregate statistics over active IndustryData rows."""
        # (The former try/except that only re-raised added nothing; any DB
        # error simply propagates to the caller as before.)
        total_records = IndustryData.query.filter_by(status='active').count()

        # Counts grouped by data source.
        source_stats = db.session.query(
            IndustryData.source,
            db.func.count(IndustryData.id).label('count')
        ).filter_by(status='active').group_by(IndustryData.source).all()

        # Counts grouped by data type.
        type_stats = db.session.query(
            IndustryData.data_type,
            db.func.count(IndustryData.id).label('count')
        ).filter_by(status='active').group_by(IndustryData.data_type).all()

        # Timestamp of the most recent crawl, if any data exists.
        latest_crawl = db.session.query(
            db.func.max(IndustryData.crawl_date)
        ).scalar()

        return {
            'total_records': total_records,
            'by_source': [
                {'source': stat.source, 'count': stat.count}
                for stat in source_stats
            ],
            'by_type': [
                {'type': stat.data_type, 'count': stat.count}
                for stat in type_stats
            ],
            'latest_crawl': latest_crawl.isoformat() if latest_crawl else None,
            'supported_sources': list(self.data_sources.keys())
        }

    def get_crawl_task_status(self, task_id):
        """Return the task-metadata dict for *task_id*, or a not-found stub."""
        if task_id in self.running_tasks:
            return self.running_tasks[task_id]
        else:
            return {'status': 'not_found', 'message': '任务不存在'}

    def cancel_crawl_task(self, task_id):
        """Mark a task as cancelled; returns True if the task was known.

        Only the status flag changes — Python threads cannot be forcibly
        terminated, so the worker keeps running to completion.
        """
        if task_id in self.running_tasks:
            self.running_tasks[task_id]['status'] = 'cancelled'
            return True
        return False

