import requests
from bs4 import BeautifulSoup
import re
import csv
from urllib.parse import quote
import time
import logging
from config.keywords_config import KeywordsConfig
from utils.logger import setup_logger
from utils.file_utils import save_to_csv

class WeiboCrawler:
    """Crawler that extracts blockchain addresses from Weibo search results.

    Collected addresses are stored as ``(address, type)`` tuples in a set,
    so duplicates found across pages and keywords are collapsed automatically.
    """

    def __init__(self, config):
        # config is expected to provide: headers, base_url, retry_count,
        # retry_delay, address_pattern (compiled regex with 4 groups),
        # keywords, pages_per_keyword, output_file.
        self.config = config
        self.addresses = set()  # (address, addr_type) tuples
        self.session = requests.Session()
        self.session.headers.update(self.config.headers)
        self.logger = logging.getLogger('weibo_crawler')

    def fetch_page(self, keyword, page):
        """Fetch and parse one search-result page for *keyword*.

        Returns True when the remaining pages of this keyword should be
        skipped (no cards found on the page), False otherwise — including
        when every retry fails, so the caller simply moves to the next page.
        """
        url = self.config.base_url.format(quote(keyword), page)
        for attempt in range(self.config.retry_count):
            try:
                response = self.session.get(url, timeout=10)
                if response.status_code == 200:
                    return self.parse_html(response.text)
                self.logger.warning(f'请求失败，状态码：{response.status_code}，尝试第 {attempt + 1} 次重试')
            except Exception as e:
                # requests raises a variety of transport-level errors
                # (timeouts, connection resets); retry them all alike.
                self.logger.warning(f'请求异常：{str(e)}，尝试第 {attempt + 1} 次重试')
            if attempt < self.config.retry_count - 1:
                time.sleep(self.config.retry_delay)
        self.logger.error(f'多次尝试后仍无法获取页面：{url}')
        return False

    def parse_html(self, html):
        """Extract blockchain addresses from the HTML of one result page.

        Returns True when no weibo cards are found (signal to skip the rest
        of the current keyword), False otherwise.
        """
        soup = BeautifulSoup(html, 'html.parser')
        selectors = [
            'div.card-wrap',
            'div[node-type="feed_list_item"]',
            'div[class*="weibo-text"]',
        ]
        # The selectors overlap (a card-wrap div usually also carries
        # node-type="feed_list_item"), so the same Tag can match more than
        # once. Dedupe by object identity so each card is scanned (and its
        # addresses logged) only once.
        seen = set()
        cards = []
        for selector in selectors:
            for node in soup.select(selector):
                if id(node) not in seen:
                    seen.add(id(node))
                    cards.append(node)
        if not cards:
            self.logger.warning("未找到任何微博卡片，跳过当前关键词。")
            return True

        for card in cards:
            text = card.get_text()
            for match in self.config.address_pattern.findall(text):
                address, addr_type = self._extract_address_info(match)
                if address:
                    self.addresses.add((address, addr_type))
                    self.logger.info(f"找到地址：{address}，类型：{addr_type}")
        return False

    def _extract_address_info(self, match):
        """Map one regex match (a 4-group tuple) to ``(address, type)``.

        Group order must mirror ``config.address_pattern``:
        ETH, TRON, BTC legacy/P2SH, BTC Bech32. Returns ``(None, None)``
        when every group is empty.
        """
        for address, addr_type in zip(match, ('ETH', 'TRON', 'BTC', 'BTC_SegWit')):
            if address:
                return address, addr_type
        return None, None

    def run(self):
        """Crawl every configured keyword and page, then persist results.

        Partial results are also saved on KeyboardInterrupt and on any
        unexpected error, so progress is never lost. Returns the number of
        unique addresses collected.
        """
        try:
            self.logger.info(f'开始爬取微博数据，共{len(self.config.keywords)}个关键词')
            for keyword in self.config.keywords:
                self.logger.info(f'正在爬取关键词：{keyword}')
                for page in range(1, self.config.pages_per_keyword + 1):
                    self.logger.info(f'第{page}页')
                    if self.fetch_page(keyword, page):
                        break  # no cards on this page — skip remaining pages
                    time.sleep(self.config.retry_delay)  # throttle between requests
            save_to_csv(self.addresses, self.config.output_file)
            self.logger.info(f'完成！共爬取到{len(self.addresses)}条地址')
        except KeyboardInterrupt:
            self.logger.warning("程序被手动中断，正在保存已爬取的数据...")
            save_to_csv(self.addresses, self.config.output_file)
            self.logger.info(f'已保存{len(self.addresses)}条地址')
        except Exception as e:
            # Top-level boundary: log and fall through to save what we have.
            self.logger.error(f'程序运行出错：{str(e)}')
            save_to_csv(self.addresses, self.config.output_file)
            self.logger.info(f'已保存{len(self.addresses)}条地址')
        return len(self.addresses)

if __name__ == '__main__':
    # Configure logging before anything else emits records.
    setup_logger()
    logger = logging.getLogger('weibo_crawler')

    # Load keyword/request configuration.
    config = KeywordsConfig()

    # Kick off the crawl.
    logger.info('启动微博区块链地址爬虫...')
    WeiboCrawler(config).run()