import scrapy
import json
import re
from datetime import datetime
from scrapy_redis.spiders import RedisSpider
from finance_scraper.items import FinanceDataItem


class FinanceMinistrySpider(RedisSpider):
    """Crawl fiscal statistics published by China's Ministry of Finance (mof.gov.cn).

    Start URLs are normally popped from the Redis queue named by ``redis_key``
    (scrapy-redis); when Redis is unreachable the spider falls back to the
    hard-coded ``start_urls``.  Listing pages are scanned for article links,
    each article is classified (revenue / expenditure / tax / fund) from its
    title, and the HTML tables inside are converted into ``FinanceDataItem``s.
    """

    name = "finance_ministry"
    allowed_domains = ["mof.gov.cn"]
    redis_key = "finance_ministry:start_urls"

    # Fallback URLs used when Redis is unavailable.
    start_urls = [
        "http://www.mof.gov.cn/gkml/caizhengshuju/",
        "http://www.mof.gov.cn/gkml/caizhengshuju/index_1.htm"
    ]

    # Patterns compiled once at class level instead of on every table row.
    _YEAR_RE = re.compile(r'(\d{4})年')
    _MONTH_RE = re.compile(r'(\d{1,2})月')
    _PUB_DATE_RE = re.compile(r'(\d{4}-\d{2}-\d{2})')

    # Ordered keyword table for classifying article titles; checked in the
    # same order as the original if/elif chain.
    _TYPE_KEYWORDS = (
        ("revenue", ("财政收入", "一般公共预算收入")),
        ("expenditure", ("财政支出", "一般公共预算支出")),
        ("tax", ("税收", "税务")),
        ("fund", ("政府性基金", "基金预算")),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.base_url = "http://www.mof.gov.cn"

    def start_requests(self):
        """Yield initial requests, preferring Redis-provided URLs.

        BUG FIX: the original used ``return super().start_requests()`` inside
        a generator function (the ``except`` branch contains ``yield``), so the
        parent's requests were silently discarded -- in a generator,
        ``return value`` terminates iteration instead of producing items
        (PEP 380).  ``yield from`` is required to actually emit them.

        NOTE: if the parent generator raises after yielding some requests,
        the fallback ``start_urls`` are yielded in addition to those already
        emitted; Scrapy's dupe filter deduplicates overlapping URLs.
        """
        try:
            # Pull start URLs from Redis via scrapy-redis.
            yield from super().start_requests()
        except Exception as e:
            self.logger.error(f"Redis连接错误: {e}")
            self.logger.info("使用默认start_urls进行爬取")
            # Fall back to the hard-coded start URLs.
            for url in self.start_urls:
                yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        """Parse a listing page: follow article links and pagination."""
        # The site layout has changed over time; try selectors from the most
        # specific to the most generic.
        data_links = response.css(".xwzxtbl a::attr(href)").getall()
        if not data_links:
            data_links = response.css(".list_r a::attr(href)").getall()
        if not data_links:
            # Last resort: any anchor whose href looks like a data article.
            data_links = response.css("a::attr(href)").re(r'.*caizhengshuju.*\.htm$')

        self.logger.info(f"找到 {len(data_links)} 个数据链接")

        for link in data_links:
            # urljoin handles absolute, host-relative ("/...") and
            # page-relative hrefs uniformly (absolute URLs pass through).
            link = response.urljoin(link)
            self.logger.debug(f"处理链接: {link}")
            yield scrapy.Request(link, callback=self.parse_data_page)

        # Pagination: prefer the explicit "next page" anchor text, then fall
        # back to the second-to-last entry of the pager bar.
        next_page = response.css("a:contains('下一页')::attr(href)").get()
        if not next_page:
            next_page = response.css(".fy a:nth-last-child(2)::attr(href)").get()

        if next_page:
            next_page = response.urljoin(next_page)
            self.logger.info(f"跟踪下一页: {next_page}")
            yield scrapy.Request(next_page, callback=self.parse)

    def parse_data_page(self, response):
        """Parse one article page and dispatch to the type-specific extractor."""
        # Title location differs between page templates; try all known ones.
        title = response.css(".xqy_top h1::text, .xxk_top h1::text, .xwzxtit h1::text").get()
        if not title:
            self.logger.warning(f"未在页面中找到标题: {response.url}")
            return

        self.logger.info(f"解析数据页: {title}")

        data_type = self.determine_data_type(title)
        if not data_type:
            self.logger.warning(f"未能确定数据类型: {title}")
            return

        # Publication date (e.g. "2024-01-25") from one of the header layouts.
        pub_date = None
        pub_date_text = response.css(".daty span::text, .xxk_top span::text, .xwzxtit span::text").get()
        if pub_date_text:
            date_match = self._PUB_DATE_RE.search(pub_date_text)
            if date_match:
                pub_date = date_match.group(1)

        if data_type in ("revenue", "expenditure"):
            yield from self.extract_fiscal_data(response, data_type, title, pub_date)
        elif data_type == "tax":
            yield from self.extract_tax_data(response, title, pub_date)
        elif data_type == "fund":
            yield from self.extract_fund_data(response, title, pub_date)

    def determine_data_type(self, title):
        """Classify *title* as revenue/expenditure/tax/fund, or None if unknown."""
        for data_type, keywords in self._TYPE_KEYWORDS:
            if any(kw in title for kw in keywords):
                return data_type
        return None

    def _find_tables(self, response):
        """Return the page's data tables, searching known content containers as a fallback."""
        tables = response.css("table")
        if not tables:
            content = response.css("#zoom, .TRS_Editor, .xxy_contant")
            if content:
                tables = content.css("table")
        return tables

    def _parse_period(self, title):
        """Extract ``(year, month)`` strings from a title like "2024年1月...".

        The year falls back to the current calendar year, stringified so the
        item field type is consistent with the matched case (the original
        mixed ``str`` and ``int`` here); month is None when absent.
        """
        year_match = self._YEAR_RE.search(title)
        month_match = self._MONTH_RE.search(title)
        year = year_match.group(1) if year_match else str(datetime.now().year)
        month = month_match.group(1) if month_match else None
        return year, month

    @staticmethod
    def _cell_text(cell):
        """First text node of a table cell, stripped; '' when the cell has no text."""
        return (cell.css("::text").get() or "").strip()

    def extract_fiscal_data(self, response, data_type, title, pub_date):
        """Yield revenue/expenditure items from the page's tables."""
        tables = self._find_tables(response)
        self.logger.info(f"找到 {len(tables)} 个表格")

        # Loop-invariant per page: reporting scope and period (hoisted out of
        # the row loop; the original recomputed both for every row).
        level = "national" if "全国" in title else "local"
        year, month = self._parse_period(title)

        for table in tables:
            for row in table.css("tr"):
                cells = row.css("td")
                if len(cells) < 2:
                    continue

                item_name = cells[0].css("::text").get()
                if not item_name:
                    continue

                # Skip header rows (they contain the column labels 收入/支出).
                if "收入" in item_name or "支出" in item_name:
                    continue

                amount_text = self._cell_text(cells[1])
                growth_text = self._cell_text(cells[-1]) if len(cells) > 2 else ""
                if not amount_text:
                    # The original raised AttributeError on a text-less cell
                    # and skipped the row via its except; skip explicitly.
                    continue

                try:
                    amount = self.extract_number(amount_text)
                    growth_rate = self.extract_number(growth_text)

                    item = FinanceDataItem()
                    item['data_type'] = data_type
                    item['level'] = level
                    item['year'] = year
                    item['month'] = month
                    item['date'] = pub_date
                    item['amount'] = amount
                    item['growth_rate'] = growth_rate
                    item['raw_data'] = {
                        'title': title,
                        'name': item_name,
                        'amount_text': amount_text,
                        'growth_text': growth_text
                    }

                    self.logger.info(f"提取到数据项: {item_name}, 金额: {amount}, 增长率: {growth_rate}")
                    yield item
                except Exception as e:
                    # Best-effort: log and keep processing remaining rows.
                    self.logger.error(f"提取财政数据时出错: {e}")

    def extract_tax_data(self, response, title, pub_date):
        """Yield per-tax-type items from the page's tables."""
        tables = self._find_tables(response)

        # Loop-invariant per page: reporting period.
        year, month = self._parse_period(title)

        for table in tables:
            for row in table.css("tr"):
                cells = row.css("td")
                if len(cells) < 2:
                    continue

                tax_type = cells[0].css("::text").get()
                # Only rows whose first cell names a tax are data rows.
                if not tax_type or "税" not in tax_type:
                    continue

                amount_text = self._cell_text(cells[1])
                growth_text = self._cell_text(cells[-1]) if len(cells) > 2 else ""
                if not amount_text:
                    # Explicit skip instead of the original AttributeError path.
                    continue

                try:
                    tax_amount = self.extract_number(amount_text)
                    tax_growth_rate = self.extract_number(growth_text)

                    item = FinanceDataItem()
                    item['data_type'] = "tax"
                    item['tax_type'] = tax_type
                    item['year'] = year
                    item['month'] = month
                    item['date'] = pub_date
                    item['tax_amount'] = tax_amount
                    item['tax_growth_rate'] = tax_growth_rate
                    item['raw_data'] = {
                        'title': title,
                        'amount_text': amount_text,
                        'growth_text': growth_text
                    }

                    self.logger.info(f"提取到税收数据: {tax_type}, 金额: {tax_amount}")
                    yield item
                except Exception as e:
                    # Best-effort: log and keep processing remaining rows.
                    self.logger.error(f"提取税收数据时出错: {e}")

    def extract_fund_data(self, response, title, pub_date):
        """Yield government-fund items (revenue + expenditure columns) from the tables."""
        tables = self._find_tables(response)

        # Loop-invariant per page: reporting period.
        year, month = self._parse_period(title)

        for table in tables:
            for row in table.css("tr"):
                cells = row.css("td")
                # Fund rows need at least name, revenue and expenditure cells.
                if len(cells) < 3:
                    continue

                fund_type = cells[0].css("::text").get()
                if not fund_type:
                    continue

                revenue_text = self._cell_text(cells[1])
                expenditure_text = self._cell_text(cells[2])
                growth_text = self._cell_text(cells[-1]) if len(cells) > 3 else ""
                if not revenue_text:
                    # Explicit skip instead of the original AttributeError path.
                    continue

                try:
                    fund_revenue = self.extract_number(revenue_text)
                    fund_expenditure = self.extract_number(expenditure_text)
                    fund_growth_rate = self.extract_number(growth_text)

                    item = FinanceDataItem()
                    item['data_type'] = "fund"
                    item['fund_type'] = fund_type
                    item['year'] = year
                    item['month'] = month
                    item['date'] = pub_date
                    item['fund_revenue'] = fund_revenue
                    item['fund_expenditure'] = fund_expenditure
                    item['fund_growth_rate'] = fund_growth_rate
                    item['raw_data'] = {
                        'title': title,
                        'revenue_text': revenue_text,
                        'expenditure_text': expenditure_text,
                        'growth_text': growth_text
                    }

                    self.logger.info(f"提取到基金数据: {fund_type}")
                    yield item
                except Exception as e:
                    # Best-effort: log and keep processing remaining rows.
                    self.logger.error(f"提取基金数据时出错: {e}")

    def extract_number(self, text):
        """Parse a float out of free-form cell text (e.g. "1,234.5亿元").

        Strips every character except digits, '.' and '-'; returns None for
        empty or unparseable input instead of raising.
        """
        if not text:
            return None

        num_text = re.sub(r'[^\d.-]', '', text)

        try:
            return float(num_text) if num_text else None
        except ValueError:
            # e.g. a stray '-' or multiple dots left after stripping.
            return None
