import time
from base_spider import BasePolicySpider
import re

"""北京市人民政府国有资产监督管理委员会 政策文件 通知公告"""

class GzwPolicySpider(BasePolicySpider):
    """Spider for policy documents and notices published by the Beijing
    State-owned Assets Supervision and Administration Commission
    (北京市人民政府国有资产监督管理委员会 政策文件/通知公告).

    List pages are built from ``list_page_pattern`` with a category slug
    ``{t}`` and page index ``{p}``; each detail page is parsed for title,
    body text and attachments, then persisted via ``download_to_json``.
    """

    def __init__(self):
        super().__init__(
            base_url="https://sw.beijing.gov.cn/zwxx/2024zcwj/index{p}.html",
            list_page_pattern="https://gzw.beijing.gov.cn/xxfb/{t}/index{p}.html")
        # Session cookies captured from a browser visit. NOTE(review):
        # JSESSIONID / __jsluid_s look like anti-bot session tokens and will
        # likely expire — confirm they are refreshed elsewhere.
        self.cookies = {
                "Path": "/",
                "_va_ref": "%5B%22%22%2C%22%22%2C1749535199%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D0FKQXZLSS1dlOtTsqRuQzMvNoFSC2DDoe-yBdfsGh_CEWx1HjVyOgmoUy7bAtt_N%26wd%3D%26eqid%3Db6b8b0b9004b8ff1000000066846b5d8%22%5D",
                "_va_ses": "*",
                "JSESSIONID": "Zjc3NzYxNGEtZWY1YS00Njk1LWE5MDktMDQwMDZiYmMxNTgz",
                "arialoadData": "false",
                "__jsluid_s": "6edb09e79a60d21197c0bd0a1479e190",
                "_va_id": "b1d08f7d3c18ece9.1743392345.19.1749541189.1749530164."
            }
        # Category slugs substituted into list_page_pattern's {t}:
        # '2024zcwj' = 2024 policy documents, 'tzgg' = notices/announcements.
        self.get_categories = [
            '2024zcwj',
            'tzgg'
        ]

    def extract_list_from_page(self, soup, t, url):
        """Collect detail-page links from one list page and parse each one.

        Args:
            soup: BeautifulSoup of the list page.
            t: category slug (unused here; kept for signature compatibility).
            url: URL of the list page, used to resolve relative hrefs.
        """
        container = soup.find('div', {'class': 'public_list_team'})
        if not container:
            return
        list_base = '/'.join(url.split('/')[:-1])
        for item in container.find_all('li'):
            anchor = item.find('a')
            # Skip list items that carry no usable link instead of crashing.
            if anchor is None or not anchor.get('href'):
                continue
            href = anchor['href'].strip()
            if 'http' in href:
                detail_url = href
            else:
                # Relative link: resolve against the list page's directory.
                detail_url = list_base + '/' + href.lstrip('.').strip()
            self.parse_detail_page(detail_url)

    def parse_detail_page(self, detail_url, t=''):
        """Fetch one detail page, extract its fields, and persist the record.

        Args:
            detail_url: absolute URL of the policy detail page, e.g.
                https://gzw.beijing.gov.cn/xxfb/2024zcwj/202405/t20240508_3665460.html
            t: category slug (unused; kept for interface compatibility).

        Returns:
            None. The record and its attachments are saved by
            ``download_to_json``.
        """
        soup = self.get_detail(detail_url)
        if not soup:
            return None

        # The article body lives in the LAST 'container' div on the page;
        # guard the list before indexing so an unexpected layout does not
        # raise IndexError (the original post-index None check was unreachable).
        containers = soup.find_all('div', {'class': 'container'})
        if not containers:
            return None
        content = containers[-1]

        # Title
        title_tag = content.find('h2')
        title = title_tag.text.strip() if title_tag else ''

        # Body text
        div_zhengwen = content.find('div', {'id': 'div_zhengwen'})
        detail_content = div_zhengwen.text if div_zhengwen else ''

        # Attachment section — absent on many pages, so tolerate None.
        fujian_div = content.find('div', {'class': 'fujian'})
        fujian_list = fujian_div.find_all('a') if fujian_div else []

        # Attachment links, resolved against the detail page's directory.
        base_url = '/'.join(detail_url.split('/')[:-1])
        attach_url = [
            href if 'http' in href else base_url + href.lstrip('.')
            for href in (a['href'] for a in fujian_list)
        ]

        # Local directory for downloaded attachments. Raw string: the
        # original non-raw literal contained invalid escapes (\w, \c) that
        # warn on modern Python; the resulting value is unchanged.
        file_name = rf'D:\work\code\政策\{self.city}\{self.platform}\附件'

        # Remaining fields are extracted by the shared base-class helper.
        common_data = self.extract_common_fields(content)

        dic = {
            'detail_url': detail_url,
            'platform': self.platform,
            'city': self.city,
            'title': title,
            'detail_content': detail_content,
            'attach_url': attach_url,
            **common_data
        }

        # Download attachments and append the record to the JSON output.
        self.download_to_json(dic, fujian_list, file_name, 'beijing.json')