import time
from base_spider import BasePolicySpider
import re
from bs4 import BeautifulSoup

"""北京市交通委员会 政策文件 通知公告"""

class JtwPolicySpider(BasePolicySpider):
    def __init__(self):
        """Configure URL templates, category slugs and session cookies."""
        # NOTE(review): base_url points at sw.beijing.gov.cn while
        # list_page_pattern points at jtw.beijing.gov.cn — looks like
        # copy-paste residue from another spider; confirm which host
        # is actually intended before relying on base_url.
        super().__init__(
            base_url="https://sw.beijing.gov.cn/zwxx/2024zcwj/index{p}.html",
            list_page_pattern="https://jtw.beijing.gov.cn/xxgk/{t}/index{p}.html")
        # Category slugs substituted into {t} of the list-page pattern:
        # '2024zcwj' = policy documents, 'tzgg' = notices/announcements.
        self.get_categories = ['2024zcwj', 'tzgg']
        # Session cookies captured from a browser visit — presumably
        # required by the site's anti-bot / analytics layer (verify;
        # several look like tracking tokens that may expire).
        self.cookies = {
            "Path": "/",
            "lcid": "1043",
            "_va_ref": "%5B%22%22%2C%22%22%2C1749781037%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D0FKQXZLSS1dlOtTsqRuQzMvNoFSC2DDoe-yBdfsGh_CEWx1HjVyOgmoUy7bAtt_N%26wd%3D%26eqid%3Db6b8b0b9004b8ff1000000066846b5d8%22%5D",
            "_va_ses": "*",
            "__jsluid_s": "91d9c7bbaebc2bf4a3f597333e916fcd",
            "_yfxkpy_ssid_10002881": "%7B%22_yfxkpy_firsttime%22%3A%221749781068568%22%2C%22_yfxkpy_lasttime%22%3A%221749781068568%22%2C%22_yfxkpy_visittime%22%3A%221749781068568%22%2C%22_yfxkpy_cookie%22%3A%2220250613101748569433842001877576%22%7D",
            "JSESSIONID": "YTIxZjJhYTUtNTljMS00MWE4LWI0NmUtMTA2NWFkYmFkYTY5",
            "_va_id": "b1d08f7d3c18ece9.1743392345.25.1749781370.1749781037.",
        }


    def extract_list_from_page(self, soup, t, url):
        """Collect detail-page links from one list page and crawl each.

        Args:
            soup: BeautifulSoup of the list page.
            t: category slug ('2024zcwj' = policy docs, 'tzgg' = notices).
            url: URL of the list page itself, used to resolve relative hrefs.
        """
        # Notices live in a differently-classed container than policy docs.
        container_class = 'publicity' if t == 'tzgg' else 'news_list'
        container = soup.find('div', {'class': container_class})
        if not container:
            return
        # Directory of the current list page, for resolving relative hrefs.
        base = '/'.join(url.split('/')[:-1])
        for li in container.find_all('li'):
            a = li.find('a')
            # Fix: skip <li> items without an anchor/href — the original
            # comprehension raised TypeError on such items.
            if a is None or not a.get('href'):
                continue
            href = a['href'].strip()
            # Fix: test the prefix, not substring membership — a relative
            # href whose query string contains "http" must still be
            # resolved against the page directory.
            if href.startswith('http'):
                detail_url = href
            else:
                # lstrip('.') turns './x.html' into '/x.html' before joining.
                detail_url = base + '/' + href.lstrip('.')
            self.parse_detail_page(detail_url, t)
    def parse_detail_page(self, detail_url, t):
        """Fetch one detail page, extract its fields and persist to JSON.

        Args:
            detail_url: absolute URL of the article page.
            t: category slug; 'tzgg' pages use page-relative attachment hrefs.

        Returns:
            None. On success the record (plus downloaded attachments) is
            written out via download_to_json; None is also returned early
            when the page cannot be fetched or lacks the expected markup.
        """
        print('详情页链接', detail_url)
        soup = self.get_detail(detail_url)
        if not soup:
            return None
        # Article container; absent on non-article pages.
        content = soup.find('div', {'class': 'article_m'})
        if not content:
            return None
        # Title — fix: guard a missing <h2>, the original raised
        # AttributeError ('NoneType' has no attribute 'text').
        h2 = content.find('h2')
        if not h2:
            return None
        title = h2.text.strip()
        # Body text — fix: guard a missing body div as well.
        div_zhengwen = content.find('div', {'class': 'article_i'})
        if not div_zhengwen:
            return None
        detail_content = div_zhengwen.text
        # Attachment anchors: hrefs whose extension is a known attachment type.
        fujian_list = [
            a for a in content.find_all('a')
            if a.get('href', '')
            and self.get_url_houzhui(a.get('href', '')) in self.attachment_extensions
        ]
        attach_url = [a['href'] for a in fujian_list]
        if t == 'tzgg':
            # Notice pages link attachments relative to the page directory.
            # Fix: prefix test instead of substring membership (see
            # extract_list_from_page for the same issue).
            # NOTE(review): lstrip('.') flattens '../x' to '/x', which is
            # wrong for parent-relative links — none observed so far; verify.
            base_url = '/'.join(detail_url.split('/')[:-1])
            attach_url = [
                u if u.startswith('http') else base_url + u.lstrip('.')
                for u in attach_url
            ]
        # Local folder where attachments are saved.
        # Fix: raw f-string — the original non-raw literal relied on invalid
        # escapes ('\w', '\{', ...) being preserved verbatim, which emits
        # SyntaxWarning on Python 3.12+ and will eventually break.
        file_name = rf'D:\work\code\政策\{self.city}\{self.platform}\附件'
        # Shared fields (publish date, source, ...) extracted by the base class.
        common_data = self.extract_common_fields(content)
        dic = {
            'detail_url': detail_url,
            'platform': self.platform,
            'city': self.city,
            'title': title,
            'detail_content': detail_content,
            'attach_url': attach_url,
            **common_data,
        }
        # Download attachments and append the record to beijing.json.
        self.download_to_json(dic, fujian_list, file_name, 'beijing.json')