import time
from base_spider import BasePolicySpider
import re

"""北京市商务局 通知公告"""

class ShangWuPolicySpider(BasePolicySpider):
    """Spider for the Beijing Municipal Bureau of Commerce (北京市商务局)
    "Notices & Announcements" (通知公告) section.

    Walks the paginated list pages, parses every linked detail page,
    downloads attachments and appends each record to a JSON file via the
    ``BasePolicySpider`` helpers.
    """

    def __init__(self):
        # NOTE(review): base_url targets the 2024 policy-document index
        # (/zwxx/2024zcwj/) while list_page_pattern targets /tzgg/ — they
        # point at different site sections; confirm both are intentional.
        super().__init__(
            base_url="https://sw.beijing.gov.cn/zwxx/2024zcwj/index{p}.html",
            list_page_pattern="https://sw.beijing.gov.cn/tzgg/index{p}.html"
        )
        # Session cookies captured from a browser visit; values such as
        # JSESSIONID will expire and need refreshing.
        self.cookies = {
            "Path": "/",
            "__jsluid_s": "156fa28d06f89d986b6ff24ba469ede6",
            "JSESSIONID": "NGRhMWYwNGEtYTQ0OS00ZDk1LTk3M2QtNjY3OWViNzc5Zjg4",
            "_va_ref": "%5B%22%22%2C%22%22%2C1749474303%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D0FKQXZLSS1dlOtTsqRuQzMvNoFSC2DDoe-yBdfsGh_CEWx1HjVyOgmoUy7bAtt_N%26wd%3D%26eqid%3Db6b8b0b9004b8ff1000000066846b5d8%22%5D",
            "_va_ses": "*",
            "_va_id": "b1d08f7d3c18ece9.1743392345.14.1749474407.1749472320."
        }

    def extract_list_from_page(self, soup, t, url):
        """Collect detail-page links from one list page and parse each.

        ``t`` and ``url`` are part of the base-class callback signature
        and are not used here — presumably page index / page URL; the
        ``url`` parameter was previously shadowed by the loop variable.
        """
        container = soup.find('div', {'class': 'newslist'})
        if not container:
            return
        for a in container.find_all('a'):
            # Hrefs are relative like './content.html'; strip leading
            # dots and resolve against the /tzgg/ section root.
            detail_url = 'https://sw.beijing.gov.cn/tzgg/' + a['href'].lstrip('.').strip()
            self.parse_detail_page(detail_url)

    def parse_detail_page(self, detail_url):
        """Fetch one notice detail page, extract its fields, then download
        attachments and persist the record to 'beijing.json'.

        Returns None when the page cannot be fetched or lacks the expected
        ``news_detail`` container (previously this raised AttributeError
        and aborted the whole crawl).
        """
        soup = self.get_detail(detail_url)
        if not soup:
            return None
        content = soup.find('div', {'class': 'news_detail'})
        if content is None:
            # Layout changed or the fetch returned an error page.
            return None
        title = content.find('h3').text.strip()
        detail_content = content.find('div', {'class': 'detail'}).text

        # Attachment links live inside the TRS editor block. Match on the
        # parsed tree rather than regexing the serialized HTML (the old
        # pattern required target="_blank" to immediately follow href and
        # broke on any attribute reordering).
        fujian_div = content.find('div', {'class': 'TRS_UEDITOR trs_key4format'})
        fujian_list = fujian_div.find_all('a') if fujian_div else []
        attach_hrefs = [a['href'] for a in fujian_list
                        if a.get('href') and a.get('target') == '_blank']
        # Attachment hrefs are usually relative ('./P0...pdf'); resolve
        # them against the detail page's directory.
        page_base = '/'.join(detail_url.split('/')[:-1])
        attach_url = [h if 'http' in h else page_base + h.lstrip('.') for h in attach_hrefs]

        # Attachment output directory. Raw string: the old non-raw literal
        # relied on invalid escape sequences such as '\w' surviving as-is
        # (a DeprecationWarning, and a SyntaxError in future Pythons).
        file_name = r'D:\work\code\政策\北京\北京市商务局\附件'

        # Remaining common fields come from the base-class extractor.
        common_data = self.extract_common_fields(content)

        dic = {
            'detail_url': detail_url,
            'platform': '北京市商务局',
            'city': '北京',
            'title': title,
            'detail_content': detail_content,
            'attach_url': attach_url,
            **common_data,
        }
        # The publish date sits in a text node like '发布日期：2024-01-02'.
        # Guarded: if the node or date is missing, keep whatever
        # extract_common_fields provided instead of crashing.
        date_node = content.find(string=re.compile(r'发布(日期|时间)'))
        date_match = re.search(r'\d{4}-\d{2}-\d{2}', date_node) if date_node else None
        if date_match:
            dic['publish_date'] = date_match.group()
        # Download attachments and persist the record to the JSON file.
        self.download_to_json(dic, fujian_list, file_name, 'beijing.json')