import time
from base_spider import BasePolicySpider
import re

"""北京市卫生健康委员会 最新公开政策文件"""

class WjwPolicySpider(BasePolicySpider):
    """Spider for the latest public policy documents of the Beijing
    Municipal Health Commission (北京市卫生健康委员会).

    List pages are fetched via ``list_page_pattern`` with a category slug
    ``{t}`` and page index ``{p}``; each listed detail page is parsed and
    persisted (with attachments) through the base-class helpers.
    """

    def __init__(self):
        super().__init__(
            base_url="https://sw.beijing.gov.cn/zwxx/2024zcwj/index{p}.html",
            list_page_pattern="https://wjw.beijing.gov.cn/zwgk_20040/{t}/index{p}.html")
        # Session cookies captured from a browser visit; the site rejects
        # cookie-less requests. Values must be sent verbatim.
        self.cookies = {
                "Path": "/",
                "_va_ref": "%5B%22%22%2C%22%22%2C1749535199%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D0FKQXZLSS1dlOtTsqRuQzMvNoFSC2DDoe-yBdfsGh_CEWx1HjVyOgmoUy7bAtt_N%26wd%3D%26eqid%3Db6b8b0b9004b8ff1000000066846b5d8%22%5D",
                "_va_ses": "*",
                "JSESSIONID": "Zjc3NzYxNGEtZWY1YS00Njk1LWE5MDktMDQwMDZiYmMxNTgz",
                "arialoadData": "false",
                "__jsluid_s": "6edb09e79a60d21197c0bd0a1479e190",
                "_va_id": "b1d08f7d3c18ece9.1743392345.19.1749541189.1749530164."
            }
        # Category slugs substituted into {t} of list_page_pattern.
        self.get_categories = [
            'zcwj2024/zcwjss',
            'zxgk'
        ]

    def extract_list_from_page(self, soup, t, url):
        """Collect detail-page links from one list page and parse each.

        :param soup: BeautifulSoup of the list page.
        :param t:    category slug (e.g. ``'zxgk'``).
        :param url:  URL of the list page being processed.
        """
        # List containers differ per category; match either class.
        div = soup.find('div', {'class': re.compile('(search2-con|weinei_left_con)')})
        if not div:
            return
        # BUG FIX: the page number was previously extracted from a
        # hard-coded debug URL ('.../zxgk/index_8.html'), so `page` was
        # always 8. Parse it from the actual list-page URL instead; the
        # first page ('index.html') has no numeric suffix -> page 1.
        page_match = re.search(r'index_(\d+)\.html', url)
        page = int(page_match.group(1)) if page_match else 1
        # Relative hrefs on late 'zxgk' pages resolve against the page's
        # own directory; all other pages resolve one level higher.
        if t == 'zxgk' and page >= 8:
            base = '/'.join(url.split('/')[:-1])
        else:
            base = '/'.join(url.split('/')[:-2])
        page_list_href = [
            base + '/' + a['href'].lstrip('.').strip()
            for a in div.find_all('a')
            if a.has_attr('href')  # skip anchors without href (avoids KeyError)
        ]
        for detail_url in page_list_href:
            self.parse_detail_page(detail_url, t)

    def parse_detail_page(self, detail_url, t=''):
        """Parse one policy detail page and persist it.

        Extracts title, body text, attachment links and the common fields
        provided by the base class, then downloads attachments and appends
        the record to ``beijing.json``.

        :param detail_url: absolute URL of the detail page.
        :param t:          category slug; ``'zxgk'`` pages use a different
                           layout for attachments and publish date.
        :returns: None (also on any page that cannot be parsed).
        """
        soup = self.get_detail(detail_url)
        if not soup:
            return None
        # Main content container; bail out if the page layout is unexpected.
        content = soup.find('div', {'class': 'weinei_left'})
        if not content:
            return None
        # Title (guard against missing node instead of AttributeError).
        title_div = content.find('div', {'class': 'weinei_left_tit_sanji'})
        if not title_div:
            return None
        title = title_div.text.strip()
        # Body text of the policy document.
        div_zhengwen = content.find('div', {'class': 'weinei_left_con_sanji'})
        detail_content = div_zhengwen.text if div_zhengwen else ''
        # Attachment anchors: 'zxgk' pages mark them with target="_blank",
        # other categories use a custom appendix="true" attribute.
        if t == 'zxgk':
            fujian_list = content.find_all('a', {'target': '_blank'})
        else:
            fujian_list = content.find_all('a', {'appendix': 'true'})
        # Resolve relative attachment hrefs against the page's directory.
        base_url = '/'.join(detail_url.split('/')[:-1])
        attach_url = [
            href if 'http' in href else base_url + href.lstrip('.')
            for href in (a['href'] for a in fujian_list if a.has_attr('href'))
        ]
        # Local directory for downloaded attachments. Raw f-string: the
        # original non-raw literal relied on invalid escape sequences
        # (\w, \c, ...) that only passed through by accident.
        file_name = rf'D:\work\code\政策\{self.city}\{self.platform}\附件'
        # Remaining fields are extracted by the shared base-class helper.
        common_data = self.extract_common_fields(content)

        dic = {
            'detail_url': detail_url,
            'platform': self.platform,
            'city': self.city,
            'title': title,
            'detail_content': detail_content,
            'attach_url': attach_url,
            **common_data
        }
        if t == 'zxgk':
            # 'zxgk' pages carry the publish date in a list item such as
            # "发布日期：YYYY-MM-DD"; take the part after the full-width colon.
            date_li = content.find('li', {'class': 'weinei_li'})
            if date_li:
                dic['publish_date'] = date_li.text.split('：')[-1]
        # Download attachments and append the record to the JSON store.
        self.download_to_json(dic, fujian_list, file_name, 'beijing.json')