import re
import time
from urllib.parse import urljoin

from base_spider import BasePolicySpider

"""北京市文化和旅游局 政策文件 通知公告"""

class whlyjPolicySpider(BasePolicySpider):
    """Spider for 北京市文化和旅游局 (Beijing Municipal Bureau of Culture and
    Tourism) policy documents and notices.

    Walks the category list pages built from ``list_page_pattern``, follows
    every article link, and persists each parsed article (plus attachments)
    through ``download_to_json`` inherited from ``BasePolicySpider``.
    """

    def __init__(self):
        # NOTE(review): base_url points at sw.beijing.gov.cn (commerce bureau)
        # while list_page_pattern points at whlyj.beijing.gov.cn — this looks
        # copied from another spider; confirm base_url is intentional.
        super().__init__(
            base_url="https://sw.beijing.gov.cn/zwxx/2024zcwj/index{p}.html",
            list_page_pattern="https://whlyj.beijing.gov.cn/zwgk/{t}/index{p}.html")
        # Session cookies captured from a browser visit; they will expire and
        # may need refreshing for the crawl to keep working.
        self.cookies = {
            "Path": "/",
            "_va_ref": "%5B%22%22%2C%22%22%2C1749535199%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D0FKQXZLSS1dlOtTsqRuQzMvNoFSC2DDoe-yBdfsGh_CEWx1HjVyOgmoUy7bAtt_N%26wd%3D%26eqid%3Db6b8b0b9004b8ff1000000066846b5d8%22%5D",
            "_va_ses": "*",
            "JSESSIONID": "Zjc3NzYxNGEtZWY1YS00Njk1LWE5MDktMDQwMDZiYmMxNTgz",
            "arialoadData": "false",
            "__jsluid_s": "6edb09e79a60d21197c0bd0a1479e190",
            "_va_id": "b1d08f7d3c18ece9.1743392345.19.1749541189.1749530164."
        }
        # Category slugs substituted for {t} in list_page_pattern:
        # '2024zcwj' = policy documents, 'tzgg' = notices/announcements.
        self.get_categories = [
            '2024zcwj',
            'tzgg'
        ]

    def extract_list_from_page(self, soup, t, url):
        """Extract article links from one list page and parse each of them.

        Parameters:
            soup: BeautifulSoup tree of the list page.
            t:    category slug (unused here; kept for interface parity).
            url:  absolute URL of the list page, used to resolve relative hrefs.
        """
        listing = soup.find('ul', {'class': 'bmbct-list10'})
        if not listing:
            return
        for item in listing.find_all('li'):
            anchor = item.find('a')
            # Skip malformed <li> entries instead of crashing on None.
            if not anchor or not anchor.get('href'):
                continue
            # urljoin correctly resolves './', '../' and root-relative hrefs
            # (the previous lstrip('.') concatenation mishandled '../').
            self.parse_detail_page(urljoin(url, anchor['href'].strip()))

    def extract_common_fields(self, content):
        """Extract the shared metadata fields from a detail-page content node.

        Subclasses may override this method to adapt to other page structures.
        Each value is the text node matching the field's label regex, with the
        leading '[label]' prefix dropped by splitting on ']'.
        """
        label_patterns = {
            'publish_date': r'发布(日期|时间)',
            'theme': r'主题分类',
            'unit': r'(制发单位|发文机构|发文单位)',
            'impl_date': r'实施(日期|时间)',
            'write_date': r'成文(日期|时间)',
            'file_num': r'发文(字号|序号)',
            'expire_date': r'(失效(日期|时间)|废止日期)',
            'validity': r'有效性',
        }
        data = {}
        for key, pattern in label_patterns.items():
            tag = content.find(string=re.compile(pattern))
            # Text looks like '[发布日期] 2024-01-01'; keep the part after ']'.
            data[key] = tag.text.strip().split(']')[-1] if tag else ''
        data['content'] = str(content)
        return data

    def parse_detail_page(self, detail_url, t=''):
        """Fetch one article page, parse it, and persist the record.

        Parameters:
            detail_url: absolute URL of the article page.
            t:          category slug (unused; kept for interface parity).

        Returns None when the page cannot be fetched or its layout is not
        recognised; otherwise downloads attachments and appends the record
        to 'beijing.json' via ``download_to_json``.
        """
        soup = self.get_detail(detail_url)
        if not soup:
            return None

        # Main article container — two known page layouts.
        content = soup.find('div', {'class': 'pageArticle'})
        if not content:
            content = soup.find('section', {'class': 'sub_right'})
        if not content:
            return None

        # Title: prefer the <h3>, fall back to the articlePageTitle div.
        # (Fixed: the old fallback stored the Tag object itself, not its text,
        # and a missing <h3> raised AttributeError.)
        title_tag = content.find('h3')
        title = title_tag.text.strip() if title_tag else ''
        if not title:
            alt_title = content.find('div', {'class': 'articlePageTitle'})
            title = alt_title.text.strip() if alt_title else ''

        # Body text — again two known layouts; empty string when neither
        # matches, so a missing body no longer raises AttributeError.
        body = content.find('div', {'class': 'wrapArticle gp-pr'})
        if not body:
            body = content.find('div', {'class': 'article scroll-pane jspScrollable'})
        detail_content = body.text if body else ''

        # Attachments: any target=_blank link whose extension is a recognised
        # attachment type; urljoin resolves relative hrefs and passes absolute
        # ones through unchanged.
        fujian_list = [
            a for a in content.find_all('a', {'target': '_blank'})
            if self.get_url_houzhui(a.get('href', '')) in self.attachment_extensions
        ]
        attach_url = [urljoin(detail_url, a['href']) for a in fujian_list]

        # Local directory for downloaded attachments (raw string: same bytes
        # as before, without the invalid '\w' escape warning).
        file_name = rf'D:\work\code\政策\{self.city}\{self.platform}\附件'

        dic = {
            'detail_url': detail_url,
            'platform': self.platform,
            'city': self.city,
            'title': title,
            'detail_content': detail_content,
            'attach_url': attach_url,
            **self.extract_common_fields(content),
        }

        # Download attachments and append the record to the JSON store.
        self.download_to_json(dic, fujian_list, file_name, 'beijing.json')