import time
from base_spider import BasePolicySpider
import re
from bs4 import BeautifulSoup
"""北京市知识产权局 政策文件 通知公告 法律法规"""

class zscqjPolicySpider(BasePolicySpider):
    """Spider for the Beijing Intellectual Property Administration
    (北京市知识产权局): policy documents, notices, laws and regulations.

    Walks the category list pages on zscqj.beijing.gov.cn and parses each
    detail page into a dict that is written out (with attachments) via the
    base class's ``download_to_json``.
    """

    def __init__(self):
        # NOTE(review): base_url targets sw.beijing.gov.cn while every other
        # URL here is zscqj.beijing.gov.cn — looks copy-pasted from another
        # spider; confirm whether the base class actually uses base_url.
        super().__init__(
            base_url="https://sw.beijing.gov.cn/zwxx/2024zcwj/index{p}.html",
            list_page_pattern="https://zscqj.beijing.gov.cn/zscqj/zwgk/{t}-{p}.html")
        # Session cookies captured from a browser visit; these expire and may
        # need refreshing when the site starts rejecting requests.
        self.cookies = {
                "Path": "/",
                "_va_ref": "%5B%22%22%2C%22%22%2C1749535199%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D0FKQXZLSS1dlOtTsqRuQzMvNoFSC2DDoe-yBdfsGh_CEWx1HjVyOgmoUy7bAtt_N%26wd%3D%26eqid%3Db6b8b0b9004b8ff1000000066846b5d8%22%5D",
                "_va_ses": "*",
                "JSESSIONID": "Zjc3NzYxNGEtZWY1YS00Njk1LWE5MDktMDQwMDZiYmMxNTgz",
                "arialoadData": "false",
                "__jsluid_s": "6edb09e79a60d21197c0bd0a1479e190",
                "_va_id": "b1d08f7d3c18ece9.1743392345.19.1749541189.1749530164."
            }
        # Category path segments substituted for {t} in list_page_pattern:
        # laws/regulations (flfg), notices (tzgg), policy documents (zcwj).
        self.get_categories = [
            'flfg18/a12bf751',
            'tzgg/2fbe2a94',
            'zcwj92/27b2c058'
        ]

    def get_list(self, p, t):
        """Fetch list page *p* of category *t* and crawl every detail link.

        :param p: page index; 0 means the index page (no numeric suffix).
        :param t: category path segment from ``self.get_categories``.
        :return: the sentinel string ``'404 Not Found'`` when pagination runs
                 past the last page, otherwise ``None``.
        """
        # Page 0 is "index.html" with an empty page suffix.
        url = self.list_page_pattern.format(t=t, p='' if p == 0 else f"{p}")
        response = self._session_get(url)
        if response == '404 Not Found':
            return '404 Not Found'

        soup = BeautifulSoup(response.text, 'html.parser')
        return self.extract_list_from_page(soup, t, url)

    def extract_list_from_page(self, soup, t, url):
        """Collect detail-page URLs from a parsed list page and parse each.

        :param soup: BeautifulSoup document of the list page.
        :param t: category segment (kept for signature compatibility).
        :param url: the list page URL (kept for signature compatibility).
        """
        container = soup.find('ul', {'class': 'subpageCon-conList'})
        if not container:
            return

        page_list_href = []
        for item in container.find_all('li'):
            anchor = item.find('a')
            # Skip malformed entries (previously an <li> without an <a>
            # raised TypeError inside a one-line comprehension).
            if not anchor or not anchor.get('href'):
                continue
            href = anchor['href']
            if 'http' in href:
                page_list_href.append(href.strip())
            else:
                # Relative links like "./xxx/index.html" are rooted at the
                # site origin, matching the original "lstrip('.')" behavior.
                page_list_href.append(
                    'https://zscqj.beijing.gov.cn/' + href.lstrip('.').strip())

        for detail_url in page_list_href:
            self.parse_detail_page(detail_url)

    def parse_detail_page(self, detail_url, t=''):
        """Parse one policy detail page and persist its fields + attachments.

        :param detail_url: absolute URL of the detail page.
        :param t: unused; kept for signature compatibility with callers.
        :return: ``None`` (both on success and on unparseable pages).
        """
        # BUG FIX: detail_url was previously overwritten with a hard-coded
        # debug URL, so every record scraped the exact same page.
        soup = self.get_detail(detail_url)
        if not soup:
            return None

        # Main content container.
        content = soup.find('div', {'id': 'contentDiv'})
        if not content:
            return None

        # Title — bail out instead of crashing on an unexpected layout.
        title_div = content.find('div', {'class': 'article-tit'})
        if not title_div:
            return None
        title = title_div.text.strip()

        # Body text of the policy document.
        div_zhengwen = content.find('div', {'class': 'article-word'})
        if not div_zhengwen:
            return None
        detail_content = div_zhengwen.text

        # Attachment anchors: any link whose extension is in the known set.
        fujian_list = [c for c in content.find_all('a')
                       if self.get_url_houzhui(c.get('href', '')) in self.attachment_extensions]
        # Attachment URLs, absolutized against the site origin.
        attach_url = [c['href'] for c in fujian_list if c.get('href', '') != '']
        base_url = 'https://zscqj.beijing.gov.cn/'
        attach_url = [d if 'http' in d else base_url + d.lstrip('.') for d in attach_url]

        # Local save directory for attachments. Raw f-string: the original
        # non-raw literal only worked because '\w' etc. are invalid escapes
        # passed through verbatim (DeprecationWarning, future SyntaxError);
        # the value is byte-identical.
        file_name = rf'D:\work\code\政策\{self.city}\{self.platform}\附件'

        # Remaining common fields are extracted by the base class.
        common_data = self.extract_common_fields(content)

        dic = {
            'detail_url': detail_url,
            'platform': self.platform,
            'city': self.city,
            'title': title,
            'detail_content': detail_content,
            'attach_url': attach_url,
            **common_data
        }

        # Validity status ("有  效  性" label, full-width spaced). Guarded:
        # pages without the label previously raised AttributeError.
        validity_tag = content.find(string=re.compile('有  效  性'))
        dic['validity'] = validity_tag.find_next().text.strip() if validity_tag else ''

        # Download attachments and append the record to the JSON output.
        self.download_to_json(dic, fujian_list, file_name, 'beijing.json')