#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :flk_npc.py
# @Time      :2023/11/9 
# @Author    :CL
# @email     :1037654919@qq.com
# Scrape: https://flk.npc.gov.cn/fl.html — 651 entries in total
import math
import time

import requests
from utils import mongo_manager,get_kuai_proxy
# MongoDB collection handle: collection "flfg_npc" in database "public_data".
flfg_npc = mongo_manager("flfg_npc",db='public_data')
# Browser-like headers copied from a real Firefox XHR so the API accepts us.
headers = {
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
    "Accept-Encoding": "gzip, deflate, br",
    "X-Requested-With": "XMLHttpRequest",
    "Connection": "keep-alive",
    "Referer": "https://flk.npc.gov.cn/fl.html",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "Pragma": "no-cache",
    "Cache-Control": "no-cache"
}
# Cookies captured from a browser session.
# NOTE(review): "wzws_sessionid" looks like an anti-bot session token and
# presumably expires — refresh it from a live browser if requests start failing.
cookies = {
    "wzws_sessionid": "gmZhYjg1ZoAxODMuMTMxLjI1My4xOTCBNzY2YjZmoGVMOic=",
    "Hm_lvt_54434aa6770b6d9fef104d146430b53b": "1699494441",
    "Hm_lpvt_54434aa6770b6d9fef104d146430b53b": "1699495101"
}
# API endpoint for fetching the paginated law/regulation listing
def get_flfg(url="https://flk.npc.gov.cn/api/", page=1, ftype="flfg", timeout=30):
    """Fetch one page of the law/regulation listing from the NPC database API.

    Args:
        url: API endpoint to query.
        page: 1-based page number (10 entries per page).
        ftype: listing category code, e.g. "flfg", "xzfg", "jcfg", "sfjs", "dfxfg".
        timeout: request timeout in seconds (new parameter; defaults keep
            existing callers unaffected and prevent the request hanging forever).

    Returns:
        The decoded JSON payload on HTTP 200, otherwise None.
    """
    params = {
        "type": ftype,
        "searchType": "title;vague",
        "sortTr": "f_bbrq_s;desc",
        "gbrqStart": "",
        "gbrqEnd": "",
        "sxrqStart": "",
        "sxrqEnd": "",
        "sort": "true",
        "page": page,
        "size": "10",
        # Cache-busting timestamp in milliseconds, mimicking the site's own XHR.
        "_": int(1000 * time.time()),
    }
    response = requests.get(url, headers=headers, cookies=cookies,
                            params=params, timeout=timeout)
    print(f'type:{ftype},page:{page},{response}')
    if response.status_code == 200:
        return response.json()
    # Explicit None on non-200 (was an implicit fall-through return).
    return None
# Fetch the Constitution section: https://flk.npc.gov.cn/xf.html
def get_xf():
    """Scrape the Constitution page and store it as one Mongo document.

    Parses the first HTML table on the page into a list of row dicts and
    inserts a single record (keyed by the page URL) into ``flfg_npc``.
    Insert errors (e.g. duplicate key) are printed, not raised — best effort.
    """
    url = "https://flk.npc.gov.cn/xf.html"
    # Fixed: dropped the redundant function-local `import requests` (already
    # imported at module level) and added a timeout so the call cannot hang.
    response = requests.get(url, headers=headers, cookies=cookies, timeout=30)
    print(response)
    if response.status_code != 200:
        return
    # Lazy imports: bs4/pandas are only needed for this one-off scrape.
    from bs4 import BeautifulSoup
    import pandas as pd
    soup = BeautifulSoup(response.text, 'lxml')
    table = soup.select('table')[0]
    tbl = pd.read_html(table.prettify())[0]
    # Promote the table's last row to column labels (the page puts the
    # header row at the bottom as parsed by read_html).
    tbl.columns = tbl.loc[len(tbl) - 1]
    print(tbl.columns)
    article_infos = [{col: str(row[col]) for col in tbl.columns}
                     for _, row in tbl.iterrows()]
    result = {  # fixed typo: was `reslut`
        "_id": "https://flk.npc.gov.cn/xf.html",
        "id": "https://flk.npc.gov.cn/xf.html",
        "title": "中华人民共和国宪法",
        "office": "全国人民代表大会",
        "type": "宪法",
        "status": "2",
        "data": article_infos,
        "url": "https://flk.npc.gov.cn/xf.html",
    }
    try:
        flfg_npc.insertOne(result)
    except Exception as e:
        # Best effort: report duplicate-key or Mongo errors and continue.
        print(e)


# Main driver: crawl laws and regulations from https://flk.npc.gov.cn/index.html
def main(start_page=21):
    """Crawl every category's paginated listing and insert each entry.

    Args:
        start_page: first page to fetch in every category (new parameter;
            the default of 21 preserves the original hard-coded resume point
            left over from a previous partial run).
    """
    # Total entry counts per category, observed at crawl time (2023-11).
    pagesinfo = {'flfg': 651, 'xzfg': 713, 'jcfg': 1, 'sfjs': 827, 'dfxfg': 20495}
    for ftype, total in pagesinfo.items():
        for page in range(start_page, math.ceil(total / 10) + 1):
            time.sleep(1)  # throttle: be polite to the server
            payload = get_flfg(page=page, ftype=ftype)
            if not payload:
                # Fixed: non-200 responses return None; skip instead of
                # crashing with `TypeError: 'NoneType' object is not subscriptable`.
                continue
            for data in payload['result']['data']:
                data["_id"] = data['id']
                # Make the relative detail-page URL absolute.
                data['url'] = str(data['url']).replace('./', 'https://flk.npc.gov.cn/')
                try:
                    flfg_npc.insertOne(data)
                except Exception as e:
                    # Best effort: duplicate-key errors are printed and skipped.
                    print(e)

# Script entry point. The crawl steps are toggled on/off by (un)commenting;
# both were left disabled here, so running the script only opens and closes
# the Mongo connection.
if __name__ == "__main__":
    print()
    # main()
    # get_xf()
    flfg_npc.close()


