import requests
from bs4 import BeautifulSoup
import re  # regular-expression module
import time
from SqlCommend import DB

url = "http://law.pkulaw.com"  # base URL of the law database site
db = DB()  # project-local database helper used for the inserts below

# Browser-like User-Agent so the server serves the normal HTML pages.
head = {
    "User-Agent":
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.50"
}
# The search endpoint is queried via POST, so the form fields must be
# supplied as a dict through the `data=` parameter of requests.post.
# NOTE: this dict is mutated in place by getLawInformation (the two
# page-index fields below are overwritten on every call).
dat = {
    "Menu": "fl",
    "SearchKeywordType": "Title",  # search against law titles only
    "Library": "falv",
    "ShowType": 'Default',
    "Pager.PageIndex": "1",   # overwritten per request
    "Pager.PageSize": "10",   # results per page; the scrape loop assumes 10
    "OldPageIndex": "0",      # overwritten per request
    # NOTE(review): "X-Requested-With" is normally an HTTP header, not a
    # form field — confirm the server really expects it in the POST body.
    "X-Requested-With": "XMLHttpRequest"
}


def _normalize_date(raw):
    # Dates shorter than "YYYY.MM.DD" (10 chars) carry only year.month;
    # pad them with ".01" so every stored date has a full day component.
    if len(raw) < 10:
        return raw[:7] + ".01"
    return raw[:10]


def getLawInformation(s, e):
    """Fetch one page of law search results and insert each law into the DB.

    Args:
        s: previous page index, sent as the "OldPageIndex" form field.
        e: page index to fetch, sent as "Pager.PageIndex"; also used to
           derive the primary key (e * 10 + j + 1) of each inserted row.
    """
    print(f"第{e}页\n")
    time.sleep(1)  # throttle so the server is not hammered
    dat['OldPageIndex'] = s
    dat['Pager.PageIndex'] = e
    # Context manager guarantees the connection is released even if parsing
    # or a DB insert raises (the original trailing close() could be skipped).
    with requests.post(url + '/fl/search/RecordSearch',
                       data=dat,
                       headers=head) as resp:
        page = BeautifulSoup(resp.text, "html.parser")  # HTML parser
        # Each title anchor appears twice in the markup, hence the *2 stride.
        titles = page.find_all("a", attrs={"target": "_blank", "flink": "true"})
        infos = page.find_all("div", attrs={"class": "related-info"})
        # Guard against a short final page instead of assuming 10 results.
        for j in range(min(10, len(infos), len(titles) // 2)):
            content = getLawContent(url + titles[j * 2].get("href"))
            state = infos[j].text.strip().split('/')  # status / dates
            appear = _normalize_date(state[-2].strip())  # publication date
            start = _normalize_date(state[-1].strip())   # effective date
            name = titles[j * 2].text.split(' ', 1)[1]  # drop leading number
            sql = "insert into Report values(%s,%s,%s,%s,%s,%s)"
            rows = db.executeUpdate(sql, [
                e * 10 + j + 1, name, state[0].strip(), appear, start, content
            ])
            if rows > 0:
                print(name + " Accept")
            else:
                print(name + " Wrong")


def getLawContent(link):
    """Download one law's full-text page and return its articles as a string.

    Args:
        link: absolute URL of the law's detail page (no query string).

    Returns:
        The text of every "tiao-wrap" div, one per line, followed by the
        database's standard citation disclaimer and a trailing newline.
    """
    # '?type=text' selects the plain-text rendering of the law.
    # Context manager releases the connection even when parsing raises
    # (e.g. fullText is None) — the original close() was skipped then.
    with requests.get(link + '?type=text', headers=head) as resp:
        page = BeautifulSoup(resp.text, "html.parser")  # HTML parser
        fullText = page.find("div", id="divFullText", class_="fulltext")
        divs = fullText.find_all("div",
                                 attrs={"class": re.compile('tiao-wrap')})
        # join() instead of repeated += : linear instead of quadratic.
        parts = [bz.text for bz in divs]
    parts.append('*本数据库提供的电子文本正式引用时请与标准文本核对。')
    return '\n'.join(parts) + '\n'


# Crawl all 106 result pages. The very first request uses the special
# (OldPageIndex=1, PageIndex=0) pair; every later page p sends (p - 1, p).
getLawInformation(1, 0)
for page_no in range(1, 106):
    getLawInformation(page_no - 1, page_no)