import requests
requests.adapters.DEFAULT_RETRIES = 5
from lxml import etree
from lxml.html import tostring
import datetime
import re
import time
from public.Logs.log import log1
from public.conifig import connect, sourceNameIDList
from public.utils import save_data, max_date



# ---------------------------------------------------------------------------
# NOTE(review): everything in this section looks like leftover exploration /
# debug code for a *different* site (shui5.cn) — it runs at import time and
# performs live network requests before any function is even defined.
# It cannot simply be deleted yet: the module-level `resp` bound here is
# later read by parse_url() below — confirm intent before removing.
# ---------------------------------------------------------------------------
url = "https://www.shui5.cn/article/NianDuCaiShuiFaGui/"
resp = requests.get(url)  # NOTE(review): no timeout — can hang indefinitely
html = etree.HTML(resp.text)
# First article link on the listing page.
detail_url = html.xpath("//div[@class='xwt1_a']/a/@href")
detail_resp = requests.get(detail_url[0])  # NOTE(review): IndexError if the listing layout changes
# detatil_text = detail_resp.content.decode().replace("'", '"')
# print(detatil_text)
detail_html = etree.HTML(detail_resp.content.decode())
content = detail_html.xpath("//div[@class='arcContent']")
# The "articleResource" div holds source / author / date in one line of markup.
source = detail_html.xpath("//div[@class='articleResource']")[0]
# source_str = tostring(source, encoding = "utf-8", pretty_print = True, method = "html")
# source_str = etree.tostring(source, encoding = "utf-8", pretty_print = True, method = "html")
source_str = (etree.tostring(source, encoding="utf-8", pretty_print=True, method="html"))
print(source_str.decode())
# Pattern captures: (1) source name, (2) author, (3) publish date.
# NOTE(review): brittle — any markup/whitespace change breaks the match, and
# re.search returning None would raise AttributeError on .group() below.
reRules = r'<div class="articleResource">来源：<a href="http://" target="_blank">(.*)</a>  作者：<a href="mailto:">(.*)</a>   人气：<script src=".*"></script>  时间：(.*?)</div>'
resource = re.search(reRules, source_str.decode(), flags=re.DOTALL).group(1)
aouth = re.search(reRules, source_str.decode(), flags=re.DOTALL).group(2)  # NOTE(review): "aouth" is presumably a typo for "author"
date = re.search(reRules, source_str.decode(), flags=re.DOTALL).group(3)
sub_title = detail_html.xpath("//div[@class='articleDes']/text()")


# module_url = "https://www.cnipa.gov.cn/module/web/jpage/dataproxy.jsp"
# Request headers for listing pages.
# Fix: the key was "user_agent", which requests sends verbatim as a
# nonstandard "user_agent:" header; the intended standard header — as
# already used in detail_headers — is "User-Agent".
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"
}
# Request headers for article detail pages.
detail_headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"
}
# POST payload for CNIPA's jpage dataproxy endpoint; columnid and unitid
# are overwritten per source inside cnipaMain().
data = dict(
    col=1,
    webid=1,
    path="https://www.cnipa.gov.cn/",
    columnid=66,
    sourceContentType=1,
    unitid=669,
    webname="国家知识产权局",
    permissiontype=0,
)
# Shared HTTP session reused across all requests (connection pooling).
sess = requests.session()
# sess.keep_alive = False
# Connect to the database; this cursor is shared module-wide and is
# closed by cnipaMain() when the crawl finishes.
cursor = connect.cursor()
category_id = 1  # article category code stored with every saved row


def request_index(url):
    """Fetch one listing page and return its extracted link lists.

    Returns a 2-tuple of (article detail hrefs, next-page hrefs), both
    as lists of strings produced by the XPath queries below.
    """
    page = sess.get(url, headers=headers)
    tree = etree.HTML(page.text)
    article_links = tree.xpath("//div[@class='xwt1_a']/a/@href")
    pager_links = tree.xpath("//a[text()='下一页']/@href")
    return article_links, pager_links


def parse_url(totalRecord, page, max_time, source_name_id):
    """Scan the fetched markup for CNIPA article links and crawl each one.

    Args:
        totalRecord: unused; kept for call-site compatibility.
        page: number of passes to make, or a sized collection (cnipaMain
            passes request_index's next-page link list here).
        max_time: newest stored publish time for this source, or falsy.
        source_name_id: site/source code forwarded to request_detail.

    Returns:
        True as soon as request_detail reports an already-stored article;
        otherwise None after all passes complete.
    """
    # NOTE(review): this matches against the module-level `resp` fetched at
    # import time (the shui5.cn page), not a CNIPA listing response —
    # presumably leftover from a refactor; confirm before relying on it.
    # Fix: raw string — the original non-raw pattern made \d and \w
    # invalid escape sequences (a warning today, an error eventually).
    re_str = r'<a href="(https://www.cnipa.gov.cn/art/\d+/\d+/\d+/\w+.html)" target="_blank">'
    # The response text never changes inside the loop, so match once.
    ret = re.findall(re_str, resp.text)
    # Fix: the caller passes a list here, and range(list) raises TypeError;
    # accept either an int or a sized collection.
    page_count = page if isinstance(page, int) else len(page)
    for _ in range(page_count):
        for r in ret:
            if request_detail(r, max_time, source_name_id):
                return True
        time.sleep(2)  # polite delay between passes


def request_detail(url, max_time, source_name_id):
    """Fetch a single CNIPA article page, extract its metadata, and save it.

    Article metadata lives in <meta> tags, e.g.:
        <meta name="ArticleTitle" content="国家知识产权局专利局合肥代办处专利费用收缴账户变更的公告（第376号）">
        <meta name="pubdate" content="2020-10-09 15:16"/>

    Args:
        url: absolute URL of the article detail page.
        max_time: newest publish datetime already stored for this source
            (falsy when nothing is stored yet).
        source_name_id: site/source code used for logging and persistence.

    Returns:
        True when the article is not newer than ``max_time`` (callers treat
        this as "stop crawling this source"); otherwise None after the
        article has been handed to save_data.
    """
    resp_detail = sess.get(url=url, headers=detail_headers)
    pubDate = re.search('<meta name="pubdate" content="(.*)"/>', resp_detail.text)  # publish time
    article_date = pubDate.group(1) if pubDate else ""
    if article_date:
        # Fix: only parse when the pubdate tag was actually present —
        # strptime("") raises ValueError and crashed the whole crawl.
        article_date_time = datetime.datetime.strptime(article_date, "%Y-%m-%d %H:%M")
        if max_time and article_date_time < max_time:
            log1.info("{}无最新文章".format(source_name_id))
            return True
    articleTitle = re.search('<meta name="ArticleTitle" content="(.*)">', resp_detail.text)  # title
    title = articleTitle.group(1) if articleTitle else ""
    description = re.search('<meta name="description" content="(.*)">', resp_detail.text)  # summary
    sub_title = description.group(1) if description else ""
    siteName = re.search('<meta name="SiteName" content="(.*)">', resp_detail.text)  # source site name
    source_name = siteName.group(1) if siteName else ""
    guid = re.search('<meta name="guid" content="(.*)">', resp_detail.text)  # article ID
    source_id = guid.group(1) if guid else ""
    # Article body markup: from the <h1> heading through the trailing </style>.
    html = re.search('<h1>.*?</style>', resp_detail.text, flags=re.DOTALL)
    content = html.group() if html else ""
    print(title)
    author = ""  # pages expose no author metadata
    image = ""   # pages expose no lead image
    datas = (title, source_name, source_id, url, article_date, content, source_name_id, category_id, sub_title, author, image)
    # Persist the row through the shared module-level DB cursor.
    save_data(cursor, datas)


def cnipaMain():
    """Crawl every configured CNIPA source and persist any new articles.

    For each entry in sourceNameIDList: look up the newest stored publish
    time, patch the shared `data` payload, fetch the source's listing URLs
    from tax_article_from, and walk each listing until an already-stored
    article is reached. Closes the module-level cursor when done.
    """
    start = time.time()
    log1.info("\n ----------------------------------国家知识产权局开始爬取----------------------------------")
    # print("\n ---------------------------------国家知识产权局开始爬取----------------------------------")
    for sn in sourceNameIDList:
        source_name_id = sn.get("source_name_id")  # site/source code
        max_time = max_date(cursor, source_name_id)
        # Retarget the shared POST payload at this source's column/unit.
        data["columnid"] = sn.get("columnid")
        data["unitid"] = sn.get("unitid")
        # NOTE(review): string-formatted SQL; source_name_id comes from local
        # config, but a parameterized query would still be safer.
        sql = "select url from tax_article_from where type=0 and web_type={}".format(source_name_id)
        # cursor = connect.cursor()
        cursor.execute(sql)
        urls = cursor.fetchall()  # ((url), (), ())
        # urls = (("https://www.cnipa.gov.cn/col/col66/index.html",),)
        for u in urls:
            url = u[0]
            result = request_index(url)
            if result:
                # NOTE(review): request_index returns (detail_url, next_url)
                # lists — these names are misleading, and parse_url receives
                # a list where it seems to expect a page count; confirm intent.
                totalRecord, page = result
                parse_url(totalRecord, page, max_time, source_name_id)
    # Closes the module-level cursor, so a second call would fail.
    cursor.close()
    end = time.time()
    log1.info("国家知识产权局抓取完毕, 共耗时:{}s".format(end - start))


if __name__ == '__main__':
    # Script entry point: run one full crawl when executed directly.
    cnipaMain()
