"""
这里下载某一天的某一页 具体到页面
"""
import json
import pickle
import sys
import time

import facade
import requests
from bs4 import BeautifulSoup
from xjlibrary.mdatetime.mtime import getTodayDate
from xjlibrary.our_file_dir import BaseDir

from cnipr.Step1_login import Login

# Paths resolved relative to this source file: a "cookies" directory that
# holds the pickled session cookies, and the MySQL config file "db.ini".
curPath = BaseDir.get_file_dir_absolute(__file__)
cookiedir = BaseDir.get_new_path(curPath, "cookies")
configfile = BaseDir.get_new_path(curPath, "db.ini")


class DownPage():
    """Download one result page of cnipr.com patent search results and
    persist the parsed records into MySQL.

    Workflow: ``set_cookie()`` loads (or creates, via login) the session
    cookies; ``select()`` fetches pending ``(pub_date, page)`` work rows;
    ``down_page()`` posts the search form; ``para_search()`` parses the
    HTML and inserts one row per result item; ``update_db()`` marks the
    work row as done.
    """

    # Ordered (label, dict-key) pairs used to parse the <td> cells of one
    # result item.  The order mirrors the original matching priority of the
    # elif chain; "申请号" is handled separately because it is a substring
    # of "分案原申请号" and needs an explicit exclusion.
    _TD_FIELDS = (
        ("申请日", "app_date"),
        ("公开(公告)号", "pub_no"),
        ("公开(公告)日", "pub_date"),
        ("同日申请", "oneday"),
        ("分案原申请号", "old_app_no"),
        ("申请(专利权)人", "applicant"),
        ("分类号", "ipc_no"),
        ("优先权", "priority"),
        ("摘要", "abstracts"),
    )

    def __init__(self):
        # Search endpoint; the same URL is also used as the Referer header.
        self.url = "http://search.cnipr.com/search!doOverviewSearch.action"
        # Form payload template; "strWhere" and "start" are overwritten per
        # request in down_page() with the publication date and page number.
        self.postdata = {
            "wgViewmodle": "",
            "strWhere": "公开（公告）日=(20180904)",
            "start": "2",
            "limit": "10",
            "option": "2",
            "iHitPointType": "115",
            "strSortMethod": "RELEVANCE",
            "strSources": "FMZL,SYXX,WGZL,FMSQ",
            "strSynonymous": "",
            "yuyijs": "",
            "filterChannel": "",
            "keyword2Save": "",
            "key2Save": "",
            "forward": "",
            "otherWhere": "",
            "username": "",
            "password": ""
        }
        # Browser-like headers so the request passes the site's checks.
        self.header = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Content-Type": "application/x-www-form-urlencoded",
            "Host": "search.cnipr.com",
            "Origin": "http://search.cnipr.com",
            "Pragma": "no-cache",
            "Referer": "http://search.cnipr.com/search!doOverviewSearch.action",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36"
        }
        self.sn = requests.session()
        self.logger = facade.get_streamlogger()
        # One cookie file per day; can be overridden via set_cookiefile().
        self.cookiefile = BaseDir.get_new_path(cookiedir, str(getTodayDate()) + ".txt")
        # Work-item state, set by the caller before down_page() is invoked.
        # NOTE(review): "pub_data" looks like a typo for "pub_date", but the
        # name is part of the external interface (set from __main__), so it
        # is kept as-is.
        self.pub_data = ""
        self.page = ""
        self.mysqlutils = facade.MysqlUtiles(configfile, "db", logger=self.logger)

    def set_cookiefile(self, cookie_file):
        """Override the default (per-day) cookie file path.

        :param cookie_file: path of the pickled-cookie file to use.
        """
        self.cookiefile = cookie_file

    def set_cookie(self):
        """Load session cookies from the cookie file, logging in first to
        create the file when it does not exist yet.

        :raises RuntimeError: if the cookie file is still missing after a
            login attempt.  (BUGFIX: the original recursed unboundedly when
            Login().login() failed to produce the file.)
        """
        self.logger.info(self.cookiefile)
        if not BaseDir.is_file_exists(self.cookiefile):
            Login().login()
            if not BaseDir.is_file_exists(self.cookiefile):
                raise RuntimeError("cookie file missing after login: {}".format(self.cookiefile))
        with open(self.cookiefile, "rb") as f:
            self.sn.cookies.update(pickle.load(f))

    def select(self):
        """Return up to 100 pending (pub_date, page) rows from the work table."""
        sql = "select pub_date,page from search where stat=0 limit 100"
        rows = self.mysqlutils.SelectFromDB(sql)
        return rows

    def down_page(self, retrynum=0, relogin=0):
        """Download and process one search-result page.

        Retries up to 3 times on failure, then re-logs-in once; if the
        re-login retry chain also fails, the process exits.

        :param retrynum: number of consecutive failed attempts so far.
        :param relogin: 1 when the current attempt follows a re-login.
        :raises Exception: if pub_data/page were not set by the caller.
        """
        if retrynum > 3:
            if relogin > 0:
                self.logger.info("重新登陆无效 结束程序查找原因")
                sys.exit(-1)
            self.logger.info("已经连续下载3次 现在怀疑是登陆原因 调用重新登陆")
            Login().login()
            self.down_page(0, 1)
            # BUGFIX: without this return the original fell through and
            # re-executed the request after the recursive call finished.
            return
        if self.pub_data == "" or self.page == "":
            raise Exception("没有设置请求日期")
        self.postdata["strWhere"] = "公开（公告）日=({})".format(self.pub_data)
        self.postdata["start"] = self.page
        self.set_cookie()
        self.logger.info(self.postdata["strWhere"])
        self.logger.info(self.postdata)
        BoolResult, errString, r = facade.BaseRequestPost(url=self.url,
                                                          sn=self.sn,
                                                          data=self.postdata,
                                                          mark="g_filter_left",
                                                          headers=self.header,
                                                          timeout=(30, 60)
                                                          )
        if BoolResult:
            self.logger.info("搜索成功 现在解析数据并存入数据库")
            self.para_search(r)
            self.update_db()
            time.sleep(10)
        else:
            if errString == "Feature err":
                if r.text.find("你没有访问该页面的权限") > -1:
                    Login().login()
                    self.down_page()
                    # BUGFIX: without this return the original fell through
                    # into the sleep-and-retry path after the recursive call.
                    return
            self.logger.info("搜索页面失败 现在睡眠1分钟后重新下载...")
            time.sleep(60)
            retrynum += 1
            self.down_page(retrynum)

    def update_db(self):
        """Mark the current (pub_data, page) work row as done (stat=1)."""
        sql = "update search set stat=1 where pub_date='{}' and page={}".format(self.pub_data, self.page)
        self.mysqlutils.ExeSqlToDB(sql)

    def para_search(self, r):
        """Parse the search-result HTML and insert one row per result item
        into the ``pubno`` table.

        Exits the process when the page contains no result list, since that
        indicates a broken request/session rather than genuinely empty data.

        :param r: requests response object holding the result page HTML.
        """
        BaseDir.single_write_file(r.text, "./test.html")
        soup = BeautifulSoup(r.text, "lxml")
        div_tag_list = soup.find("div", class_="g_list")
        if not div_tag_list:
            self.logger.info("该请求没有数据 请检查")
            sys.exit(-1)

        for div_item in div_tag_list.find_all("div", class_="g_item"):
            dicts = {
                "title": div_item.find("li", class_="g_li")["title"],
                "legal_status": div_item.find("li", class_="g_li2").get_text(),
            }
            for td in div_item.find("table").find_all("td"):
                tdstring = "".join(td.stripped_strings)
                # "申请号" is a substring of "分案原申请号", so it needs an
                # explicit exclusion instead of living in _TD_FIELDS.
                if tdstring.find("申请号") > -1 and tdstring.find("分案原") == -1:
                    dicts["app_no"] = tdstring.replace("申请号：", "")
                    continue
                for label, key in self._TD_FIELDS:
                    if tdstring.find(label) > -1:
                        dicts[key] = tdstring.replace(label + "：", "")
                        break
            div_tag = div_item.find("div", class_="g_cont_rig")
            if div_tag:
                coverurl_tag = div_tag.find("a")
                dicts["coverurl"] = coverurl_tag["href"] if coverurl_tag else ""
            jsmsg = json.dumps(dicts, ensure_ascii=False)
            # BUGFIX: escape quotes in the whole JSON payload at SQL-build
            # time.  The original escaped only the abstract, and did so
            # before json.dumps, corrupting the stored JSON while leaving
            # every other field able to break the SQL literal.
            # BUGFIX: use dicts.get() — the original referenced the bare
            # loop-locals pub_no/app_no/pub_date, which raised NameError for
            # items missing those fields (or silently reused the previous
            # item's values).
            # NOTE(review): still string-built SQL; parameterized queries
            # would be safer if facade.MysqlUtiles supports them — confirm.
            sql = ("insert ignore into pubno (pub_no,app_no,jsonmsg,pub_date,page) "
                   "values ('{}','{}','{}','{}',{})").format(
                dicts.get("pub_no", ""),
                dicts.get("app_no", ""),
                jsmsg.replace("'", "''"),
                dicts.get("pub_date", ""),
                self.page)
            self.mysqlutils.ExeSqlToDB(sql)


if __name__ == "__main__":
    downpage = DownPage()
    downpage.set_cookie()
    while True:
        rows = downpage.select()
        if not rows:
            break
        for row in rows:
            downpage.pub_data = row[0]
            downpage.page = str(row[1])
            downpage.down_page()

