import json
import math
import pickle
import random

import warnings

import click
import facade
import requests
import sys
from xjlibrary.configread import MyConfigParser
from xjlibrary.mdatetime.mtime import getTodayDate, MDateTime
from xjlibrary.our_file_dir import BaseDir
from xjlibrary.tools.BaseIp import get_ip
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError

# Resolve paths relative to this file: the DB config lives one directory up,
# the search-result download directory lives near the project root.
curPath = BaseDir.get_file_dir_absolute(__file__)
upPath = BaseDir.get_upper_dir(curPath, -1)
configfile = BaseDir.get_new_path(upPath, "db.ini")
topPath = BaseDir.get_upper_dir(upPath, -2)
searchPath = BaseDir.get_new_path(topPath, "download", "sipogov", "download", "search")
BaseDir.create_dir(searchPath)
# Directory holding pickled session cookies (written by the login step).
cookiedir = BaseDir.get_new_path(upPath, "cookie")
# Make the sibling Step1_login module importable from the parent directory.
sys.path.append(upPath)
from Step1_login import login_once


class SearchDown(object):
    """
    Search the CNIPA patent system by date (or by applicant, per the
    configured expression), then download and parse each result page.

    Page tasks are tracked in a MySQL table (`page_school`) and raw/parsed
    results are stored in MongoDB (`cnipa` database).
    """

    def __init__(self):
        self.logger = facade.get_streamlogger()
        # Form data for the initial table search.  The active expression
        # searches by applicant; the commented-out one searched by
        # publication region + publication date.
        self.search_data = {
            # "searchCondition.searchExp": "公开（公告）号=(CN+ OR HK+ OR MO+ OR TW+) AND 公开（公告）日=20181201",
            "searchCondition.searchExp": '申请（专利权）人=("华东理工大学" "East China University of Science and Technology")',
            "searchCondition.dbId": "VDB",
            "searchCondition.searchType": "Sino_foreign",
            "searchCondition.extendInfo['MODE']": "MODE_TABLE",
            "searchCondition.extendInfo['STRATEGY']": "STRATEGY_CALCULATE",
            "searchCondition.originalLanguage": "",
            "searchCondition.targetLanguage": "",
            "wee.bizlog.modulelevel": "0200201",
            "resultPagination.limit": "12"
        }
        self.header = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Connection": "keep-alive",
            "Host": "www.pss-system.gov.cn",
            "Origin": "http://pss-system.cnipa.gov.cn",
            "Pragma": "no-cache",
            "Referer": "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/tableSearch-showTableSearchIndex.shtml",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }
        # URL for the initial search request.
        self.url = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/executeTableSearch0529-executeCommandSearch.shtml"
        # URL for paging through search results.
        self.url2 = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/showSearchResult-startWa.shtml"

        self.proxy = {
            "http": "192.168.30.176:8119",
            "https": "192.168.30.176:8119"
        }
        self.mysqlutils = facade.MysqlUtiles(configfile,
                                             "db",
                                             logger=self.logger)
        # Template of paging parameters, filled in by down_search_page().
        self.downpage_value_dicts = {}
        self.date = "20181201"
        self.connection = MongoClient(
            "mongodb://xujiangrw:vipdatacenter@192.168.30.171:27017",
            # If True and the server is running without journaling, block
            # until the server has synced all data files to disk.
            fsync=False
        )
        self.db = self.connection['cnipa']
        self.sn = requests.session()
        self.cf = MyConfigParser(configfile).set_keep_keys_case().read_config()
        self.userloginname = ""
        self.j_username = ""
        self.j_password = ""
        # Consecutive 404 responses seen while paging; reset on success.
        self.count404 = 0

    def set_data(self, date):
        """
        Set the search date.
        :param date: date string, e.g. "20181201"
        :return: None
        """
        self.date = date
        # self.search_data["searchCondition.searchExp"] = "公开（公告）号=(CN+ OR HK+ OR MO+ OR TW+ ) AND 公开（公告）日={}".format(
        #     date)

    def set_cookie(self):
        """
        Load a pickled cookie jar for the current proxy IP / user / day into
        the session; if no cookie file exists, log in and retry.
        NOTE(review): if login never produces a cookie file this recurses
        without bound — confirm login_once always writes one.
        """
        ip = get_ip(self.proxy)
        cookiepath = BaseDir.get_new_path(cookiedir, ip + "_" + str(
            getTodayDate()) + "_" + self.userloginname + ".txt")
        cookiepath_login = BaseDir.get_new_path(cookiedir,
                                                ip + "_" + str(
                                                    getTodayDate()) + "_" + self.userloginname + "_login.txt")

        if BaseDir.is_file_exists(cookiepath):
            self.logger.info("set cookie:" + cookiepath)
            with open(cookiepath, "rb") as f:
                self.sn.cookies.update(pickle.load(f))
        elif BaseDir.is_file_exists(cookiepath_login):
            self.logger.info("set cookie:" + cookiepath_login)
            with open(cookiepath_login, "rb") as f:
                self.sn.cookies.update(pickle.load(f))
        else:
            # No cookie on disk: log in again, then pick up the fresh cookie.
            self.logger.info("无cookie 重新登陆")
            self.login()
            self.set_cookie()

    def search_404(self):
        """
        Perform one dummy search to keep the server-side session warm and
        avoid 404 errors on subsequent paging requests.
        :return: True on success, False on failure
        """
        url = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/pageIsUesd-pageUsed.shtml"
        BoolResult, errString, r = facade.BaseRequestPost(url,
                                                          sn=self.sn,
                                                          proxies=self.proxy,
                                                          endstring="",
                                                          mark="IP",
                                                          allow_redirects=True,
                                                          headers=self.header,
                                                          timeout=(30, 60))
        if BoolResult:
            print("搜索成功")
            return True
        else:
            print("搜索失败")
            return False

    def down_search_page(self):
        """
        Download the first result page, work out the total page count,
        queue one `page_school` row per remaining page, parse page 1 and
        store its raw JSON in MongoDB.  Retries itself on request failure.
        :return: None
        """
        self.logger.info("search one page get all pages")
        self.logger.info(self.search_data)
        BoolResult, errString, r = facade.BaseRequestPost(self.url,
                                                          sn=self.sn,
                                                          proxies=self.proxy,
                                                          data=self.search_data,
                                                          endstring="",
                                                          mark="searchResultRecord",
                                                          allow_redirects=True,
                                                          headers=self.header,
                                                          timeout=(30, 60))
        if BoolResult:
            dicts = json.loads(r.text)
            # Total number of hits for this search.
            totalCount = dicts["resultPagination"]["totalCount"]
            if totalCount == "0" or totalCount == 0:
                # Nothing found: record an already-done (stat=1) placeholder row.
                sql = "insert ignore into `page_school` (`pub_date`,`page`,`allcount`,`parameter`,`stat`) value ('%s','%s','%s','%s',1)"
                sql = sql % (self.date, "0", "0", "")
                self.logger.info(sql)
                self.mysqlutils.ExeSqlToDB(sql, errExit=True)
                return
            # Fields required by the paging request.
            searchKeywords = dicts["searchResultDTO"]["dealSearchKeywords"]
            self.downpage_value_dicts["searchCondition.searchKeywords"] = ",".join(searchKeywords)

            strategy = dicts["searchCondition"]["strategy"]
            if strategy == "null":
                strategy = ""
            self.downpage_value_dicts["searchCondition.strategy"] = strategy
            self.downpage_value_dicts["searchCondition.resultMode"] = "SEARCH_MODE"
            self.downpage_value_dicts["searchCondition.targetLanguage"] = ""
            self.downpage_value_dicts["searchCondition.literatureSF"] = dicts["searchResultDTO"]["literatureSF"]
            self.downpage_value_dicts["searchCondition.dbId"] = ""
            self.downpage_value_dicts["searchCondition.executableSearchExp"] = dicts["searchResultDTO"][
                "executableSearchExp"]
            self.downpage_value_dicts["searchCondition.searchExp"] = dicts["searchCondition"]["searchExp"]
            self.downpage_value_dicts["resultPagination.limit"] = "12"
            self.downpage_value_dicts["resultPagination.sumLimit"] = "10"
            self.downpage_value_dicts["resultPagination.start"] = "36"
            self.downpage_value_dicts["resultPagination.totalCount"] = str(totalCount)
            self.downpage_value_dicts["searchCondition.sortFields"] = "-APD, +PD"
            self.downpage_value_dicts["searchCondition.searchType"] = "Sino_foreign"
            self.downpage_value_dicts["searchCondition.originalLanguage"] = ""
            self.downpage_value_dicts["searchCondition.extendInfo['MODE']"] = "MODE_TABLE"
            self.downpage_value_dicts["searchCondition.extendInfo['STRATEGY']"] = "STRATEGY_CALCULATE"

            # One task row per page from page 2 onward (12 records per page).
            listvalue = []
            for i in range(2, int(math.ceil(int(totalCount) / 12) + 1)):
                self.downpage_value_dicts["resultPagination.start"] = str((i - 1) * 12)
                jsonmsg = json.dumps(self.downpage_value_dicts, ensure_ascii=False)
                listvalue.append((self.date, str(i), totalCount, jsonmsg))
            sql = "insert ignore into `page_school` (`pub_date`,`page`,`allcount`,`parameter`) value (%s,%s,%s,%s)"
            self.mysqlutils.ExeSqlMany(sql, listvalue, errExit=True)
            # BaseDir.single_add_file(BaseDir.get_new_path(searchPath, getTodayDate() + ".txt"), r.text + "\n")
            self.para_page_html(r, "1")
            try:
                # NOTE(review): pymongo Collection.insert() is deprecated
                # (removed in pymongo 4) — insert_one() is the modern call.
                self.db.pagehtml_school.insert({"date": self.date, "page": "1", "html": r.text})
                # self.insert_pageidjson(r.text)
            except DuplicateKeyError as e:
                warnings.warn(str(e))
        else:
            if r is not None:
                self.logger.error(r.text)
            if not self.is_login():
                self.sn = requests.session()
                self.login()
            # BUGFIX: r.text was previously dereferenced without checking
            # whether r was None, which raised AttributeError when the
            # request failed to produce a response at all.
            if r is not None and r.text.find("您的操作太过频繁，为了不影响系统性能，请稍后再试") != -1:
                # Rate-limited by the server: bail out entirely.
                sys.exit(-1)
            time.sleep(20)
            self.set_cookie()
            self.down_search_page()

    def para_page_html(self, r, page):
        """
        Parse the JSON payload of one result page and store each record in
        MongoDB (`pub_all_id_school` and `pageidjson`).
        :param r: response object whose .text is the search-result JSON
        :param page: page number as a string
        :return: True if at least one record was parsed, False otherwise
        """
        count = 0
        dicts = json.loads(r.text)
        listsipo = dicts["searchResultDTO"]["searchResultRecord"]
        for onetable in listsipo:
            count += 1
            requestsid = onetable["fieldMap"]["ID"]
            # Strip the <FONT> highlight markup the server injects.
            app_no = onetable["fieldMap"]["AP"].replace("<FONT>", "").replace("</FONT>", "")
            app_date = onetable["fieldMap"]["APD"].replace("<FONT>", "").replace("</FONT>", "")
            pub_no = onetable["fieldMap"]["PN"].replace("<FONT>", "").replace("</FONT>", "")
            pub_date = onetable["fieldMap"]["PD"].replace("<FONT>", "").replace("</FONT>", "")
            title = onetable["fieldMap"]["TIVIEW"]
            author = onetable["fieldMap"]["INVIEW"]
            cpnum = onetable["fieldMap"]["CPNUM"]
            msg = json.dumps(onetable)
            doc = {"requestsid": requestsid,
                   "app_no": app_no,
                   "app_date": app_date,
                   "pub_no": pub_no,
                   "region": pub_no[:2],
                   "pub_date": pub_date,
                   "title": title,
                   "author": author,
                   "date": self.date,
                   "page": page,
                   "cpnum": cpnum,
                   "stat": 0}
            try:
                self.db.pub_all_id_school.insert(doc)
            except DuplicateKeyError as e:
                warnings.warn(str(e))
            # Second collection stores the raw record JSON without the
            # date/page bookkeeping fields.
            doc["pageidinfo"] = msg
            del doc["date"]
            del doc["page"]
            try:
                self.db.pageidjson.insert(doc)
            except DuplicateKeyError as e:
                warnings.warn(str(e))
        if count == 0:
            self.logger.info("下载的数据有误 将状态写为-1")
            return False
        return True

    def down_page(self, pub_date, page, para):
        """
        Download and parse one result page, then mark its `page_school`
        row done (stat=1) or bad (stat=-1).  On failure, recover the
        session (re-search / re-login / re-cookie) and retry.
        :param pub_date: publication date the task belongs to
        :param page: page number
        :param para: JSON string of paging parameters stored in MySQL
        :return: None
        """
        self.date = pub_date
        datedownpage = json.loads(para)
        # Normalise a missing/serialised-null strategy to the empty string.
        if datedownpage["searchCondition.strategy"] in [None, 'None', '', 'null']:
            datedownpage["searchCondition.strategy"] = ""
        BoolResult, errString, r = facade.BaseRequestPost(self.url2,
                                                          sn=self.sn,
                                                          proxies=self.proxy,
                                                          data=datedownpage,
                                                          endstring="",
                                                          mark="searchResultRecord",
                                                          allow_redirects=True,
                                                          headers=self.header,
                                                          timeout=(30, 60))
        if BoolResult:
            self.count404 = 0
            if r.text.find("非法索引") != -1:
                # "illegal index" — the paging parameters are rejected; abort.
                self.logger.info(datedownpage)
                self.logger.error("出现非法索引,翻页失败")
                sys.exit(-1)
            # BaseDir.single_add_file(BaseDir.get_new_path(searchPath, getTodayDate() + ".txt"), r.text + "\n")
            resultbool = self.para_page_html(r, str(page))
            try:
                self.db.pagehtml_school.insert({"date": self.date, "page": page, "html": r.text})
                # self.insert_pageidjson(r.text)
            except DuplicateKeyError as e:
                warnings.warn(str(e))
            stat = 1
            if not resultbool:
                stat = -1
            sql = "update page_school set stat={} where page='{}' and pub_date='{}' ".format(stat, page, pub_date)
            self.mysqlutils.ExeSqlToDB(sql)
        else:
            if r is not None:
                print("count404 is:" + str(self.count404))
                self.logger.error(r.text)
                if r.status_code == 404:
                    self.count404 += 1
                    if self.is_login():
                        # Still logged in: a fresh search usually clears the 404.
                        results = self.search_404()
                        if not results:
                            self.logger.info("遭遇404后搜索失败，程序逻辑需要修复")
                            sys.exit(-1)
                        else:
                            self.logger.info("搜索成功 虽然本次会跳出循环但下次应该会成功")
                if self.count404 > 5:
                    # Too many consecutive 404s: start a brand-new session.
                    self.sn = requests.session()
                    self.login()
            if not self.is_login():
                self.sn = requests.session()
                self.login()
            # self.logger.info(datedownpage)
            self.set_cookie()
            self.down_page(pub_date, page, para)
        # Random pause between pages to avoid tripping the rate limiter.
        time.sleep(random.randint(8, 13))

    def insert_pageidjson(self, html):
        """
        Store each raw search-result record from `html` (a JSON string)
        into the `pageidjson` collection keyed by its request ID.
        :param html: search-result JSON text
        :return: None
        """
        jsonmsg = json.loads(html)
        arraysearch = jsonmsg["searchResultDTO"]["searchResultRecord"]
        for onedata in arraysearch:
            requestsid = onedata["fieldMap"]["ID"]
            msg = json.dumps(onedata)
            try:
                self.db.pageidjson.insert({'requestsid': requestsid, "pageidinfo": msg})
            except DuplicateKeyError as e:
                warnings.warn(str(e))

    def is_login(self):
        """
        Check whether the current session cookie is still logged in by
        fetching the portal index page.
        :return: True if logged in, False otherwise (including request failure)
        """
        url = "http://pss-system.cnipa.gov.cn/sipopublicsearch/portal/uiIndex.shtml"
        header = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Cache-Control": "no-cache",
            "Host": "www.pss-system.gov.cn",
            "Pragma": "no-cache",
            "Proxy-Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36"
        }
        BoolResult, errString, r = facade.BaseRequest(url,
                                                      sn=self.sn,
                                                      proxies=self.proxy,
                                                      mark="欢迎访问专利检索及分析",
                                                      allow_redirects=True,
                                                      headers=header,
                                                      timeout=(30, 60))
        if BoolResult:
            if r.text.find("请登录") > -1:
                # The page asks us to log in: cookie is invalid.
                print("登陆是失败的，cookie 无效")
                return False
            else:
                # Logged in: cookie is valid.
                print("登陆是成功的，cookie有效")
                return True
        # BUGFIX: previously fell through and returned None implicitly on
        # request failure; make the falsy result explicit.
        return False

    def login(self):
        """
        Invoke the external login routine for the current account.
        :return: None
        """
        login_once(self.userloginname, self.j_username, self.j_password, self.proxy, self.logger)

    def selectDB(self):
        """
        Fetch all pending (stat=0) page tasks.
        :return: iterable of (pub_date, page, parameter) rows
        """
        sql = "select pub_date,page,parameter from page_school where stat=0"
        rows = self.mysqlutils.SelectFromDB(sql)
        return rows

    def select_date(self):
        """
        Fetch the earliest publication date present in `page_school`.
        :return: a single row, or a falsy value if the table is empty
        """
        sql = "select pub_date from page_school GROUP BY pub_date ORDER BY pub_date ASC limit 1"
        rows = self.mysqlutils.SelectFromDBFetchOne(sql)
        return rows

    def set_proxys(self, proxy=None):
        """
        Set the proxy mapping used for all requests (None disables proxying).
        :param proxy: requests-style proxy dict, e.g. {"http": ..., "https": ...}
        :return: None
        """
        self.proxy = proxy
        # if self.proxy:
        #     self.header["X-Forwarded-For"] = proxy["http"].split(":")[0]

    def set_user(self, ):
        """
        Pick the least-recently-used active account from the `user` table,
        stamp it as used now, and load its credentials onto this instance.
        :return: None
        """
        sql = "select `username`,`j_username`,`j_password` from `user` where stat=1 order by `createtime` ASC limit 1"
        row = self.mysqlutils.SelectFromDBFetchOne(sql)
        sql = "update `user` set `createtime`='{}' where `username`='{}'".format(
            MDateTime.get_msec_time()[0], row[0])
        self.mysqlutils.ExeSqlToDB(sql)
        self.userloginname = row[0]
        self.j_username = row[1]
        self.j_password = row[2]


def single_task_mode():
    """
    Single-task mode: task selection and execution both happen inside this
    function, and only a single account can be used.
    :return: never returns (infinite polling loop)
    """
    start_time = time.time()
    down = SearchDown()
    down.set_data("20181130")
    down.set_proxys({
        "http": "192.168.30.3:8080",
        "https": "192.168.30.3:8080",
    })
    down.set_user()
    down.set_cookie()
    down.is_login()
    while True:
        pending = down.selectDB()
        if not pending:
            # No open page tasks: warm the session and create fresh tasks.
            down.search_404()
            down.down_search_page()
        else:
            for task in pending:
                down.down_page(task[0], task[1], task[2])
                print('耗费时间: %d' % int(time.time() - start_time) + '秒')
        time.sleep(60)


"""
**********************多进程分布式代码**************************
"""
import queue
import time
from multiprocessing.managers import BaseManager


# Create a QueueManager subclass of our own:
class QueueManager(BaseManager):
    """Client-side manager; queue accessors are registered by name only."""
    pass


class NodeTask(object):
    """
    Worker-side handle onto the task master's shared queues.

    Usage: construct, then conn(), then set_task_result_obj(); after that
    `task` and `result` are network proxies for the master's queues.
    """

    def __init__(self):
        self.register()
        # Host running task_master.py.
        self.server_addr = '192.168.30.171'
        print('Connect to server %s...' % self.server_addr)
        # Port and authkey must match task_master.py exactly.
        self.m = QueueManager(address=(self.server_addr, 5000), authkey=b'abc')
        self.task = None
        self.result = None

    def register(self):
        # This QueueManager only *fetches* queues over the network, so the
        # registrations carry names only, with no callables attached.
        for accessor in ('get_task_queue', 'get_result_queue'):
            QueueManager.register(accessor)

    def conn(self):
        # Establish the network connection to the master.
        self.m.connect()

    def set_task_result_obj(self):
        # Fetch proxy objects for the two shared queues.
        self.task = self.m.get_task_queue()
        self.result = self.m.get_result_queue()


@click.command()
@click.option('--proxy', default='proxy1', type=str, help='请输入使用的代理配置文件的opt_key')
# @click.option('--user', default='userl', type=str, help='请输入用户的sesc')
def task_distributed(proxy):
    """
    Distributed worker: connect to the task master, then loop pulling
    ("down_search_page", date) or ("down_page", date, page, para) tasks
    from the shared queue and executing them.
    :param proxy: opt_key of the proxy entry in the config file ("" = no proxy)
    :return: never returns (infinite worker loop)
    """
    node = NodeTask()
    node.conn()
    node.set_task_result_obj()
    down = SearchDown()
    down.set_data("20181129")
    if proxy == "":
        proxy = None
    else:
        proxy = down.cf.get_value("proxy", proxy)
        proxy = {
            "http": proxy,
            "https": proxy
        }
    down.set_proxys(proxy)
    down.set_user()
    down.set_cookie()
    down.is_login()
    while True:
        try:
            # BUGFIX: a bare .get() blocks forever, so the queue.Empty
            # handler below was unreachable dead code.  A timeout lets the
            # worker periodically report an idle queue as intended.
            n = node.task.get(timeout=60)
            print("请求的参数:")
            print(n)

            if n[0] == "down_search_page":
                down.set_data(n[1])
                results = down.search_404()
                if not results:
                    down.logger.info("遭遇404后搜索失败，程序逻辑需要修复")
                    sys.exit(-1)
                else:
                    down.logger.info("搜索成功 虽然本次会跳出循环但下次应该会成功")
                down.down_search_page()
            elif n[0] == "down_page":
                down.down_page(n[1], n[2], n[3])
            else:
                print("队列出错")
                sys.exit(-1)
        except queue.Empty:
            print('task queue is empty.')


if __name__ == "__main__":
    # Default entry point: run as a distributed worker node.
    task_distributed()

