# -*- coding: utf-8 -*-
"""
===============================
@Author     : Zuo WenTao
@Time       : 2024/10/29 14:43
@Description: 
@Change     : 
@File       : main.py
===============================
"""
import json
import os
import platform
import random
import re

import ddddocr
import requests
import uuid
import my_fake_useragent as ua1

from loguru import logger

from lxml import etree

from Api.BaseApi import get_cookies_data
from Api.CRequests import CRequests
from Api.Proxy_long import Proxy_IP
from Api.chaojiying import Chaojiying_Client
from Config.config import proxies_list_gaoxiao
from Api.Request2 import Request2

# Random Chrome User-Agent generator used for outgoing request headers.
ua = ua1.UserAgent(family="chrome")

# Chaojiying (超级鹰) captcha-solving client, shared by the whole module.
# SECURITY NOTE(review): credentials are hard-coded in source — move them to
# config/environment variables.
chaojiying = Chaojiying_Client('saconli', 'li245368', '964503')


class IpError(Exception):
    """Raised when the search endpoint reports the client IP is blocked."""


class NotSearchError(Exception):
    """Raised when the search returns no matching document."""


class DocumentDelivery:
    """Search journal articles on fjour.blyun.com and submit "文献传递"
    (document-delivery) requests so the article is emailed to the account
    bound to the cached login cookies.

    Requires login cookies cached under the Redis pattern "bailian_cookie:*"
    (the cached JSON must also carry an "email_address" entry) and uses the
    Chaojiying service to solve the submission captcha.
    """

    def __init__(self):
        """Load cached cookies/email into a requests session.

        :raises Exception: when the cookie cache is empty or the cached
            entry carries no email address.
        """
        # Candidate proxy pool — currently unused (proxy update is disabled).
        self.proxys = proxies_list_gaoxiao
        self.S = requests.Session()
        cookies = get_cookies_data("bailian_cookie:*")

        if not cookies:
            raise Exception("缓存中cookie为空，请联系管理员")

        # Take the first cached entry; "email_address" rides along with the
        # cookies and is popped so it is not sent as a cookie header.
        cookies = json.loads(cookies[list(cookies.keys())[0]])
        self.email = cookies.pop("email_address", None)
        if not self.email:
            raise Exception("缓存中email为空，请联系管理员")
        self.S.cookies.update(cookies)
        # Captcha images are kept on disk for auditing; path depends on OS.
        self.captcha_path = rf"D:\yingke\data\文献传递Captcha" if platform.system().lower() != 'linux' else "/mnt/yingke/data/文献传递Captcha"
        os.makedirs(self.captcha_path, exist_ok=True)

    def search_document_by_doi(self, doi, type="doi"):
        """Search fjour.blyun.com and return the document's delivery-page URL.

        :param doi: DOI string (when ``type == "doi"``) or an article title.
        :param type: "doi" searches the DOI field (Field=8); any other value
            searches by title (Field=1).
        :raises IpError: when the site reports the client IP is not allowed.
        :raises NotSearchError: when no matching document is found.
        :return: URL of the "图书馆文献传递" page for the first search hit.
        """
        headers = {
            "Referer": "https://www.duxiu.com/",
            "User-Agent": str(ua.random())
        }
        url = "https://fjour.blyun.com/searchFJour"
        if type == "doi":
            params = {
                "Field": "8",
                "channel": "searchFJour",
                "sw": doi,
                "edtype": "",
                "view": "0"
            }
        else:
            params = {
                "sw": doi,
                "allsw": "",
                "bCon": "",
                "ecode": "utf-8",
                "channel": "searchFJour",
                "Field": "1"
            }
        response, self.S = Request2.GetResponse(url, params=params, S=self.S, return_S=True, headers=headers)
        response.encoding = "utf-8"
        if "IP不在我们" in response.text:
            raise IpError
        # Pull (dxNumber, d, timestr) out of the delivery link's JS handler.
        re_data = re.findall(r"javaScript:subtoRefer\('(\d+)','(.*?)','\d+','(\d+)',''\)\" > 图书馆文献传递</a>", response.text.replace("\n", ""))
        if not re_data and "抱歉，没有找到与" in response.text:
            raise NotSearchError
        return f"https://fjour.blyun.com/fjourfitst.jsp?dxNumber={re_data[0][0]}&timestr={re_data[0][2]}&d={re_data[0][1]}"

    def send_email(self, url, retry_count=0):
        """Open the delivery page and post the email-delivery form.

        Retries (recursively) up to 3 total attempts when the captcha is
        rejected, reporting the bad solution back to Chaojiying each time.

        :param url: delivery-page URL from :meth:`search_document_by_doi`.
        :param retry_count: current retry number; callers leave this at 0.
        :return: tuple ``(success, message, email, title)``.
        """
        response, self.S = Request2.GetResponse(url, S=self.S, return_S=True)
        response.encoding = "utf-8"

        captcha_code, pic_id = self.skip_captcha()
        request_data, title = self.parse_data(response.text, self.email, captcha_code)

        headers = {
            "Origin": "https://www.xinyunfuwu.com",
            "Referer": "https://www.xinyunfuwu.com/mag.do",
            "User-Agent": str(ua.random())
        }

        submit_url = "https://www.xinyunfuwu.com/magsubmit.do"
        response, self.S = Request2.GetResponse(submit_url, headers=headers, data=request_data, S=self.S, return_S=True, method="POST", is_proxy="no")
        response.encoding = "utf-8"

        if "您输入的验证码不正确，请重试" in response.text:
            # Tell Chaojiying the solution was wrong so the charge is refunded.
            chaojiying.ReportError(pic_id)
            logger.warning(f"第 {retry_count} 次识别失败")
            if retry_count < 2:
                return self.send_email(url, retry_count + 1)
            else:
                return False, "验证码多次失败", self.email, title
        elif "此记录您已咨询过一次！" in response.text:
            return True, "此记录您已咨询过一次", self.email, title
        else:
            return True, "文献传递成功", self.email, title

    @staticmethod
    def parse_data(html, email, captcha_code):
        """Build the magsubmit.do form payload from the delivery page's hidden
        inputs (all named ``fmf.*``).

        :param html: delivery-page HTML.
        :param email: recipient address for the document.
        :param captcha_code: solved captcha text.
        :return: tuple ``(form_dict, title)`` where title is the article name.
        """
        etree_html = etree.HTML(html)
        return {
            "mf.type": etree_html.xpath("//input[@name='fmf.type']/@value")[0],
            "mf.content": etree_html.xpath("//input[@name='fmf.content']/@value")[0],
            "cxrepaytype": "1",
            "mf.email": email,
            "mf.verifycode": captcha_code,
            "mf.title": etree_html.xpath("//input[@name='fmf.title']/@value")[0],
            "mf.sslogin": "0",
            "mf.islogin": "",
            "mf.unitid": etree_html.xpath("//input[@name='fmf.unitid']/@value")[0],
            "mf.username": etree_html.xpath("//input[@name='fmf.username']/@value")[0],
            "mf.d": etree_html.xpath("//input[@name='fmf.d']/@value")[0],
            "mf.ssnumber": "",
            "mf.url": "",
            "mf.dxid": etree_html.xpath("//input[@name='fmf.dxid']/@value")[0],
            "mf.articu": ",",
            "mf.autoaid": ",",
            "mf.tag": "",
            "mf.choren": "1",
            "mf.dataprovider": etree_html.xpath("//input[@name='fmf.dataprovider']/@value")[0],
            "mf.ipadkey": etree_html.xpath("//input[@name='fmf.ipadkey']/@value")[0],
            "mf.ipaduserid": etree_html.xpath("//input[@name='fmf.ipaduserid']/@value")[0],
            "mf.ipadtype": "",
            "mf.mobile": etree_html.xpath("//input[@name='fmf.mobile']/@value")[0],
            "mf.domtag": "",
            "mf.nptext": "",
            "mf.gid": "",
            "mf.refer": etree_html.xpath("//input[@name='fmf.refer']/@value")[0],
            "mf.magid": etree_html.xpath("//input[@name='fmf.magid']/@value")[0],
            "mf.uid": etree_html.xpath("//input[@name='fmf.uid']/@value")[0],
            "mf.iptype": etree_html.xpath("//input[@name='fmf.iptype']/@value")[0],
            "mf.domain": etree_html.xpath("//input[@name='fmf.domain']/@value")[0],
            "mf.year": etree_html.xpath("//input[@name='fmf.year']/@value")[0],
            "clickcancel": "no",
            "mf.gofirsturl": etree_html.xpath("//input[@name='fmf.gofirsturl']/@value")[0],
        }, etree_html.xpath("//input[@name='fmf.title']/@value")[0]

    def skip_captcha(self, url="https://www.xinyunfuwu.com/vImage.jsp"):
        """Download the captcha image, archive it, and solve it via Chaojiying.

        :param url: captcha-image endpoint.
        :return: tuple ``(code, pic_id)``; ``code`` is "" when the solution
            fails the sanity check, ``pic_id`` is always returned so the
            caller can report errors back to Chaojiying.
        """
        response = self.S.get(url)
        save_path = os.path.join(self.captcha_path, f"{uuid.uuid4()}.jpg")
        with open(save_path, "wb") as f:
            f.write(response.content)

        with open(save_path, 'rb') as r:
            img_bytes = r.read()
        text_info = chaojiying.PostPic(img_bytes, 1005)
        text = text_info["pic_str"]
        pic_id = text_info["pic_id"]
        # NOTE(review): the pattern requires 4 alphanumerics but the length
        # check demands exactly 5 characters — confirm the expected captcha
        # length before tightening this.
        if re.search(r'[0-9a-zA-Z]{4}', text) is not None and len(text) == 5:
            return text, pic_id
        else:
            return "", pic_id

    def run(self, doi, type="doi"):
        """Search for the document, then request email delivery.

        :param doi: DOI string or article title (see ``type``).
        :param type: "doi" or anything else for a title search.
        :return: tuple ``(success, message, email, title)``.
        """
        try:
            # BUG FIX: was calling the module-level `document_delivery`
            # singleton instead of `self`, so independently constructed
            # instances silently used the singleton's session/cookies/email.
            url = self.search_document_by_doi(doi, type)
        except IpError:
            return False, "IP被封禁", self.email, ""
        except NotSearchError:
            return False, "未搜索到文献", self.email, ""
        return self.send_email(url)


# Module-level singleton. NOTE: constructing it performs cache lookups and
# may raise at import time when no cookies are cached.
document_delivery = DocumentDelivery()


class SpiderDoi:
    """Fetch a web page and extract the article's DOI from it."""

    def __init__(self, url):
        # Target page to scrape.
        self.url = url

    def re_doi(self, doi_text):
        """Return the first DOI-shaped substring of *doi_text*, or ""."""
        doi_re = r"(10\.\d{4,5}/[\d\:\.\,\(\)\;\[\]\_\<\>\&\-\+\/a-zA-Z]{1,200})"
        hit = re.search(doi_re, str(doi_text))
        return hit.group(1) if hit else ""

    def run(self):
        """Download the page and return its DOI ("" when none is found).

        Prefers the DOI declared in a <meta name="...doi"> tag; falls back
        to scanning the raw HTML for a DOI-shaped string.
        """
        response = Request2.GetResponse(url=self.url)
        response.encoding = "utf-8"
        meta_hits = re.findall(r'<meta name=".*?doi" content="(.*?)"\s*/>', response.text.replace("\n", ""))
        if meta_hits:
            return meta_hits[0]
        return self.re_doi(response.text)


if __name__ == '__main__':
    # Ad-hoc manual test: deliver one hard-coded DOI via the module singleton.
    doi = "10.1117/12.919139"
    # doi = "10.1149/1945-7111/ac8a1b"
    # doi = "The Effect of Seeding Rate on Spring Maize Grain Quality and Yield"
    # print(DocumentDelivery().run(doi, type="title"))
    print(document_delivery.run(doi))
