import os
import re
import sys
import time

from bs4 import BeautifulSoup
from xjlibrary.database_moudel.simple.mysqlclient import MySqlDbConnect, SelctSqlFromDB, ExeSqlToDB
from xjlibrary.mrequest.baserequest import BaseRequest, MProxyRequest, BaseRequestPost
from xjlibrary.our_file_dir.base_dir import BaseDir
from xjlibrary.tools.BaseUrl import BaseUrl

# Resolve the directory of this script and derive the download destination:
# <two levels up>/download/dqlib/download/journals
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -2)
sPath = BaseDir.get_new_path(TopPath, "download", "dqlib", "download", "journals")

# Headers for plain GET requests (desktop Chrome user-agent).
HEADERS = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "accept-encoding": "gzip, deflate",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
    "upgrade-insecure-requests": "1",
    "cache-control": "no-cache",
    "pragma": "no-cache",
    "Proxy-Connection": "keep-alive",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 "
                  "Safari/537.36 "
}

# Headers for form POSTs to dqlib.vip.qikan.com (ASP.NET postback requests).
HEADERSPOST = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Cache-Control": "no-cache",
    "Content-Type": "application/x-www-form-urlencoded",
    "Host": "dqlib.vip.qikan.com",
    "Origin": "http://dqlib.vip.qikan.com",
    "Pragma": "no-cache",
    "Proxy-Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36"
}

# Base URL that every journal path read from the database is appended to.
baseurl = "http://dqlib.vip.qikan.com/text/"


def requestVolIssue(url, proxysList):
    """Fetch *url* through the rotating-proxy helper and return the response.

    The page must contain the "facets-container" marker to count as a
    successful fetch.  Terminates the whole process on failure.
    """
    ok, err, response = MProxyRequest(
        url,
        Feature="facets-container",
        HEADERS=HEADERS,
        proxyRingList=proxysList,
        timeout=(30, 60),
    )
    if ok:
        return response
    print("请检查失败原因:" + err)
    sys.exit(-1)


def savefile(r, filePath):
    """Write the body text of response *r* to *filePath*."""
    body = r.text
    BaseDir.single_write_file(body, filePath)


# Database connection helper.
def MajorDbConnect():
    """Open a MySQL connection using the db.ini file next to this script."""
    return MySqlDbConnect(curPath, "db.ini")


def SelectFromDB():
    """Return the `url` of every journal row still pending (stat=0)."""
    query = "select `url` from `journal` where `stat`=0"
    return SelctSqlFromDB(query, MajorDbConnect())

def UpdateSql(sql):
    """Execute *sql* against the journal database; abort the program on error."""
    connection = MajorDbConnect()
    ExeSqlToDB(sql, connection, errExit=True)


def requestJournals(url):
    """GET *url* (expects the "mediaeng" marker) and return the response.

    Terminates the whole process when the request fails.
    """
    ok, err, response = BaseRequest(url, mark="mediaeng", headers=HEADERS, timeout=(30, 45))
    if ok:
        return response
    print("请检查失败原因:" + err)
    sys.exit(-1)


def requestJournalsPost(url, data):
    """POST *data* to *url*, retrying up to three times (3 s apart).

    On persistent failure the journal row is flagged with stat=2 so it can
    be inspected later, and None is returned instead of killing the program.
    """
    ok, err, response = False, "", None
    for _attempt in range(3):
        ok, err, response = BaseRequestPost(url, data=data, mark="mediaeng",
                                            headers=HEADERSPOST, timeout=45)
        if ok:
            break
        print("请求失败  现在重请求:" + err)
        time.sleep(3)

    if ok:
        return response
    print("请检查失败原因:" + err)
    # Errors do not kill the program; record them in the DB for later review.
    key = url.replace(baseurl, "")
    sql = "update `journal` set stat=2 where `url`='{}'".format(key)
    UpdateSql(sql)
    return None


def get_page_num(r):
    """Extract the total page count from the second "anpager" pager div of *r*."""
    soup = BeautifulSoup(r.text, "lxml")
    pagers = soup.find_all("div", class_="anpager")
    bold_tags = pagers[1].find_all("b")
    return int("".join(bold_tags[1].stripped_strings))


def getPostData(r):
    """Pull the ASP.NET postback fields from response *r*.

    The third "anpager" div contains javascript doPostBack links; the first
    argument of the third link is the __EVENTTARGET.  __VIEWSTATE comes from
    the hidden input of the same name.

    Returns (__EVENTTARGET, __VIEWSTATE).
    """
    soup = BeautifulSoup(r.text, "lxml")
    div_all_tag = soup.find_all("div", class_="anpager")
    a_all_tag = div_all_tag[2].find_all("a")
    href = a_all_tag[2]["href"]
    # Raw string: the original non-raw pattern relied on the invalid escape
    # sequences \( and \d, which emit DeprecationWarning/SyntaxWarning on
    # modern CPython.  The matched text is unchanged.
    searchobj = re.search(r"doPostBack\('(.*?)','(\d+)'", href)
    __EVENTTARGET = searchobj.group(1)
    input_tag = soup.find_all("input", id="__VIEWSTATE")
    __VIEWSTATE = input_tag[0]["value"]
    return __EVENTTARGET, __VIEWSTATE


def get_page(allpage, r, filename, url):
    """Download pages 2..allpage of a journal listing via ASP.NET postbacks.

    allpage  -- total page count (from get_page_num)
    r        -- response for page 1, source of the initial postback fields
    filename -- issn-derived basename for the saved HTML files
    url      -- listing URL to POST back to

    Each page is saved as <sPath>/<filename>_<page>.html.
    """
    # Create the output directory once, up front; exist_ok avoids the
    # check-then-create race the original per-iteration test had.
    os.makedirs(sPath, exist_ok=True)
    for page in range(2, allpage + 1):
        filePath = os.path.join(sPath, filename + "_" + str(page) + ".html")
        __EVENTTARGET, __VIEWSTATE = getPostData(r)
        pageData = {
            '__EVENTTARGET': __EVENTTARGET,
            '__EVENTARGUMENT': str(page),
            '__VIEWSTATE': __VIEWSTATE,
            'r1': '1',
            'slt': '1',
            __EVENTTARGET + "_input": str(page - 1)
        }
        resp = requestJournalsPost(url, data=pageData)
        if resp:
            savefile(resp, filePath)
            # Only advance to the new response on success.  The original
            # reassigned r even when the POST returned None, which made
            # getPostData(r) crash with AttributeError on the next page.
            r = resp
        print("letter is:{}, all page is:{}, now page is {}".format(filename, allpage, page))


def start():
    """Main crawl loop.

    For every journal URL still marked pending (stat=0): fetch page 1, save
    it, then fetch the remaining pages via postbacks, and finally mark the
    row done (stat=1).
    """
    # Create the output directory once; exist_ok avoids the per-row
    # check-then-create race of the original.
    os.makedirs(sPath, exist_ok=True)
    for row in SelectFromDB():
        url = baseurl + row[0]
        dictUrl = BaseUrl.urlQuery2Dict(url)
        filename = dictUrl["issn"]
        filePath = os.path.join(sPath, filename + "_1.html")
        r = requestJournals(url)
        savefile(r, filePath)
        page = get_page_num(r)
        print("filename is:{}, all page is:{}".format(filename, page))
        if page > 1:
            get_page(page, r, filename, url)
        # NOTE(review): SQL built by string formatting; the value comes from
        # our own DB, but a parameterized query would be safer if ExeSqlToDB
        # supports placeholders — confirm.
        sql = "update `journal` set stat=1 where `url`='{}'".format(row[0])
        UpdateSql(sql)


# Example failing URL: Mag.aspx?issn=8439AF76-BA69-4DA0-AB22-D16783AE049A&year=2018&Issue=7
# Page 5 of that journal failed for an unknown reason.
# Failures do not kill the program; the DB stat is set to 2 for later review.
if __name__ == "__main__":
    start()
