# -*- coding:utf-8 -*-
import base64
import json
import re
import ssl
from time import sleep

from adsl import ADSL
from remoteDB import ArticleDB as RemoteArticleDB, ArticleDB
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from Redis import RedisClient
from lxml import etree
from config import Dir
from urllib.parse import quote, unquote
import traceback


class IData(object):
    """Scraper/automation client for article workflows across ixueshu.com,
    cn-ki.net, koovin.com and paperyy.com (search, download, submit)."""

    # Redis-backed store for session cookies and the current ADSL proxy address.
    r = RedisClient()
    # Remote article database handle (inserts/dedup checks for scraped rows).
    db = ArticleDB()
    # ADSL dialer used to (re)establish a proxy IP when none is available.
    adsl = ADSL()
    def submit(self, title, author, content):
        """Submit an article to paperyy's plagiarism-check form via Selenium.

        Loads the logged-in cookies from Redis, fills the form, injects the
        body text with a JS assignment (send_keys on large text is slow),
        and clicks through the payment dialog. Best-effort: any failure is
        logged and swallowed.
        """
        chrome_options = Options()
        # Hide the "controlled by automated software" banner.
        chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--headless')
        driver = webdriver.Chrome(options=chrome_options)
        driver.set_window_size(1920, 1080)
        try:
            driver.get("https://www.paperyy.com/member_new/thesis/post.aspx")
            # Replace the fresh session's cookies with the stored logged-in ones.
            driver.delete_all_cookies()
            for cookie in json.loads(self.r.GetCookies()):
                driver.add_cookie(cookie)
            driver.refresh()
            driver.find_element_by_xpath('//*[@id="edition-1"]').click()
            driver.find_element_by_xpath('//*[@id="chachong_form"]/div/div[1]/span[1]').click()
            # json.dumps escapes newlines/double-quotes so the text survives the
            # JS string literal below; single quotes are stripped because the
            # literal itself is single-quoted.
            js_content = json.dumps(content).strip('"').replace("'", '')
            js = "var element=document.getElementById('txtContent'); element.value='" + js_content + "';"
            driver.execute_script(js)
            driver.find_element_by_id('txtTitle').send_keys(title)
            driver.find_element_by_id('txtAuthor').send_keys(author)
            # A trailing space keystroke fires the page's change handlers on the
            # JS-assigned textarea value.
            driver.find_element_by_id('txtContent').send_keys(' ')
            driver.find_element_by_id('btnSubmit2').click()
            driver.implicitly_wait(10)
            driver.find_element_by_id('priovid6').click()
            driver.implicitly_wait(2)
            driver.find_element_by_xpath('/html/body/div[11]/div/div[2]/div[3]/button[1]').click()
            driver.find_element_by_xpath('//*[@id="ajaxList"]/div[2]/div[1]/div/div[3]/div[1]/div').click()
            driver.find_element_by_xpath('//*[@id="ajaxList"]/div[2]/div[1]/div/div[4]/div[1]/div').click()
            driver.find_element_by_xpath('//*[@id="submitPay"]').click()
            driver.implicitly_wait(10)
        except Exception:
            traceback.print_exc()
        finally:
            # BUG FIX: the original only called driver.quit() on the success
            # path, leaking a headless Chrome process on any failure.
            driver.quit()

    def GetUrl(self):
        """Fetch one CDMD search-results page from search.cn-ki.net.

        Builds a Cookie header from the session cookies stored in Redis
        (first occurrence of each cookie name wins) and prints the raw HTML.
        """
        seen = set()
        cookie_parts = []
        for cookie in json.loads(self.r.GetCookies()):
            if cookie['name'] in seen:
                continue
            seen.add(cookie['name'])
            cookie_parts.append(f'{cookie["name"]}={cookie["value"]};')
        headers = {
            "sec-ch-ua-platform": "macOS",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-User": "?1",
            "Sec-Fetch-Dest": "document",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Cookie": ''.join(cookie_parts)
        }
        url = 'https://search.cn-ki.net/search?keyword=%E5%88%86%E6%9E%90&db=CDMD&p=1'
        print(requests.get(url, headers=headers).text)

    def login(self):
        """Log in to cn-ki.net with Selenium and persist the session cookies.

        Performs one warm-up search after login so the site sets its full
        cookie set, then stores the cookies as JSON in Redis for the
        request-based scrapers to reuse. Errors are printed; the browser is
        always released.
        """
        chrome_options = Options()
        # Headless flags intentionally left off so the login flow (and any
        # captcha) stays visible to the operator.
        # chrome_options.add_argument('--no-sandbox')
        # chrome_options.add_argument('--disable-dev-shm-usage')
        # chrome_options.add_argument('--headless')
        driver = webdriver.Chrome(options=chrome_options)
        driver.set_window_size(1920, 1080)
        try:
            driver.get('https://user.cn-ki.net/login')
            driver.implicitly_wait(10)
            # SECURITY NOTE: credentials are hard-coded here; they should be
            # moved to config/environment variables.
            driver.find_element_by_id('num').send_keys('15392918237')
            driver.find_element_by_id('passwd').send_keys('yangzhe')
            driver.find_element_by_class_name('btn__content').click()
            sleep(10)
            driver.find_element_by_xpath(
                '/html/body/div/div[2]/div/main/div/div/div[1]/div/div/div[3]/button[1]').click()
            driver.implicitly_wait(10)
            driver.refresh()
            driver.get('https://www.cn-ki.net/')
            sleep(3)
            # Warm-up search so the site issues its full cookie set.
            driver.find_element_by_xpath('/html/body/div[2]/div[2]/div/div/div[1]/ul/li[3]').click()
            driver.find_element_by_id('txt_SearchText').send_keys('经济')
            driver.find_element_by_xpath('/html/body/div[2]/div[2]/div/div/div[2]/form/input[2]').click()
            sleep(5)
            dictCookies = driver.get_cookies()
            print(dictCookies)
            self.r.SetCookies(json.dumps(dictCookies))
        except Exception as e:
            print(e)
        finally:
            # BUG FIX: quit in finally instead of duplicating driver.quit()
            # in both the success and exception branches.
            driver.quit()

    def get_ixueshu_article(self, keyword):
        """Scrape thesis search results for ``keyword`` from ixueshu.com.

        Walks pages 1..49, stores every result row via ``self.db`` and
        records the site-reported total page count. Returns the page number
        reached: the first empty page, or 49 if every page had results.
        Per-page errors are logged and the loop continues.
        """
        referer = 'https://www.ixueshu.com/'
        # BUG FIX: the original only URL-encoded the keyword when building the
        # Referer, not the request URL itself; encode it once for both.
        encoded_keyword = quote(keyword, 'utf-8')
        for page in range(1, 50):
            try:
                headers = {
                    "sec-ch-ua-platform": "macOS",
                    "Upgrade-Insecure-Requests": "1",
                    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36",
                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                    "Sec-Fetch-Site": "same-origin",
                    "Sec-Fetch-Mode": "navigate",
                    "Sec-Fetch-User": "?1",
                    "Sec-Fetch-Dest": "document",
                    "Accept-Encoding": "gzip, deflate, br",
                    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
                    "Referer": referer
                }
                # Cookies live in Redis as a JSON list of {name, value} dicts.
                cookieDict = {c['name']: c['value'] for c in json.loads(self.r.get_cookies())}

                # Optional authenticated ADSL proxy.
                adsl = self.r.ADSL()
                proxies = {}
                if adsl:
                    proxies = {
                        'http': 'http://xiaohengheng:950218@' + adsl,
                        'https': 'http://xiaohengheng:950218@' + adsl,
                    }
                url = f'https://www.ixueshu.com/search/index.html?search_type=thesis&q={encoded_keyword}&page={page}'
                print(url)
                response = requests.get(url, headers=headers, cookies=cookieDict, proxies=proxies, timeout=60)
                if 200 <= response.status_code < 300:
                    tree = etree.HTML(response.content)
                    articles = tree.xpath('/html/body/div[2]/div[2]/div[2]/div[2]/ul/li')

                    # Pagination widget text looks like "cur/total"; fall back
                    # to 0 if the widget is absent or malformed.
                    try:
                        max_page = tree.xpath('/html/body/div[2]/div[2]/div[2]/div[2]/div/div/div/div[2]/text()')[
                            1].strip()
                        max_page = int(max_page.split('/')[1]) if '/' in max_page else 0
                    except (IndexError, ValueError):
                        # BUG FIX: was a bare `except:` which also swallowed
                        # KeyboardInterrupt/SystemExit.
                        max_page = 0
                    self.db.UpdateSearchKeyMaxPages(keyword, max_page)
                    # An empty result page means we've run past the last page.
                    if not articles:
                        return page
                    for article in articles:
                        article_info = {
                            'title': article.xpath('./div/h3/a')[0].xpath('string(.)'),
                            'university': article.xpath('./div/div[@class="field"]/span[1]/text()')[0].split(';')[
                                1].strip(),
                            'years': article.xpath('./div/div[@class="field"]/span[2]/text()')[0].split('：')[1].strip(),
                            'url': article.xpath('./div/h3/a/@href')[0],
                            'article_id': article.xpath('./input/@value')[0],
                            'abstract': article.xpath('./div/div[@class="intro"]')[0].xpath('string(.)'),
                        }
                        self.db.InsertIntoArticleDict(article_info)
                    # Next request claims this results page as its Referer.
                    referer = f'https://www.ixueshu.com/search/index.html?search_type=thesis&q={encoded_keyword}&page={page}'
                else:
                    print(response.status_code)
                    if response.status_code == 403:
                        # Likely rate-limited; back off extra before retrying.
                        sleep(10)
                sleep(10)
            except Exception:
                traceback.print_exc()
        return page

    def get_koovin_article(self, keyword):
        """Scrape search results for ``keyword`` from koovin.com (pages 1..49).

        Stores every result row via ``self.db``. Returns the page number
        reached: the first empty page, or 49 if every page had results.
        Per-page errors are logged and the loop continues.
        """
        referer = 'http://www.koovin.com/'
        for page in range(1, 50):
            try:
                headers = {
                    "sec-ch-ua-platform": "macOS",
                    "Upgrade-Insecure-Requests": "1",
                    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36",
                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                    "Sec-Fetch-Site": "same-origin",
                    "Sec-Fetch-Mode": "navigate",
                    "Sec-Fetch-User": "?1",
                    "Sec-Fetch-Dest": "document",
                    "Accept-Encoding": "gzip, deflate, br",
                    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
                    "Referer": referer
                }
                # Cookies live in Redis as a JSON list of {name, value} dicts.
                cookieDict = {c['name']: c['value'] for c in json.loads(self.r.get_cookies())}

                # Optional authenticated ADSL proxy.
                adsl = self.r.ADSL()
                proxies = {}
                if adsl:
                    proxies = {
                        'http': 'http://xiaohengheng:950218@' + adsl,
                        'https': 'http://xiaohengheng:950218@' + adsl,
                    }
                url = f'http://www.koovin.com/?q={keyword}&p={page}'
                print(url)
                # BUG FIX: the original built cookieDict and proxies but never
                # passed them to requests.get; they are now actually used
                # (matching the sibling ixueshu scraper).
                response = requests.get(url, headers=headers, cookies=cookieDict, proxies=proxies, timeout=60)
                if 200 <= response.status_code < 300:
                    tree = etree.HTML(response.content)
                    articles = tree.xpath('/html/body/div[2]/div[1]/div[2]/div')
                    # Koovin exposes no total-page count; record the hard cap.
                    self.db.UpdateSearchKeyMaxPages(keyword, 50)

                    # An empty result page means we've run past the last page.
                    if not articles:
                        return page
                    for article in articles:
                        # NOTE(review): several sub-xpaths below look copied
                        # from the ixueshu scraper (e.g. './div/h3/a/@href'
                        # while the title lives at './h2/a') — verify against
                        # koovin's actual result markup.
                        article_info = {
                            'title': article.xpath('./h2/a')[0].xpath('string(.)'),
                            'university': article.xpath('./div/div[@class="field"]/span[1]/text()')[0].split(';')[
                                1].strip(),
                            'years': article.xpath('./div/div[@class="field"]/span[2]/text()')[0].split('：')[1].strip(),
                            'url': article.xpath('./div/h3/a/@href')[0],
                            'article_id': article.xpath('./input/@value')[0],
                            'abstract': article.xpath('./div/div[@class="intro"]')[0].xpath('string(.)'),
                        }
                        self.db.InsertIntoArticleDict(article_info)
                    # Next request claims this results page as its Referer.
                    # (The original computed quote(keyword) here but never
                    # used it; the dead assignment is removed.)
                    referer = f'http://www.koovin.com/?q={keyword}&p={page}'
                else:
                    print(response.status_code)
                    if response.status_code == 403:
                        # Likely rate-limited; back off extra before retrying.
                        sleep(10)
                sleep(10)
            except Exception:
                traceback.print_exc()
        return page

    def download(self, url):
        """Fetch an article page and return the download link found on it.

        Builds a Cookie header from the session cookies stored in Redis
        (first occurrence of each cookie name wins), requests ``url`` and
        extracts the download href via XPath.
        """
        seen = set()
        cookie_parts = []
        for cookie in json.loads(self.r.GetCookies()):
            if cookie['name'] in seen:
                continue
            seen.add(cookie['name'])
            cookie_parts.append(f'{cookie["name"]}={cookie["value"]};')
        headers = {
            "sec-ch-ua-platform": "macOS",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-User": "?1",
            "Sec-Fetch-Dest": "document",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Cookie": ''.join(cookie_parts)
        }
        response = requests.get(url, headers=headers)
        print(response.text)
        page = etree.HTML(response.text)
        return page.xpath('/html/body/div[1]/div/div/div[2]/div[3]/div/a/@href')[0]

    def getPeriodicalInfo(self, article_id):
        """Fetch related-periodical records for ``article_id`` from ixueshu.

        Calls the relate-pc endpoint (signed request), de-duplicates results
        by "title##authors", keeps only records the DB reports as new via
        ``VerifyPeriodical``, and returns them as a list of raw result dicts.
        Best-effort: network/parse errors are logged and an empty (or partial)
        list is returned.
        """
        headers = {
            "sec-ch-ua-platform": "macOS",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-User": "?1",
            "Sec-Fetch-Dest": "document",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
        }
        # The endpoint requires sign = base64("<docId>_xs!@#123").
        sign = str(base64.b64encode((article_id + '_xs!@#123').encode('utf-8')), encoding='utf-8')
        url = f"https://www.ixueshu.com/document/search/relate-pc?docId={article_id}&size=100&sign={sign}"
        # Get an ADSL proxy address; (re)dial once if none is available yet.
        try:
            key, adsl = self.r.getADSL()
        except Exception:
            self.adsl.init(1)
            key, adsl = self.r.getADSL()
        proxies = {}
        if adsl:
            proxies = {
                'http': 'http://dujhre:4l9axcjk@' + adsl,
                'https': 'http://dujhre:4l9axcjk@' + adsl,
            }
        sqldata = []
        try:
            r = requests.get(url, headers=headers, proxies=proxies, timeout=10)
            if 300 > r.status_code >= 200:
                result = json.loads(r.text)
                print(len(result['body']))
                # De-duplicate by "title##authors"; a set gives O(1) membership
                # (the original used a list, O(n) per item).
                seen = set()
                for item in result['body']:
                    author = ''.join(item['authors_cn'])
                    title = item['title_cn']
                    verify = f'{title}##{author}'
                    if verify not in seen:
                        seen.add(verify)
                        # Only keep records the DB does not already have; the
                        # author string is truncated to the DB column width.
                        if self.db.VerifyPeriodical(title, author[0:50]):
                            print(verify)
                            sqldata.append(item)
        except Exception:
            # BUG FIX: was a bare `except: pass` that silently hid every
            # error; keep the best-effort contract but log the failure.
            traceback.print_exc()
        print(len(sqldata))
        return sqldata


if __name__ == '__main__':
    # Ad-hoc entry point: fetch related-periodical records for one hard-coded
    # article id and bulk-insert them into the remote article database.
    db = ArticleDB()
    bot = IData()
    sqldata = bot.getPeriodicalInfo('1200000001655511')
    db.InsertIntoPeriodical(sqldata)
    # Earlier workflow kept for reference: log in, then repeatedly pull a
    # pending article URL from the DB and download its file.
    # # bot.login()
    # while True:
    #     article = db.DownloadUrl()
    #     download_url = bot.download(article[1])
    #     bot.download_file(download_url, article[2], article[3], article[8])
    #     sleep(10)
