import urllib.parse
from time import sleep

from tools import create_root_node
import requests
from GzLibCookie import headers as gzlibheaders
from Redis import RedisClient
from remoteDB import ArticleDB
import traceback

class Gzlib(object):
    """Scraper for thesis search results on jour.gzlib.org.

    Pages through the search results for a keyword, parses per-article
    metadata out of each result page and persists it via ArticleDB.
    """

    # NOTE: class-level attributes — a single Redis client and a single DB
    # connection are shared by every Gzlib instance.
    r = RedisClient()
    db = ArticleDB()

    def getAricles(self, searchq):
        """Crawl up to 49 result pages for *searchq* and store each article.

        :param searchq: raw (unquoted) search keyword.
        :returns: the 1-based number of the page on which crawling stopped
            (empty result page, non-200 response, or exception), or 49 when
            every page was processed successfully.
        """
        headers = gzlibheaders
        searchq_quote = urllib.parse.quote(searchq)
        for page in range(1, 50):
            try:
                # Rotate through an ADSL proxy when Redis has one on offer;
                # fall back to a direct connection otherwise.
                adsl = self.r.ADSL()
                proxies = {}
                if adsl:
                    proxy = 'http://xiaohengheng:950218@' + adsl
                    proxies = {
                        'http': proxy,
                        'https': proxy,
                    }
                url = f"http://jour.gzlib.org/searchThesis?sw={searchq_quote}&allsw=&bCon=&ecode=utf-8&channel=searchThesis&Field=1&Pages={page}"
                # timeout so a stalled connection cannot hang the crawl forever
                searchResponse = requests.get(url, headers=headers, proxies=proxies, timeout=30)
                if searchResponse.status_code == 200:
                    html = searchResponse.text
                    tree = create_root_node(html)
                    articles = tree.xpath('//div[@class="book1"]')
                    try:
                        # Second red <font> inside #searchinfo holds the total
                        # page count; cap it at 50 (we crawl at most 49 pages).
                        max_page_node = tree.xpath('//div[@id="searchinfo"]/font[@color="red"]')[1].xpath("./text()")[0]
                        # Always store an int (the old code kept the raw string
                        # whenever the count was <= 50).
                        max_page = min(int(max_page_node), 50)
                    except Exception:
                        traceback.print_exc()
                        max_page = 0
                    self.db.UpdateSearchKeyGzlibMaxPages(searchq, max_page)
                    if len(articles) > 0:
                        data = []
                        for article in articles:
                            article_info = {}
                            info = article.xpath("./div[@class='book']/span[@id='m_fl']/div[@class='fc-green']")[0].xpath('string(.)').strip()
                            titleObject = article.xpath("./div[@class='book']/a")[0].xpath('string(.)')
                            print(titleObject)
                            # Fields are double-space separated:
                            # author / university / degree level / year.
                            parts = info.split('  ')
                            article_info['title'] = str(titleObject)
                            article_info['author'] = parts[0].replace('作者：', '')
                            article_info['university'] = parts[1].replace('学位授予单位：', '')
                            article_info['level'] = parts[2].replace('学位名称：', '')
                            article_info['years'] = parts[3].replace('学位年度：', '')
                            try:
                                article_info['url'] = "http://jour.gzlib.org" + article.xpath("./div[@class='book']/div[@class='get']/a")[0].xpath('./@href')[0]
                            except IndexError:
                                # No download link on this entry.
                                article_info['url'] = ''
                            data.append(article_info)
                        for item in data:
                            self.db.InsertIntoArticleDictFromGzLib(item)
                    else:
                        # Empty page: we have run out of results.
                        return page
                else:
                    print(searchResponse.status_code)
                    return page
            except Exception:
                # Was a bare `except:` — that also swallowed
                # KeyboardInterrupt/SystemExit, making the crawl unkillable.
                traceback.print_exc()
                return page
            sleep(10)  # be polite between page fetches
        return page

    def download(self, url):
        """Placeholder for fetching the article behind *url*; not implemented."""
        pass


if __name__ == '__main__':
    bot = Gzlib()
    db = ArticleDB()
    cursor = db.conn.cursor()
    # Every keyword not yet crawled on gzlib (gzlib_used == 0).
    cursor.execute('select * from search_key where search_key.gzlib_used=0')
    keys = cursor.fetchall()
    for key in keys:
        # key[1] is presumably the keyword column — confirm against schema.
        res = bot.getAricles(key[1])
        # Parameterized query instead of f-string interpolation: the keyword
        # comes back out of the DB and may contain quotes (SQL breakage /
        # injection). NOTE(review): assumes a %s-paramstyle DB-API driver
        # (MySQLdb/pymysql); switch placeholders to '?' if ArticleDB wraps
        # sqlite3.
        cursor.execute(
            "update search_key set gzlib_used=%s where search_key.key=%s",
            (res, key[1]),
        )
        db.conn.commit()