import os
import time
import urllib.parse as parse

import facade
import requests
from bs4 import BeautifulSoup
from xjlibrary.mrequest import baserequest
from xjlibrary.our_file_dir import BaseDir

# Path bookkeeping: raw pages are saved three directory levels above this
# file, under download/cnki_cg/download.
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -3)
sPath = BaseDir.get_new_path(TopPath, "download", "cnki_cg", "download")

# Global crawl state shared with GetHome():
#   gRecord   - record counter (unused in this chunk — TODO confirm caller)
#   StartTime - timestamp used for elapsed-time logging
#   nPage     - catalogue page to resume from; 1 means "start fresh"
gRecord = 0
StartTime = time.time()
nPage = 1

# Local HTTP proxy settings; currently unused (every `proxies=` argument
# below is commented out).
Proxiesss = {
    'http': '127.0.0.1:8087',
    # 'http':'162.105.138.192:8092',
    'https': '127.0.0.1:8087'  # the key names the target site's protocol
}
# MySQL helper configured from the db.ini file that sits next to this script.
mysqlutils = facade.MysqlUtiles(BaseDir.get_new_path(curPath, "db.ini"), "db", facade.get_streamlogger())

# # Return a database connection
# def MajorDbConnect():
#     return MySqlDbConnect(curPath, "db.ini")


# Browser-like request headers so CNKI serves the result pages.
HEADERS = {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, sdch', 'Connection': 'keep-alive',
           'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET4.0C; .NET4.0E)',
           'Referer': 'http://epub.cnki.net/kns/brief/result.aspx?dbprefix=SMSD',
           'Host': 'epub.cnki.net',
           'Accept-Language': 'zh-CN,zh;q=0.8'}
# Number of catalogue pages downloaded so far (incremented inside GetHome).
nCount = 0


# Debug helper: dump a value to a text file.
def output(value, files='log.txt'):
    """Write *value* to *files*, overwriting any previous content.

    Accepts ``bytes`` (original behavior) or ``str``; a ``str`` is encoded
    as UTF-8 so it no longer raises ``TypeError`` against the binary mode.

    :param value: bytes or str payload to write.
    :param files: target file path (default ``'log.txt'``).
    """
    if isinstance(value, str):
        value = value.encode('utf-8')
    # Context manager guarantees the handle is closed even if write() fails
    # (the original open/close pair leaked the handle on error).
    with open(files, 'wb') as f:
        f.write(value)


# Example search-handler URL:
# http://epub.cnki.net/kns/request/SearchHandler.ashx?action=&NaviCode=A&catalogName=SCSD_CCSCLS&ua=1.25&PageName=ASP.brief_result_aspx&DbPrefix=SMSD&DbCatalog=国内外标准数据库&ConfigFile=SMSD.xml&db_opt=中国标准数据库,国外标准数据库&db_value=中国标准数据库,国外标准数据库&his=0

# Fetch the paginated download listing for one category/year.


def GetHome(NaviCode: str, Year: str, nLoop: int = 1) -> bool:
    """Download the CNKI brief-result pages for one (NaviCode, Year) pair.

    Walks the paginated result listing and saves each raw HTML page to
    ``sPath/Raw/<NaviCode>_<Year>_<page>.html``.  Uses the module globals
    ``nPage`` (page to resume from after a failure) and ``nCount`` (pages
    downloaded so far).

    Returns True when every page was fetched or already exists on disk,
    False when a request failed or a captcha was encountered, so the
    caller can retry the same pair.
    """
    global nCount, nPage
    bRetry = False

    # nPage != 1 means a previous run stopped on that page: resume there
    # instead of starting from page 1.
    if nPage != 1:
        nLoop = nPage
        bRetry = True
    dirPath = os.path.join(sPath, 'Raw')
    if not os.path.exists(dirPath):
        os.makedirs(dirPath)
    filePath = os.path.join(dirPath, NaviCode + '_' + Year + '_' + str(nLoop) + '.html')
    if os.path.exists(filePath):
        # NOTE: if the program is interrupted for any reason, delete every
        # page file of the last (NaviCode, Year) item and restart —
        # otherwise this early-exit silently skips the whole item.
        print(filePath)
        return True

    # Step 1: open the result page once so the session picks up cookies.
    url = r'http://kns.cnki.net/KNS/brief/result.aspx?dbprefix=SNAD'
    sn = requests.Session()
    BoolResult, errString, r = baserequest.BaseRequest(url,
                                                       sn,
                                                       # proxies=Proxies,
                                                       endstring="",
                                                       headers=HEADERS,
                                                       timeout=30
                                                       )
    if not BoolResult:
        print("请求失败")
        nPage = nLoop
        return False

    # Step 2: issue the search request for this category (NaviCode).
    url = r'http://kns.cnki.net/kns/request/SearchHandler.ashx?action='
    url += '&NaviCode=' + NaviCode
    url += '&catalogName=ZJCLS&ua=1.25&PageName=ASP.brief_result_aspx&DbPrefix=SNAD&DbCatalog=国家科技成果数据库&ConfigFile=SNAD.xml&db_opt=SNAD&db_value=国家科技成果数据库&his=0'
    BoolResult, errString, r = baserequest.BaseRequest(url,
                                                       sn,
                                                       #  proxies=Proxies,
                                                       endstring="",
                                                       headers=HEADERS,
                                                       timeout=30,
                                                       )
    if not BoolResult:
        print("请求失败")
        nPage = nLoop
        return False

    # Step 3: request the year grouping (left panel).  The query string is
    # round-tripped through parse_qsl/urlencode to normalize its encoding.
    url = 'http://kns.cnki.net/KNS/group/doGroupLeft.aspx?action=1&Param=ASP.brief_result_aspx%23SNAD/%u5E74/%u5E74%2Ccount%28*%29/%u5E74/%28%u5E74%2C%27date%27%29%23%u5E74%24desc/1000000%24/-/40/40000/ButtonView'
    url = 'http://kns.cnki.net/kns/group/doGroupLeft.aspx?' + \
          parse.urlencode(dict(parse.parse_qsl(parse.urlparse(url).query)))

    BoolResult, errString, r = baserequest.BaseRequest(url, sn,
                                                       # proxies=Proxies,
                                                       endstring="", headers=HEADERS,
                                                       timeout=30
                                                       )
    if not BoolResult:
        print("请求失败")
        nPage = nLoop
        return False

    # Step 4: fetch the result listing filtered to Year, 50 records/page,
    # then follow "next page" links until exhausted or blocked.
    url = "http://kns.cnki.net/KNS/brief/brief.aspx?ctl=5d8f59fd-ac13-462e-aa2e-14b56d0ec4bd&dest=%E5%88%86%E7%BB%84%EF%BC%9A%E5%B9%B4%20%E6%98%AF%20{Year}&action=5&dbPrefix=SNAD&PageName=ASP.brief_result_aspx&Param=%E5%B9%B4+%3d+%27{Year}%27&SortType=%E5%B9%B4&ShowHistory=1&recordsperpage=50".format(
        Year=Year)
    while 1:
        BoolResult, errString, r = baserequest.BaseRequest(url, sn,
                                                           # proxies=Proxies,
                                                           endstring="",
                                                           headers=HEADERS, timeout=30
                                                           )
        if not BoolResult:
            print("请求失败")
            nPage = nLoop
            bRetry = True
            break

        soup = BeautifulSoup(r.text, 'lxml')
        # 'Page_next' is the pagination link; 'CheckCode' is the captcha
        # input CNKI injects when it decides to block the crawler.
        nextTag = soup.find('a', id='Page_next')
        checkTag = soup.find('input', id='CheckCode')

        if bRetry and nextTag:
            # Resuming: the next-page href always points at curpage=2, so
            # rewrite it to jump straight to the page we stopped on.
            url = 'http://kns.cnki.net/kns/brief/brief.aspx' + \
                  nextTag.get('href')
            url = url.replace('curpage=2', 'curpage=%d' % nLoop)
            print('retry url:', url)
            bRetry = False
            continue

        if not checkTag:
            # No captcha: persist the raw HTML of this catalogue page.
            dirPath = os.path.join(sPath, 'Raw')
            if not os.path.exists(dirPath):
                os.makedirs(dirPath)
            filePath = os.path.join(dirPath, NaviCode + '_' + Year + '_' + str(nLoop) + '.html')
            print(filePath)
            with open(filePath, mode='wb') as f:
                f.write(r.content)
            nCount += 1
            print('已经下载了%d页目录' % nCount)
            print('Time total:' + repr(time.time() - StartTime) + '\n')

        if not nextTag:
            if checkTag:
                # Captcha and no next link: remember the page and signal retry.
                print('check')
                nPage = nLoop
                bRetry = True
            else:
                # Last page reached cleanly: reset the resume marker.
                print('break')
                nPage = 1
            break

        url = 'http://kns.cnki.net/kns/brief/brief.aspx' + nextTag.get('href')
        nLoop += 1

    return not bRetry


def main(logger=None):
    """Iterate (class, year) rows from the class_year table and keep
    calling GetHome for each pair until it reports success."""
    query = 'select classes,years from class_year'
    for record in mysqlutils.SelectFromDBFetchOne(query):
        navi_code, year = record[0], str(record[1])
        print(navi_code + ' : ' + year)
        # Retry the same pair until GetHome returns True (e.g. after a
        # captcha or request failure forced an early exit).
        while not GetHome(navi_code, year):
            pass


if __name__ == '__main__':
    main()
