import os
import random
import time
import urllib.parse as parse

import facade
import requests
from bs4 import BeautifulSoup
from xjlibrary.our_file_dir import BaseDir

# Resolve directories relative to this script; listing pages are saved under
# <top>/download/cnki_bz/download (three levels above this file).
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -3)
sPath = BaseDir.get_new_path(TopPath, "download", "cnki_bz", "download")

# Module-level crawl state (GetHome mutates nPage via ``global``):
gRecord = 0              # not referenced in this view -- presumably a record counter; TODO confirm
StartTime = time.time()  # crawl start time, used for elapsed-time log lines
nPage = 1                # breakpoint page: where to resume after a failure or captcha


# Default proxy used for every request issued by GetHome (the randomized
# get_proxys() alternative is currently commented out at the call site).
Proxies = {
    'http': '192.168.30.4:8080',
    'https': '192.168.30.4:8080'  # the key is the protocol of the target site
}

def get_proxys():
    """Pick one proxy at random from the internal pool.

    Returns:
        dict: a requests-style proxies mapping using the same host:port for
        both ``'http'`` and ``'https'`` (the key is the protocol of the
        target site). The chosen mapping is also printed for logging.
    """
    # NOTE: "192.168.30.176:8120" appears twice, doubling its selection
    # weight; the pool is kept as-is to preserve the original distribution.
    listproxy = ["192.168.30.176:8120", "192.168.30.176:8120", "192.168.30.176:8119",
                 "192.168.30.176:8184", "192.168.30.176:8207", "192.168.30.176:8076",
                 "192.168.30.176:8030", "192.168.30.176:8135", "192.168.30.176:8012",
                 "192.168.30.176:8033", "192.168.30.176:8004", "192.168.30.176:8171",
                 "192.168.30.176:8031", "192.168.30.176:8011", "192.168.30.176:8160",
                 "192.168.30.123:8081", "192.168.30.176:8165", "192.168.30.176:8039",
                 "192.168.30.176:8041", "192.168.30.176:8098", "192.168.30.176:8231",
                 "192.168.30.176:8140", "192.168.30.176:8082", "192.168.30.176:8182"]
    # random.choice picks a single element directly; the original
    # random.sample(listproxy, 1)[0] allocated a throwaway one-item list.
    # Also renamed the local (was ``Proxies``) so it no longer shadows the
    # module-level default proxy dict.
    proxy = random.choice(listproxy)
    proxies = {
        'http': proxy,
        'https': proxy  # the key is the protocol of the target site
    }
    print(proxies)

    return proxies


# Headers mimicking an IE8 browser session against epub.cnki.net so the
# server treats the scripted requests as a normal browser.
HEADERS = {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, sdch', 'Connection': 'keep-alive',
           'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET4.0C; .NET4.0E)',
           'Referer': 'http://epub.cnki.net/kns/brief/result.aspx?dbprefix=SMSD',
           'Host': 'epub.cnki.net',
           'Accept-Language': 'zh-CN,zh;q=0.8'}
nCount = 0  # count of listing pages downloaded so far (incremented in GetHome)
# MySQL helper configured from db.ini located next to this script; supplies
# the (class, year) work queue consumed by main().
mysqlutils = facade.MysqlUtiles(BaseDir.get_new_path(curPath, "db.ini"), "db", facade.get_streamlogger())


# Debug helper: dump raw bytes to a file for inspection.
def output(value, files='log.txt'):
    """Write *value* to *files*, replacing any existing content.

    Args:
        value: bytes-like payload (the file is opened in binary mode).
        files: destination path; defaults to ``log.txt`` in the CWD.
    """
    # Context manager guarantees the handle is closed even if write() raises;
    # the original open/write/close leaked the handle on error.
    with open(files, 'wb') as f:
        f.write(value)


def GetHome(NaviCode, Year, nLoop=1):
    """Download the paginated CNKI standard-catalog listing for one class/year.

    Drives a fixed request sequence against kns.cnki.net (result page ->
    search handler -> year grouping -> paged brief listing) and saves each
    listing page as ``<NaviCode>_<Year>_<page>.html`` under ``sPath/Raw``.

    :param NaviCode: NaviCode, the standard category code
    :param Year: year to download, as a string
    :param nLoop: current page number; on failure it is stored in the global
        ``nPage`` so the next call can resume from the breakpoint
    :return: True when paging completed (or the first page already exists);
        False when a request failed or a captcha was hit -- the caller is
        expected to call again and the function resumes from ``nPage``.
    """
    global nCount, nPage
    # Proxies = get_proxys()
    bRetry = False
    filePath = os.path.join(sPath, 'Raw', NaviCode + '_' + Year + '_' + str(nLoop) + '.html')
    if os.path.exists(filePath):
        # NOTE: if the program was interrupted for some reason, every page of
        # the last (class, year) pair should be deleted before restarting.
        print("文件存在：" + filePath)
        return True

    # Step 1: open the result page so the session acquires CNKI cookies.
    url = r'http://kns.cnki.net/kns/brief/result.aspx?dbPrefix=CISD'
    sn = requests.Session()
    BoolResult, errString, r = facade.BaseRequest(url,
                                                  sn=sn,
                                                  proxies=Proxies,
                                                  endstring="",
                                                  headers=HEADERS,
                                                  timeout=30
                                                  )
    if not BoolResult:
        print("请求失败")
        nPage = nLoop  # record breakpoint before bailing out
        return False

    # Step 2: register the search on the server side via SearchHandler so the
    # subsequent brief.aspx requests have a query to page through.
    url = r'http://kns.cnki.net/kns/request/SearchHandler.ashx?action='
    url += '&NaviCode=' + NaviCode
    url += '&catalogName=SCSD_CCSCLS&ua=1.21&PageName=ASP.brief_result_aspx&DbPrefix=CISD&DbCatalog=标准数据总库&ConfigFile=CISD.xml&db_opt=CISD&his=0'
    BoolResult, errString, r = facade.BaseRequest(url,
                                                  sn=sn,
                                                  proxies=Proxies,
                                                  endstring="",
                                                  headers=HEADERS,
                                                  timeout=30,
                                                  )
    if not BoolResult:
        nPage = nLoop
        print('network 102')
        return False

    # Step 3: request the year grouping (left-hand panel). Round-tripping the
    # query string through parse_qsl/urlencode normalizes the %uXXXX escapes
    # embedded in the hard-coded URL.
    url = 'http://kns.cnki.net/kns/group/doGroupLeft.aspx?action=1&Param=ASP.brief_result_aspx%23CISD/%u5E74/%u5E74%2Ccount%28*%29/%u5E74/%28%u5E74%2C%27date%27%29%23%u5E74%24desc/1000000%24/-/40/40000/ButtonView&cid=0&clayer=0'
    url = 'http://kns.cnki.net/kns/group/doGroupLeft.aspx?' + parse.urlencode(
        dict(parse.parse_qsl(parse.urlparse(url).query)))
    BoolResult, errString, r = facade.BaseRequest(url,
                                                  sn=sn,
                                                  proxies=Proxies,
                                                  endstring="",
                                                  headers=HEADERS,
                                                  timeout=30
                                                  )
    if not BoolResult:
        nPage = nLoop
        print('network 102')
        return False

    # Resume from the breakpoint recorded by a previous failed run.
    if nPage != 1:
        nLoop = nPage
        bRetry = True

    # Step 4: fetch the brief listing filtered to the given year (50 records
    # per page), then follow the "next page" link until it disappears.
    url = "http://kns.cnki.net/kns/brief/brief.aspx?ctl=cb3e3136-313b-4f22-b559-5a06005ef67e&dest=%E5%88%86%E7%BB%84%EF%BC%9A%E5%B9%B4%20%E6%98%AF%20{Year}&action=5&dbPrefix=CISD&PageName=ASP.brief_result_aspx&Param=%e5%b9%b4+%3d+%27{Year}%27&SortType=%e5%b9%b4&ShowHistory=1&recordsperpage=50".format(
        Year=Year)
    while 1:
        BoolResult, errString, r = facade.BaseRequest(url,
                                                      sn=sn,
                                                      proxies=Proxies,
                                                      endstring="",
                                                      headers=HEADERS,
                                                      timeout=30,
                                                      )
        if not BoolResult:
            nPage = nLoop
            bRetry = True
            print('network 102')
            break

        soup = BeautifulSoup(r.text, 'lxml')
        nextTag = soup.find('a', id='Page_next')        # "next page" link; absent on the last page
        checkTag = soup.find('input', id='CheckCode')   # captcha input; present when CNKI blocks us

        # When resuming, rewrite the next-page link to jump straight to the
        # breakpoint page instead of walking forward from page 2.
        if bRetry and nextTag:
            url = 'http://kns.cnki.net/kns/brief/brief.aspx' + nextTag.get('href')
            url = url.replace('curpage=2', 'curpage=%d' % nLoop)
            print('retry url:', url)
            bRetry = False
            continue

        # No captcha: save the raw listing page to disk.
        if not checkTag:
            if not os.path.exists(os.path.join(sPath, 'Raw')):
                os.makedirs(os.path.join(sPath, 'Raw'))
            filePath = os.path.join(sPath, 'Raw', NaviCode + '_' + Year + '_' + str(nLoop) + '.html')
            print(filePath)
            with open(filePath, mode='wb') as f:
                f.write(r.content)
            nCount += 1
            print('已经下载了%d页目录' % nCount)
            print('Time total:' + repr(time.time() - StartTime) + '\n')

        if not nextTag:
            if checkTag:
                # Captcha hit: record the breakpoint so the caller retries here.
                print('check')
                nPage = nLoop
                bRetry = True
            else:
                # Final page reached cleanly: reset the breakpoint.
                print('break')
                nPage = 1
            break

        url = 'http://kns.cnki.net/kns/brief/brief.aspx' + nextTag.get('href')
        nLoop += 1

    return not bRetry


def main(logger=None):
    """Crawl every (class, year) pair listed in the ``class_year`` table.

    For each pair, GetHome is invoked repeatedly until it reports success,
    so transient network failures and captcha blocks are retried from the
    recorded breakpoint page.
    """
    query = 'select classes,years from class_year'
    # Debug variant restricted to a single class/year:
    # query = "select classes,years from class_year where classes = 'G' and years = '1994'"
    for record in mysqlutils.SelectFromDBFetchOne(query):
        print(record[0] + ' : ' + str(record[1]))
        # Retry until the whole class/year finished downloading.
        while not GetHome(record[0], str(record[1])):
            pass


if __name__ == "__main__":
    # Script entry point: drain the class_year work queue from the database.
    main()
