import urllib.request
import requests
import re
import csv
import random
from fontTools.ttLib import TTFont
from io import BytesIO

# Fetch *url* through a randomly chosen HTTP proxy with a randomly chosen
# User-Agent header and return the raw response body as bytes.
def open_url(url):
    # Hard-coded HTTP proxy pool.
    # NOTE(review): public proxies like these go stale quickly; consider
    # loading the pool from configuration instead.
    proxy_list = ['219.138.58.114:3128', '61.135.217.7:80', '101.201.79.172:808', '122.114.31.177:808']
    # User-Agent pool. The second entry previously embedded the literal
    # header name ('User-Agent:...') inside the header *value* and lacked a
    # closing parenthesis, sending a malformed header; fixed here.
    user_list = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36',
                 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
                 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
                 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16']

    # Proxy and User-Agent are independent choices; the old code coupled
    # them through one shared random index for no benefit.
    proxy_support = urllib.request.ProxyHandler({'http': random.choice(proxy_list)})
    opener = urllib.request.build_opener(proxy_support)
    opener.addheaders = [('User-Agent', random.choice(user_list))]
    # Install globally so plain urllib.request.urlopen() calls elsewhere in
    # this module also go through the proxy (the original did this too).
    urllib.request.install_opener(opener)
    response = urllib.request.urlopen(url)
    try:
        return response.read()
    finally:
        # The original never closed the response; close it explicitly.
        response.close()

# Like open_url(), but return the open response object instead of its body.
# The caller is responsible for reading and (ideally) closing it.
def open_url2(url):
    # Hard-coded HTTP proxy pool.
    # NOTE(review): duplicated from open_url(); the pools should be shared
    # module-level constants. Kept local to leave both interfaces intact.
    proxy_list = ['219.138.58.114:3128', '61.135.217.7:80', '101.201.79.172:808', '122.114.31.177:808']
    # User-Agent pool. The second entry previously embedded the literal
    # header name ('User-Agent:...') inside the header *value* and lacked a
    # closing parenthesis, sending a malformed header; fixed here.
    user_list = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16']

    # Proxy and User-Agent are chosen independently rather than through a
    # single shared random index.
    proxy_support = urllib.request.ProxyHandler({'http': random.choice(proxy_list)})
    opener = urllib.request.build_opener(proxy_support)
    opener.addheaders = [('User-Agent', random.choice(user_list))]
    # Install globally so bare urlopen() calls share the proxy setup,
    # matching the original behaviour.
    urllib.request.install_opener(opener)
    response = urllib.request.urlopen(url)
    return response

def getPicture(picture, name):
    """Download the resource at URL *picture* and save it to file *name*.

    Uses context managers so both the connection and the file are closed
    even on error (the original leaked the file handle if write() raised,
    and shadowed the builtin name ``bytes``).
    """
    with urllib.request.urlopen(picture) as response, open(name, "wb") as fp:
        fp.write(response.read())

def get_font(url):
    """Download the .woff font at *url* and return its best character map.

    The returned mapping goes from codepoint to glyph name (e.g. 'three'),
    as produced by fontTools' ``getBestCmap``; the font object is closed
    before returning.
    """
    raw = open_url2(url).read()
    ttf = TTFont(BytesIO(raw))
    try:
        return ttf.getBestCmap()
    finally:
        ttf.close()

def get_encode(cmap, values):
    """Decode a font-obfuscated number string into plain digits.

    *values* is a run of decimal HTML character references such as
    ``'&#100237;&#100238;'``. Each reference is stripped of its ``&#``
    prefix, its codepoint looked up in *cmap* (codepoint -> glyph name),
    and the glyph name mapped to the literal character via WORD_MAP.

    Returns the decoded string, e.g. ``'12.3'``.

    Fix: the original ``break``-ed on the first empty segment, which is
    correct only when the empty piece is the trailing one produced by
    ``split(';')``; an empty segment anywhere else silently truncated the
    result. Empty segments are now skipped instead.
    """
    WORD_MAP = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7',
                'eight': '8', 'nine': '9', 'period': '.'}
    decoded = []
    for ref in values.split(';'):
        if not ref:
            continue  # skip empty pieces (e.g. after the final ';')
        codepoint = int(ref[2:])  # drop the leading '&#'
        decoded.append(WORD_MAP[cmap[codepoint]])
    return ''.join(decoded)

def getInformation(url):
    """Fetch a book's detail page and return ``[popularity]``.

    The site obfuscates the weekly-recommendation count with a per-request
    .woff font: the count's <span> class names the font file, and the span
    body holds HTML character references in the font's private codepoints.
    When that span is present, the font is fetched and the digits decoded;
    otherwise the popularity is reported as 0.

    Fix: removed dead code (a combined regex that was built but never
    compiled) and guarded against an empty findall() result, which
    previously raised IndexError when the page layout changed.
    """
    print(url)
    # Read the book's detail page.
    html = open_url(url).decode('utf-8')
    # Alternation: the left branch captures (<woff name>, <obfuscated
    # digits>); the right branch matches a bare layout marker and yields
    # ('', ''), which the code below treats as "no obfuscated count".
    book_popularity = r'会员周点击.*?</style><span class="(.*?)">(.*?)</span></em><cite>万总推荐</cite><i>|</i><em><style>@font-face'
    reg = re.compile(book_popularity, re.S)
    contents_list = re.findall(reg, html)
    print(contents_list)
    if not contents_list:
        # Fetch failed or the page layout changed: report zero instead of
        # crashing with IndexError.
        word_count = 0
    else:
        content = contents_list[0]
        if content[0] != '':
            # The span class names the anti-spider font file to download.
            woff_url = 'https://qidian.gtimg.com/qd_anti_spider/' + content[0] + '.woff'
            cmap = get_font(woff_url)
            word_count = get_encode(cmap, content[1])
        else:
            word_count = 0

    # Returned as a one-element list so callers can index it uniformly.
    book_info = [word_count]
    return book_info

def getScore(id):
    """Fetch the comment-index AJAX endpoint for book *id* and return the
    book's rating as a float.

    Fix: the original read the rating digits at fixed string offsets of the
    stringified byte payload (``rr[18]`` / ``rr[20]``) — those offsets line
    up exactly with the digits of a response starting ``{"data":{"rate":D.D``,
    so any one-byte shift in the payload misreads or crashes. A regex on the
    ``"rate"`` field extracts the same value robustly.

    NOTE(review): field name '"rate"' is inferred from the old offsets —
    confirm against a live response. Returns 0.0 when no rate is found.
    """
    url = 'https://book.qidian.com/ajax/comment/index?_csrfToken=ikTYj2nXKgS3CJJ6pGEJlh8LPuTv0cmIBtnBW0zh&bookId=' + id + '&pageSize=15'
    body = open_url(url).decode('utf-8', errors='replace')
    print(body)
    match = re.search(r'"rate":\s*(\d+(?:\.\d+)?)', body)
    return float(match.group(1)) if match else 0.0

# Extract every book's information from one listing page using regexes.
def find_contents(url):
    """Scrape one qidian.com listing page and return a list of rows.

    Each row is [book id, title, author, 'genre-subgenre', serial state,
    score, popularity, intro, cover filename].

    Side effects: downloads each book's cover image to '<book id>.jpg' in
    the working directory, and issues extra HTTP requests per book via
    getInformation() and getScore().
    """
    print(url)
    html = open_url(url).decode('utf-8')
    # Build the regular expressions for each field.
    # The next two commented-out patterns are earlier attempts kept for
    # reference.
    #book_url = r'<li data-rid=".*?"><div class="book-img-box"><a href="(.*?)" data-bid="\d*?" data-eid=".*?" target="_blank"><img src="(.*?)"></a></div>'
    #picture_url = r'<div class="book-img-box"><a href=".*?" data-bid="\d*?" data-eid=".*?" target="_blank"><img src="(.*?)"></a></div>'
    picture_url = r'<a href=".*?" data-bid="\d*?" data-eid="qd_B57" target="_blank"><img src="(.*?)">'
    book_name = r'<h4><a href="(.*?)" target="_blank" data-eid="qd_B58" data-bid="(\d*?)">(.*?)</a>'
    book_author = r'<a class="name" href=".*?" data-eid=".*?" target="_blank">(.*?)</a>'
    book_type1 = r'<a href=".*?" target="_blank" data-eid=".*?">(.*?)</a>'
    book_type2 = r'<a class="go-sub-type" data-typeid="\d*?" data-subtypeid="\d*?" href="javascript:" data-eid=".*?">(.*?)</a>'
    book_state = r'<span >(.*?)</span>'
    book_intro = r'<p class="intro">(.*?)</p>'

    # Join the pieces into one pattern; with re.S, '.*?' also spans
    # newlines between the fields.
    information = picture_url + r'.*?' + book_name + r'.*?' + book_author + r'.*?' + book_type1 + \
                  r'.*?' + book_type2 + r'.*?' + book_state + r'.*?' + book_intro
    # Compile the combined pattern once.
    reg = re.compile(information, re.S)
    # Find every book entry on the page. findall() returns tuples with
    # groups: 0=cover img src, 1=book page href, 2=book id (data-bid),
    # 3=title, 4=author, 5=main genre, 6=sub genre, 7=serial state,
    # 8=intro text.
    contents_list = re.findall(reg, html)
    contents = []

    for content in contents_list:
        content = list(content)
        print(content)
        # Scheme-relative URLs in the page; prepend https.
        picture = 'https:' + content[0]
        getPicture(picture, content[2] + '.jpg')
        book_url = 'https:' + content[1]
        # Per-book detail fetch: [popularity] (font-decoded).
        book_info = getInformation(book_url)
        book_id = content[2]

        new_content = list()
        new_content.append(book_id)
        new_content.append(content[3])
        new_content.append(content[4])
        new_content.append(content[5] + '-' + content[6])
        new_content.append(content[7])
        new_content.append(getScore(book_id))
        new_content.append(book_info[0])
        new_content.append(content[8].strip())
        new_content.append(content[2] + ".jpg")
        print(new_content)
        contents.append(new_content)
    return contents

# Write the scraped rows to the CSV file.
def save_contents(contents, writer):
    """Append every row in *contents* to the open csv *writer*."""
    writer.writerows(contents)


# Entry point: crawl `pages` listing pages under the base `url` and write
# every scraped row to `filename` as CSV.
def download(filename, url, pages=1):
    """Scrape pages 1..pages of *url* and save all rows to *filename*.

    The file is written with a header row first; gb18030 encoding matches
    the Chinese field values expected by downstream consumers.
    """
    fileheader = ['ID', '作品', '作者', '类型', '状态', '评分', '热度', '简介', '图片']
    with open(filename, 'w', newline='', encoding='gb18030') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(fileheader)

        # Scrape each listing page in turn and persist its rows.
        for page_no in range(1, pages + 1):
            save_contents(find_contents(url + str(page_no)), csv_writer)


if __name__ == '__main__':
    # Listing sections to crawl; the extra sections are left disabled.
    urlList = ['https://www.qidian.com/all?orderId=&style=1&pageSize=20&siteid=1&pubflag=0&hiddenField=0&page=']
             #'https://www.qidian.com/mm/all?orderId=&style=1&pageSize=20&siteid=0&pubflag=0&hiddenField=0&page=',
             #'https://www.qidian.com/all_pub?orderId=&style=1&pageSize=20&siteid=1&pubflag=1&hiddenField=0&page=']
    # NOTE(review): every URL writes to the same 'novel.csv' in 'w' mode,
    # so enabling more than one section would overwrite earlier output.
    # Iterate the list directly instead of indexing over range(len(...)).
    for base_url in urlList:
        download('novel.csv', base_url, 10)

