import requests
from lxml import etree
import time, random
from difflib import SequenceMatcher
from header_random import get_random_header, get_random_proxies, get_random_cookies

BASE_URL = "https://www.qb5200.tw"
SEARCH_URL = "https://www.qb5200.tw/s.php"
# https://www.qb5200.tw/s.php?q=

def string_similar(a, b):
    """Return the similarity ratio between strings *a* and *b* (0.0-1.0)."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()


def get_novel_detail(novel_div_obj=""):
    """Extract one novel's metadata from a search-result block element.

    Args:
        novel_div_obj: lxml element for a single search result (the ""
            default is kept for interface compatibility, but a real
            element is required — a string has no ``xpath`` method).

    Returns:
        dict with keys: name, author, url, img_url, novel_category,
        desc, update_chapter, update_chapter_url, publish_date
        (publish_date is always "" — not present on the search page).
    """
    def _drop_prefix(text, prefix):
        # BUGFIX: str.strip(prefix) treats its argument as a character
        # SET and also eats matching trailing characters (e.g. an author
        # named "...者" lost its last char). Remove the exact label only.
        return text[len(prefix):] if text.startswith(prefix) else text

    info = "./div[@class='bookinfo']"
    novel_dict = {}
    novel_dict['name'] = "".join(novel_div_obj.xpath(info + "/h4[@class='bookname']/a/text()"))
    novel_dict['author'] = _drop_prefix(
        "".join(novel_div_obj.xpath(info + "/div[@class='author']/text()")), "作者：")
    novel_dict['url'] = BASE_URL + "".join(novel_div_obj.xpath(info + "/h4[@class='bookname']/a/@href"))
    novel_dict['img_url'] = BASE_URL + "".join(novel_div_obj.xpath("./div[@class='bookimg']/a/img/@src"))
    novel_dict['novel_category'] = _drop_prefix(
        "".join(novel_div_obj.xpath(info + "/div[@class='cat']/text()")), "分类：")
    novel_dict['desc'] = "".join(novel_div_obj.xpath(info + "/p/text()"))
    novel_dict['update_chapter'] = "".join(novel_div_obj.xpath(info + "/div[@class='update']/a/text()"))
    novel_dict['update_chapter_url'] = BASE_URL + "".join(novel_div_obj.xpath(info + "/div[@class='update']/a/@href"))
    novel_dict['publish_date'] = ""
    return novel_dict


def search_novels(novel_name=""):
    """Search the site by novel name and return matching novel dicts.

    Args:
        novel_name: title (or fragment) to search for.

    Returns:
        list of metadata dicts, one per result (see get_novel_detail).
    """
    # BUGFIX: the name was interpolated into the URL without encoding,
    # which breaks queries containing '&', '#', spaces, etc. Passing it
    # via params= lets requests URL-encode it correctly. (The previous
    # `global SEARCH_URL` was pointless: the variable is only read.)
    # NOTE(review): verify=False disables TLS certificate checks —
    # presumably deliberate for this site; confirm before production use.
    res = requests.request(
        'GET',
        SEARCH_URL,
        params={'q': novel_name},
        headers=get_random_header(),
        cookies=get_random_cookies(),
        verify=False,
    )

    html = etree.HTML(res.text)
    novel_div_list = html.xpath("//div[@class ='p10']")
    return [get_novel_detail(novel_div_obj) for novel_div_obj in novel_div_list]


def get_novel_base_info(nurl=""):
    """Fetch a novel's landing page and parse base info plus its chapters.

    Args:
        nurl: absolute URL of the novel's index page.

    Returns:
        dict with name/author/url/img_url/novel_category/publish_state/
        desc/update_chapter/update_chapter_url/publish_date plus a
        'chapters' list of {'name', 'url', 'content'} dicts ordered
        oldest-to-newest with duplicate URLs removed.
    """
    def _drop_prefix(text, prefix):
        # BUGFIX: str.strip(prefix) strips a character SET from both
        # ends, corrupting values whose real text ends with one of the
        # label's characters. Remove the exact leading label instead.
        return text[len(prefix):] if text.startswith(prefix) else text

    # NOTE(review): verify=False disables TLS certificate checks.
    res = requests.request('GET', nurl, headers=get_random_header(), cookies=get_random_cookies(), verify=False)

    novel_html = etree.HTML(res.text)
    # 01: base novel information.
    novel_dict = {}
    novel_dict['name'] = "".join(novel_html.xpath("//div[@class='path']/div[@class='p']/a[2]/text()"))
    novel_dict['author'] = _drop_prefix(
        "".join(novel_html.xpath("//div[@class='info']/div[@class='small']/span[1]/text()")), "作者：")
    novel_dict['url'] = nurl
    novel_dict['img_url'] = BASE_URL + "".join(novel_html.xpath("//div[@class ='info']/div[@ class ='cover']/img/@src"))
    novel_dict['novel_category'] = _drop_prefix(
        "".join(novel_html.xpath("//div[@class='info']/div[@class='small']/span[2]/text()")), "分类：")
    novel_dict['publish_state'] = _drop_prefix(
        "".join(novel_html.xpath("//div[@class='info']/div[@class='small']/span[3]/text()")), "状态：")
    novel_dict['desc'] = "".join(novel_html.xpath("//div[@class='info']/div[@class='intro']/text()"))
    novel_dict['update_chapter'] = "".join(novel_html.xpath("//div[@class='info']/div[@class='small']/span[@class='last'][2]/a/text()"))
    novel_dict['update_chapter_url'] = BASE_URL + "".join(novel_html.xpath("//div[@class='info']/div[@class='small']/span[@class='last'][2]/a/@href"))
    novel_dict['publish_date'] = _drop_prefix(
        "".join(novel_html.xpath("//div[@class='info']/div[@class='small']/span[@class='last'][1]/text()")), "更新时间：")
    # 02: chapter list.
    chapter_list = novel_html.xpath("//div[@class='listmain']/dl/dd/a")
    chapter_dict_list = []
    for c_obj in chapter_list:
        chapter_dict_list.append({
            'name': c_obj.text,
            'url': BASE_URL + c_obj.get('href'),
            'content': "",  # filled later by get_chapter_info
        })
    # 02.1: reverse (page lists newest first) then de-duplicate by URL.
    # BUGFIX: the original comment promised de-duplication but only
    # reversed the list; listmain pages repeat the latest chapters at
    # the top, so drop repeats while preserving order.
    seen_urls = set()
    deduped = []
    for chapter in reversed(chapter_dict_list):
        if chapter['url'] not in seen_urls:
            seen_urls.add(chapter['url'])
            deduped.append(chapter)
    novel_dict['chapters'] = deduped
    return novel_dict


def get_chapter_info(chapter_url=""):
    """Download one chapter page and return its text content.

    Args:
        chapter_url: absolute URL of the chapter page.

    Returns:
        dict with 'content' (joined chapter text) and 'url' keys.
    """
    # (Removed a no-op self-assignment `chapter_url = chapter_url`.)
    print("[get_chapter_info]: {}".format(chapter_url))
    # Random 1-2 s pause between requests to avoid hammering the site.
    time.sleep(random.randint(1, 2))
    # NOTE(review): verify=False disables TLS certificate checks.
    res = requests.request('GET', chapter_url, headers=get_random_header(), cookies=get_random_cookies(), verify=False)
    chapter_html = etree.HTML(res.text)
    return {
        'content': "".join(chapter_html.xpath("//div[@class='content']/div[@id='content']/text()")),
        'url': chapter_url,
    }




if __name__ == '__main__':
    # Usage examples (left disabled):
    #   search_novels(novel_name="皇")
    #   get_novel_base_info(nurl="https://www.qb5200.tw/xiaoshuo/63/63073/")
    # Ad-hoc smoke test: print the similarity of two sample titles.
    first, second = "玄幻", "玄魔法幻"
    similarity = string_similar(second, first)
    print(similarity)
