import requests, re, pymongo
from lxml import etree
from concurrent.futures import ThreadPoolExecutor  # 多线程

# 请求链接，获取问题的响应
def each_page(url):
    """Fetch one question-listing page and return absolute question URLs.

    Args:
        url: Listing-page URL containing a ``page=N`` query parameter
            (used only for the progress message).

    Returns:
        List of absolute question-detail URLs found on the page.
    """
    # timeout keeps a stalled connection from hanging its worker thread
    # forever (the original call had no timeout at all)
    response = requests.get(url, timeout=10)
    xml = etree.HTML(response.text)
    href_ls = ['https://www.xinli001.com' + href
               for href in xml.xpath('//p[@class="title"]/a/@href')]
    page = re.findall(r'page=(\d*)', url)[0]
    print(f'已爬下第{page}页的问题链接')
    return href_ls

# 请求详情页，获取问答信息
def get_detail(url, count):
    """Fetch a question-detail page and extract the Q&A data.

    Args:
        url: Absolute URL of the question-detail page.
        count: Sequence number, printed as a progress indicator only.

    Returns:
        Dict with keys '标题' (title), '问题补充' (description), '标签'
        (tag list) and '回答' (answers with positive vote counts), or an
        empty dict when any required field could not be extracted.
        (The original implicitly returned None in that case, which made
        the caller crash on ``info['标签']``.)
    """
    response = requests.get(url, timeout=10)
    xml = etree.HTML(response.text)
    try:
        # Question title
        q = xml.xpath('//p[@class="title"]/span/text()')[0]
        # Question description, whitespace stripped
        # (this lookup could also raise IndexError; now guarded too)
        q_desc = re.sub(r'\s', '', xml.xpath('//p[@class="text"]/text()')[0])
    except IndexError:
        print('第2轮多线程开太高了，调低一点，否则有些问题的抓取会遗漏')
        return {}
    # Question tags, used for classification by the caller
    tag = xml.xpath('//ul[@class="label detail-tag"]/a/li/text()')
    # Collect answers that received at least one "useful" vote
    ans_ls = []
    for ans in xml.xpath('//ul[@class="content-ans"]/li'):
        text = re.sub(r'\s', '', ''.join(ans.xpath('./div[@class="text"]/text()')))
        zan_nodes = ans.xpath('./div[@class="label"]/span[@class="answer_zan"]/a/font/text()')
        if not zan_nodes:
            # missing vote element — skip instead of raising IndexError
            continue
        zan = int(zan_nodes[0])
        if zan > 0 and text:
            ans_ls.append({'内容': text, '有用数': zan})
    print(f'问题id--{count}')
    if q and q_desc and tag and ans_ls:
        return {'标题': q, '问题补充': q_desc, '标签': tag, '回答': ans_ls}
    # Always return a dict so callers can safely test truthiness
    return {}
# 保存至数据库
def save_to_mongo(collect, data):
    """Insert a single document into a MongoDB collection.

    Args:
        collect: A pymongo collection object (anything exposing
            ``insert_one``).
        data: The document to store, as a dict.

    Failures are logged to stdout rather than raised, so one bad
    record never aborts the whole storage loop.
    """
    try:
        collect.insert_one(data)
    except Exception as err:
        print(err, '存储到MongoDb失败', data)

# 主循环函数
def loop(key_ls):
    """Main driver: crawl listing pages, fetch details, store in MongoDB.

    Args:
        key_ls: Tag names; a question whose tags contain one of these is
            stored in the collection of that name, otherwise in '综合'.
    """
    # The pagination is predictable, so build the 20 listing URLs directly
    url_ls = [f'https://www.xinli001.com/qa?page={page}' for page in range(1, 21)]

    # Round 1 of threading: fetch every listing page concurrently.
    # Each page yields a list of question links; flatten them into one list.
    with ThreadPoolExecutor(max_workers=10) as executor1:
        pages = executor1.map(each_page, url_ls)
        question_url_ls = [href for hrefs in pages for href in hrefs]

    # Round 2 of threading: fetch every detail page.
    # max_workers is kept low on purpose — the site drops requests otherwise.
    with ThreadPoolExecutor(max_workers=2) as executor2:
        req_detail = executor2.map(get_detail, question_url_ls,
                                   range(1, 1 + len(question_url_ls)))
        # Bug fix: drop falsy results (None/{}) from failed extractions;
        # the original crashed later on info['标签'] for those.
        info_ls = [info for info in req_detail if info]
    print(info_ls, len(info_ls))

    # MongoDB inserts are done sequentially (threaded writes errored out)
    print('即将存储')
    client = pymongo.MongoClient('localhost')
    for info in info_ls:
        # Bug fix: the original looped over ALL of key_ls and saved once
        # per key — every record was inserted len(key_ls) times, mostly
        # into '综合'. Save once per *matching* tag instead, falling back
        # to a single insert into '综合' when no tag matches.
        matched = [key for key in key_ls if key in info['标签']]
        if matched:
            for key in matched:
                save_to_mongo(client['mydb'][key], info)
        else:
            save_to_mongo(client['mydb']['综合'], info)

if __name__ == '__main__':
    # Tags that each get their own MongoDB collection; any question whose
    # tags match none of these ends up in the '综合' (general) collection.
    categories = [
        '痛苦', '压力', '迷茫', '抑郁', '自卑', '焦虑', '失眠', '工作', '消沉',
        '失恋', '暴躁', '紧张', '逆反', '孤独', '烦恼', '愤怒', '恐惧', '强迫症',
    ]
    loop(categories)