import requests, re, pymongo, jieba
from lxml import etree
from collections import Counter
from concurrent.futures import ThreadPoolExecutor  # 多线程
import pandas as pd
import random

# 爬些高匿代理来用用
def get_proxy():
    res = requests.get('http://www.xiladaili.com/gaoni/')
    xml = etree.HTML(res.text)
    proxy_ls = xml.xpath('//table[@class="fl-table"]/tbody/tr/td[1]/text()')
    head_ls = xml.xpath('//table[@class="fl-table"]/tbody/tr/td[2]/text()')
    return [{"https": i} if 'HTTPS' in j else {"http": i} for i, j in zip(proxy_ls, head_ls)]

# 请求链接，获取问题的响应
def each_page(url):
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'}
    # 异常只可能出现在请求响应的时候，解析部分已经消除能遇见的所有bug
    try:
        # p = random.choice(proxy_ls)
        response = requests.get(url,headers=headers,proxies={'https': '39.108.171.42:3128'})
    except Exception as e:
        print(e)
        return []
    html = response.text
    # if '心理' in html:
    #     print(p)
    xml = etree.HTML(html)
    href_ls = xml.xpath('//div[@class="content"]/a/@href')
    page = re.findall(r't1/p(\d*)',url)
    if page == []:
        print(f'已爬下第1页的问题链接')
    else:
        print(f'已爬下第{page[0]}页的问题链接')
    return href_ls

# 请求详情页，获取问答信息
def get_detail(url, count):
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'}
    try:
        response = requests.get(url,headers=headers,proxies={'https': '39.108.171.42:3128'})
    except Exception as e:
        print(e)
        return ''
    html = response.text
    xml = etree.HTML(html)
    # 问题内容,标题
    try:
        q = xml.xpath('//div[@class="content"]/h1//text()')[0]
    except IndexError:
        return ''
    # 问题补充
    q_desc = re.sub(r'\s', '', ''.join(xml.xpath('//div[@class="content"]/p//text()')))
    # # 将问题切分为词语，词频统计得到关键词作为其分类标签
    # invalid_text = re.sub(r',|\.|，|。|我|你|和|呼呼','',q+q_desc)
    # tag_ls = jieba.lcut(invalid_text)
    # dic = dict(Counter(tag_ls))
    # tag = sorted(dic.items(), key=lambda dic: dic[1], reverse=True)[0][0]
    # 回答
    ans = xml.xpath('//div[@class="col_main"]/div[@class="box comments"]')[0]
    if xml.xpath('//div[@class="title_lines"]/div/text()')[0] == '最佳答案':
        ans_id = ans.xpath('./div/div/span/text()')[0]
        ans_text = re.sub('\s', '', ''.join(ans.xpath('./div/div/p[@class="content"]//text()')))
        print(f'问题id--{count}')
        return {'标题':q,'问题补充':q_desc,'最佳回答':ans_text,'答者身份':ans_id}
    else:
        return '无满足问答'

# 保存至数据库
def save_to_mongo(collect, data, count):
    # 传入data类型为字典
    if type(data) == dict:
        try:
            collect.insert_one(data)
            count += 1
        except Exception as e:
            print(e, '存储到MongoDb失败', data)

# 主循环函数
def loop():
    # 避免去请求页码，直接分析翻页规律
    url_ls = []
    # proxy_ls = get_proxy()
    # 设定起始页和翻页数
    start, num = 0, 3000
    for page in range(start, start+num):
        # 根据规律生成num页的链接，目的是后续用多线程爬
        if page == 0:
            url = 'https://www.ydl.com/ask/t1/'
        else:
            url = f'https://www.ydl.com/ask/t1/p{page+1}'
        url_ls.append(url)
    # 多线程思路：将所有同级的请求均交给1次进程，然后开设多线程去爬，有几种级别的请求就设置几轮多线程
    # 第一轮多线程
    # 每页有10个问题，则爬完每100页应有1000条链接
    executor1 = ThreadPoolExecutor(max_workers=10)
    question_url_ls = []
    # 为减少报错引发重爬带来的时间成本，分割url_ls，分批次加入多线程
    for n in range(int(num/10)):   # 分割num/10次，每段长度10页
        ls = url_ls[n*10:(n+1)*10]
        try:
            req_each_page = executor1.map(each_page, ls)
            question_url_ls += sum([i for i in req_each_page],[])
        except Exception as e:
            print(f'第{n*10}到{(n+1)*10}页之间发生异常{e}')
            # req_each_page = executor1.map(each_page, ls)
            # question_url_ls += sum([i for i in req_each_page], [])
    print(f'有效问题链接数={len(question_url_ls)}')
    # 结果类型列表，用于传入下一轮多线程

    # 第二轮多线程
    # 线程也不能开太多，50个线程就会造成线程堆积未处理而报错
    executor2 = ThreadPoolExecutor(max_workers=10)
    info_ls = []
    for n in range(num):   # 分割num次，每段长度10条链接
        ls = question_url_ls[n * 10:(n + 1) * 10]
        if len(ls) < 5:
            break
        # 异常部分处理掉，顶多丢失10个链接的数据，至少保证其他正常数据能被存下来，数据不够就把页数往后翻再爬
        try:
            req_detail = executor2.map(get_detail,ls,range(1+n*10,1+n*10+len(ls)))
            # 得到所有问答数据
            info_ls += [i for i in req_detail]
            print('已爬取的问答数量=',len(info_ls))
        except:
            continue
    print('即将存储')
    invalid_data = [i for i in info_ls if type(i)==dict]
    pd.DataFrame(invalid_data).to_excel('心理.xlsx',index=False)
    print(f'成功存储数据量={len(invalid_data)}')

if __name__ == '__main__':
    # Candidate keyword tags for classifying questions.
    # NOTE(review): key_ls is never used anywhere in this file — presumably
    # intended for a keyword-tagging step; confirm before deleting.
    key_ls = ['痛苦', '压力', '迷茫', '抑郁', '自卑', '焦虑', '失眠', '工作', '消沉',
              '失恋', '暴躁', '紧张', '逆反', '孤独', '烦恼', '愤怒', '恐惧', '强迫症']
    loop()