# -*- coding: utf-8 -*-
#@Time : 2020/7/29 23:49
#@Author : Armor
#@File : tianya_keyword.py
#@Software : PyCharm

import os
import csv
import requests
from lxml import etree
import tianya_csv
import pandas as pd
import datetime
import random
from time import  sleep
import time
from threading import Semaphore
from concurrent.futures import ThreadPoolExecutor

def getHTMLResponse(url):
    """Fetch *url* with a mobile browser disguise and return the Response.

    Returns the ``requests.Response`` on success, or ``None`` when the
    request fails or the server answers with an HTTP error status.
    A random 3-5 second pause follows each successful request to throttle
    the crawl (no framework-level delay is available in a plain script).
    """
    headers = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Mobile Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-User': '?1',
        'Sec-Fetch-Dest': 'document',
        'Referer': 'https://search.cn-ki.net/search?keyword=%E7%88%AC%E8%99%AB&db=CFLS&p=13',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'cookie': '_T_WM=7cd2fe00a46ec406f450f4ad4764df2b; SUB=_2A25yHhL1DeRhGeNL6FUZ8inIyD-IHXVR4L69rDV6PUNbktANLWXBkW1NSRtw-UgxtiIVZEtYWqy18AK3UycSktDK; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WW9ciTfJHi3v88u56ZUFhyf5JpX5KzhUgL.Fo-fe0MReoMXe0e2dJLoIpzLxKqL1h2LB.2LxKqLBK2L1K2t; SUHB=0sqBYxW86J6g_p; SSOLoginState=1595564709; ALF=1598156709',

    }

    try:
        r = requests.get(url, headers=headers, timeout=15)
        r.raise_for_status()
        # Let requests sniff the real encoding so Chinese text decodes correctly.
        r.encoding = r.apparent_encoding
        # Politeness delay between requests (random 3-5 s, inclusive).
        sleep(random.randint(3, 5))
        return r
    except requests.RequestException as exc:
        # Narrow except: only network/HTTP failures are expected here.
        # Report the cause instead of swallowing it silently.
        print("获取html失败: {}".format(exc))
        return None

def getData(content, csv_file):
    """Parse one Tianya search-result page and append each post to *csv_file*.

    content: raw HTML (bytes or str) of a search-result page.
    csv_file: a ``csv.writer``; one row per post is written in the order
        (title, plate, author, time, repley_num, synopsis, link_url),
        matching the header used by ``getPostlistCSV``.
    """
    try:
        tree_node = etree.HTML(content)

        doc_ul_lis = tree_node.xpath('//div[@class="searchListOne"]/ul/li')
        for li in doc_ul_lis:
            # Title — a <li> without a title link marks the end of the
            # real result list, so stop processing this page there.
            title_nodes = li.xpath('./div/h3/a')
            if not title_nodes:
                break
            title = title_nodes[0].xpath('string(.)').strip()
            print(title)
            # Forum board
            plate = li.xpath('./p/a[1]/text()')[-1]
            print(plate)
            # Author
            author = li.xpath('./p/a[2]/text()')[-1]
            print(author)
            # Post time (renamed from `time` to avoid shadowing the time module)
            post_time = li.xpath('./p/span[1]/text()')[-1]
            print(post_time)
            # Reply count (the "repley_num" spelling is kept: it must match
            # the CSV header consumed elsewhere in the pipeline)
            repley_num = li.xpath('./p/span[2]/text()')[-1]
            print(repley_num)
            # Synopsis / summary paragraph
            synopsis = li.xpath('./div/p')[0].xpath('string(.)').strip()
            print(synopsis)
            # Link to the post
            link_url = li.xpath('./div/h3/a/@href')[-1]
            print(link_url)
            print("---")
            csv_file.writerow([title, plate, author, post_time, repley_num, synopsis, link_url])
    except Exception as r:
        # Log and continue: one malformed page must not abort the whole crawl.
        print("获取内容异常：{}".format(r.args))

def getPostlistCSV(url, file_list_path, headers, keyword, url_format):
    """Collect every search-result page for *keyword* into a CSV post list.

    url: first result page URL (used to discover the total page count).
    file_list_path: output CSV path (overwritten).
    headers: CSV header row to write first.
    keyword / url_format: used to build the per-page URLs via
        ``url_format.format(keyword, page)``.
    """
    start_time = time.time()
    urls = []
    # First request: discover the total page count from the pagination bar.
    response = tianya_csv.getHTMLResponse(url)
    tree_node = etree.HTML(response.content)
    is_exist_pages = tree_node.xpath('//div[@class="long-pages"]/a')
    if is_exist_pages:
        # Fall back to a single page if no pagination link is numeric;
        # previously total_page could be unbound here (NameError).
        total_page = 1
        for a in is_exist_pages:
            page_num = a.xpath('./text()')[-1]
            if page_num.isdigit():
                # Keep the last numeric link, i.e. the highest page number.
                total_page = int(page_num)
        print("总页数：", total_page)

        for page in range(1, total_page + 1):
            url = url_format.format(keyword, page)
            urls.append(url)
            print(f"{url} is finished")
    else:
        urls.append(url)
        print(f"{url} is finished")
    print("----------------------urls集合 收集完毕  开始读取各url的html文档 ----------------------")

    with open(file=file_list_path, mode="w", newline="", encoding="utf-8") as file:
        # CSV writer over the freshly opened file
        csv_file = csv.writer(file)
        # Header row first
        csv_file.writerow(headers)
        # Download every page, then parse them into CSV rows
        contents = []
        for url in urls:
            data = getHTMLResponse(url)
            # getHTMLResponse returns None on failure — skip those pages
            # instead of crashing on data.content.
            if data is not None:
                contents.append(data.content)
            print(f"{url} is finished")

        for content in contents:
            getData(content, csv_file)

    print("finished end!")
    print("用时：", time.time() - start_time)

def getPosturls(file_list_path, start, end):
    """Return (urls, titles) of posts dated strictly between *start* and *end*.

    file_list_path: CSV produced by ``getPostlistCSV`` with at least the
        columns 'title', 'link_url', 'time' (time like 'YYYY-MM-DD hh:mm').
    start / end: ISO date strings 'YYYY-MM-DD'; both endpoints exclusive.
    """
    date_start = datetime.datetime.strptime(start, '%Y-%m-%d')
    date_end = datetime.datetime.strptime(end, '%Y-%m-%d')
    data = pd.read_csv(file_list_path)
    # Drop duplicate posts (same title and link), keeping the first.
    # Note: with inplace=True the call returns None — do not assign it.
    data.drop_duplicates(subset=['title', 'link_url'], keep='first', inplace=True)
    # Keep only the date part of the timestamp, then convert for comparison.
    data['time'] = data['time'].apply(lambda x: x.split(' ')[0])
    data['time'] = pd.to_datetime(data['time'])
    # Select posts strictly inside the (start, end) window.
    target = data[(data['time'] > date_start) & (data['time'] < date_end)]
    filenames = target['title'].values.tolist()
    urls = target['link_url'].values.tolist()
    return urls, filenames

# Collect the post list for the keyword, then optionally crawl each post.
def run_Collecting():
    """Crawl the search-result list for the keyword, then ask the user
    whether every listed post's content should be crawled as well."""
    keyword = "高考 志愿 计算机 人工智能 大数据"
    headers = ['title', 'plate', 'author', 'time', 'repley_num', 'synopsis', 'link_url']
    file_list_path = "." + os.sep + "00." + keyword + '_list.csv'
    list_url_format = "https://search.tianya.cn/bbs?q={}&pn={}&s=10"
    # Fetch all result pages for the keyword and dump them into the CSV.
    getPostlistCSV(url=list_url_format.format(keyword, 1),
                   file_list_path=file_list_path,
                   headers=headers,
                   url_format=list_url_format,
                   keyword=keyword)
    # Use pandas to extract each post's url and title inside the date window.
    posts_urls, posts_titles = getPosturls(file_list_path, "2019-12-15", "2020-01-30")
    print("finished collected!,关键词列表已经爬取完毕，是否要遍历爬取所有帖子的内容 (y/n)")
    if input() == 'y':
        # Hand each (url, title) pair to tianya_csv for content crawling.
        for url, filename in zip(posts_urls, posts_titles):
            tianya_csv.main(url, filename)
    else:
        print("Thanks use！")

def run_Extract():
    """Crawl post contents from an already-collected keyword list CSV,
    using a wide date window so essentially every listed post is taken."""
    keyword = "高考 志愿 计算机 人工智能 大数据"
    csv_path = "." + os.sep + "00." + keyword + '_list.csv'
    # Extract each post's link_url and title from the existing list via pandas.
    post_urls, post_titles = getPosturls(csv_path, start="2010-01-01", end="2020-07-30")
    for post_url, post_title in zip(post_urls, post_titles):
        tianya_csv.main(post_url, post_title)
    print("Thanks use！")

if __name__ == "__main__":
    # Interactive menu: 1 = collect post list, 2 = crawl listed posts,
    # anything else exits.
    print("请选择爬取模式：")
    print("--------------------------------------------------------------------------------")
    print("模式1：收集含'关键字'的论坛帖子列表")
    print("模式2：已有帖子'列表'爬取各个帖子的内容")
    print("请按 1 / 2 选择模式 其余键退出")
    print("--------------------------------------------------------------------------------")
    # Compare as text: int(input()) raised ValueError on non-numeric input
    # instead of exiting as the menu promises.
    send_key = input().strip()
    if send_key == "1":
        run_Collecting()
    elif send_key == "2":
        run_Extract()
    else:
        print("exit")


