# -*- coding: utf-8 -*-
#@Time : 2020/7/29 20:16
#@Author : Armor
#@File : tianya_csv.py
#@Software : PyCharm

import os
import csv
import requests
import time
from bs4 import BeautifulSoup
from urllib.parse import unquote
from lxml import etree
import random
from time import sleep

def getHTMLResponse(url):
    """Fetch *url* with a browser-like UA and return the Response, or None on failure.

    The response encoding is set from ``apparent_encoding`` so later ``.text``
    access decodes the (typically GBK) Tianya pages correctly.

    :param url: page URL to request
    :return: ``requests.Response`` on success, ``None`` on any request error
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        'cookie': '_T_WM=7cd2fe00a46ec406f450f4ad4764df2b; SUB=_2A25yHhL1DeRhGeNL6FUZ8inIyD-IHXVR4L69rDV6PUNbktANLWXBkW1NSRtw-UgxtiIVZEtYWqy18AK3UycSktDK; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WW9ciTfJHi3v88u56ZUFhyf5JpX5KzhUgL.Fo-fe0MReoMXe0e2dJLoIpzLxKqL1h2LB.2LxKqLBK2L1K2t; SUHB=0sqBYxW86J6g_p; SSOLoginState=1595564709; ALF=1598156709',
    }
    try:
        # BUG FIX: the original `requests.get(url, headers)` passed the dict as
        # the positional `params` argument (appended to the query string), so
        # the UA/cookie were never sent.  They must go via `headers=`.
        # A timeout prevents the scraper hanging forever on a dead connection.
        r = requests.get(url, headers=headers, timeout=10)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r
    except requests.RequestException:
        # Narrowed from a bare `except:` that also swallowed KeyboardInterrupt.
        print("获取html失败")

def getURLs(url_format, total_page):
    """Return the URLs for pages 1..*total_page* built from *url_format*.

    :param url_format: URL template containing one ``{}`` placeholder
    :param total_page: last page number (inclusive); 0 yields an empty list
    :return: list of formatted page URLs in ascending page order
    """
    return [url_format.format(page_no) for page_no in range(1, total_page + 1)]

def getDate(r, csv_file):
    """Parse one thread page from response *r* and append reply rows to *csv_file*.

    Each ``div.atl-item`` is one reply: the author is URL-quoted in the
    ``_host`` attribute, the post time in ``js_restime``, and the body text in
    a nested ``div.bbs-content``.  One ``[author, time, content]`` row is
    written per reply.

    :param r: response object exposing ``.content`` (raw HTML bytes)
    :param csv_file: ``csv.writer`` to receive the rows
    """
    soup = BeautifulSoup(r.content, "html.parser")

    for item in soup.find_all("div", class_="atl-item"):
        # BUG FIX: .get() instead of ["_host"] — some items (e.g. ads) lack
        # the attribute and the original raised KeyError mid-scrape.
        author = unquote(item.attrs.get("_host", ""))
        print(author)
        # Renamed from `time`, which shadowed the imported `time` module.
        post_time = item.attrs.get("js_restime")
        print(post_time)
        content_div = item.find("div", class_="bbs-content")
        if content_div is None:
            # BUG FIX: the original crashed with AttributeError on None.text
            # when a reply had no content block; skip such items instead.
            continue
        content_text = content_div.text.replace('\t', '').replace('\n', '').replace('\u3000', '')
        print(content_text)

        csv_file.writerow([author, post_time, content_text])

def getPostsInfo(posts_url, content, soup, csv_file):
    """Print thread metadata, determine the page count, and scrape every page.

    Reads the title, publish time, reply/click counts and author from the
    thread head, then builds one URL per page from *posts_url* and feeds each
    fetched page to :func:`getDate`.

    :param posts_url: URL template with a ``{}`` page-number placeholder
    :param content: raw HTML bytes of the first page (for lxml xpath queries)
    :param soup: ``BeautifulSoup`` of the first page
    :param csv_file: ``csv.writer`` passed through to :func:`getDate`
    """
    urls = []
    try:
        tree_node = etree.HTML(content)
        # Thread title
        head = soup.find("div", id="post_head")
        title = head.find("span", class_="s_title")
        title_text = title.text
        print(title_text)
        # Publish time of the opening post
        author_time_info = tree_node.xpath('//*[@id="post_head"]/div[2]/div[2]/span[2]/text()')[-1]
        print(author_time_info)
        info = soup.find("div", class_="atl-menu clearfix js-bbs-act")
        # Reply count
        replycount = info.attrs['js_replycount']
        print("回复数：", replycount)
        # Click count
        clickcount = info.attrs['js_clickcount']
        print("点击数：", clickcount)
        # Author name (renamed from the misleading `pulish_time`; the xpath
        # targets the author anchor in the head block, per the print label)
        author_name = tree_node.xpath('//*[@id="post_head"]/div[2]/div[2]/span[1]/a[1]/text()')[-1]
        print("作者：", author_name)

        # If a pager block exists, the last numeric link text is the page count.
        is_exist_pages = tree_node.xpath('//*[@id="post_head"]/div[3]/div[3]')
        if is_exist_pages:
            # BUG FIX: total_page was unbound when the pager held no digit
            # links, raising NameError that the blanket except then hid.
            total_page = 1
            for item in head.find_all("a"):
                if item.text.isdigit():
                    total_page = int(item.text)

            print("总页数：", total_page)

            urls = getURLs(posts_url, total_page)
        else:
            urls.append(posts_url.format(1))

        for url in urls:
            print(url)
            response = getHTMLResponse(url)
            getDate(response, csv_file)
    except Exception as reason:
        # Top-level boundary for this scrape step: report and carry on.
        print("获取内容异常:", reason.args)

def main(start_url, filename):
    """Scrape the Tianya thread at *start_url* into ``./<filename>.csv``.

    Writes a header row, derives a ``-{}.``-style page-URL template from the
    first-page URL, then delegates to :func:`getPostsInfo` and reports the
    elapsed time.

    :param start_url: URL of page 1, containing a ``-1.`` page marker
    :param filename: CSV basename (written to the current directory)
    """
    FILE_PATH = '.' + os.sep + filename + '.csv'
    HEADERS = ["Author", "Time", "Content"]
    start_time = time.time()
    with open(file=FILE_PATH, mode="w", newline="", encoding="utf-8") as file:
        # CSV writer and header row
        csv_file = csv.writer(file)
        csv_file.writerow(HEADERS)
        # "...-1.shtml" -> "...-{}.shtml" template for per-page URLs.
        prefix, _sep, suffix = start_url.partition('-1.')
        posts_url = prefix + "-{}." + suffix
        response = getHTMLResponse(start_url)
        if response is None:
            # BUG FIX: getHTMLResponse returns None on failure; the original
            # then crashed with AttributeError on response.content.
            print("获取内容异常:", start_url)
            return
        content = response.content
        soup = BeautifulSoup(response.text, "html.parser")
        getPostsInfo(posts_url=posts_url, content=content, soup=soup, csv_file=csv_file)
    # The with-block already closed the file; the original's extra
    # file.close() was redundant and has been removed.
    print("finished end!")
    print("用时：", time.time() - start_time)



# Script entry point: scrape the hard-coded Wuhan thread into ./wuhan.csv.
if __name__ == "__main__":
    main(start_url="http://bbs.tianya.cn/post-free-6120074-1.shtml",filename="wuhan")