# -*- coding:utf-8 -*-
import re
import json
import time
import random
import traceback
import requests
from lxml import etree

# Browser-like default headers applied to every session request (see
# get_session); the Chrome UA string helps avoid trivial bot blocking.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'none',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
}


def get_session():
    """Build a requests session preloaded with the default browser headers."""
    sess = requests.Session()
    sess.headers.update(HEADERS)
    return sess


def get_response(session, url, retries=1):
    """Fetch *url* with *session*, retrying after a failure.

    Args:
        session: a requests.Session (or any object with a ``get(url, timeout=...)``).
        url: absolute URL to fetch.
        retries: extra attempts allowed after the first failure (default 1,
            matching the original try-once-then-retry-once behaviour).

    Returns:
        The response object from the first successful ``session.get``.

    Raises:
        Whatever the final failed attempt raised.
    """
    for attempt in range(retries + 1):
        try:
            return session.get(url, timeout=60)
        except Exception:
            # The original used a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; Exception keeps the retry
            # behaviour without hiding interpreter-exit signals.
            print(traceback.format_exc())
            if attempt == retries:
                raise

def get_reply_list(session, basic_url):
    """Crawl every page of one forum thread and persist its replies.

    Starting from *basic_url*, repeatedly: fetch the page, extract the thread
    title and the reply texts, append them as one JSON line via save_data(),
    then follow the "next page" link. Stops when the page cannot be parsed,
    no next-page link exists, or an unexpected error occurs.

    Args:
        session: requests.Session used for all page fetches (see get_session()).
        basic_url: URL of the first page of the thread.
    """
    url = basic_url
    while True:
        try:
            print('帖子列表链接', url)
            time.sleep(5)  # throttle requests to avoid hammering the site
            res = get_response(session, url)
            html_res = etree.HTML(res.text)
            if html_res is None:
                # Body was not parseable HTML (e.g. blocked or empty response).
                print('获取评论列表异常')
                print(res.text)
                break
            post_title = html_res.xpath('//div[@class="thread-subject"]/text()')
            if post_title:
                post_title = ''.join(post_title)
            div_list = html_res.xpath('//div[@class="postmessage-content t_msgfont"]')
            content_list = []
            for one_div in div_list:
                content = one_div.xpath('./span/text()')
                if content:
                    content = ''.join(content)
                    content = content.replace('\n', '').replace('\r', '').replace('\t', '').replace('[]', '')
                    content_list.append(content)
            if content_list:
                one_post = {'post_href': url, 'post_title': post_title, 'content_list': content_list}
                print(one_post['content_list'][0])
                save_data(one_post)
            next_url = html_res.xpath('//div[@class="pagination-buttons"]/a[@class="next"]/@href')
            if next_url:
                # NOTE(review): assumes the href is site-relative without a
                # leading slash — confirm against the live markup.
                url = 'https://www.discuss.com.hk/' + next_url[0]
            else:
                print('获取下一页连接失败')
                break
        except Exception:
            # Log and stop. The original bare `except:` also re-saved
            # `one_post` here, which wrote either an empty-string record
            # (nothing scraped yet) or a duplicate of a record already
            # saved above — both removed.
            print(traceback.format_exc())
            break


def save_data(data):
    """Append *data* as one JSON line to the thread-reply output file.

    Args:
        data: a JSON-serializable object (the scraped post dict).
    """
    # 'a' (append, write-only) instead of the original 'a+': the file is
    # never read here, so read access was unnecessary.
    with open('6.9.休闲娱乐-综艺reply.tsv', 'a', encoding='utf-8') as f:
        f.write(json.dumps(data, ensure_ascii=False) + '\n')

if __name__ == '__main__':
    # Crawl each configured thread in turn, logging a timestamp after each.
    session = get_session()
    thread_urls = [
        'https://www.discuss.com.hk/viewthread.php?tid=14636042',
        'https://www.discuss.com.hk/viewthread.php?tid=14215742',
        'https://www.discuss.com.hk/viewthread.php?tid=14019280',
        'http://www.discuss.com.hk/viewthread.php?tid=13769311',
        'http://www.discuss.com.hk/viewthread.php?tid=13607963',
        'http://www.discuss.com.hk/viewthread.php?tid=13203089',
        'http://www.discuss.com.hk/viewthread.php?tid=12631875',
    ]
    for thread_url in thread_urls:
        get_reply_list(session, thread_url)
        print('当前连接结束时间为', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), thread_url)
    # get_post_list(session, url)
    print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
