# -*- coding:utf-8 -*-
import json
import random
import re
import sys
import time
import traceback

import requests
from lxml import etree

import data_access
from util import net_util, proxy_util, serial_util, file_util
from util.proxy_util import delete_proxy

# Browser-like default request headers (desktop Chrome on macOS), sent with
# every scrape request to reduce the chance of being served a bot-block page.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36',
    'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'none',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
}

# Characters stripped from scraped post text: newline, carriage return, tab,
# and square brackets (matches the intent of the old chained .replace() calls).
# Fix: inside a character class '|' is a literal, so the previous pattern
# r'[\n|\r|\t|\[|\]]' also stripped '|' characters unintentionally.
pattern = re.compile(r'[\n\r\t\[\]]')


def get_session():
    """Return a requests.Session whose headers mimic a desktop Chrome browser."""
    configured = requests.Session()
    configured.headers.update(HEADERS)
    return configured


# def get_response(session, url):
#     try:
#         response = session.get(url, timeout=60)
#     except:
#         print(traceback.format_exc())
#         response = session.get(url, timeout=60)
#     return response


def get_response(url):
    """GET *url* through a blacklist-checked proxy, retrying up to three times.

    Returns the requests Response on the first success. After three
    consecutive failures the proxy is removed from the pool and None
    is returned.
    """
    proxy = proxy_util.get_bl_proxy()
    for _attempt in range(3):
        try:
            return requests.get(url, headers=HEADERS, proxies=proxy, timeout=60)
        except Exception:
            # Log and retry with the same proxy.
            print(traceback.format_exc())
    # Every attempt failed — assume the proxy is dead and drop it.
    delete_proxy(proxy)
    return None


def get_total_pages(basic_url):
    """Return the last page number of a thread (as a string), or None.

    Fetches *basic_url* and reads the `data-pn` attribute of the "last"
    pagination button. Returns None when the request failed, the page
    could not be parsed, or the thread has no pagination block
    (single-page thread).
    """
    res = get_response(basic_url)
    if res is None:
        # get_response() exhausted its retries — treat as unknown/one page.
        return None
    html_res = etree.HTML(res.text)
    if html_res is None:
        # etree.HTML returns None for unparsable/empty bodies.
        return None
    pages = html_res.xpath('//div[@class="pagination-buttons"]/a[@class="last"]/@data-pn')
    if pages:
        return pages[0]
    return None


def get_post_list(basic_url):
    """Crawl a forum listing starting at *basic_url*, page by page.

    For each thread on a listing page, fetch all of its posts via
    get_post_info() and persist non-empty results with save_data().
    Stops when the listing markup is missing (likely blocked/error page)
    or there is no "next" pagination link.

    Fixes: `content_list` is now initialized before the try block, so the
    except handler no longer raises NameError when the failure happens
    before any thread was scraped; bare `except:` narrowed to Exception.
    """
    url = basic_url
    while True:
        content_list = []  # defined up front so the except handler can save partial data
        try:
            print('帖子列表链接', url)
            # Randomized delay so requests look less like a bot.
            time.sleep(random.randint(1, 10))
            res = get_response(url)
            html_res = etree.HTML(res.text)
            tbody_list = html_res.xpath('//tbody[contains(@id,"normalthread_")]')
            if not tbody_list:
                # No thread rows — probably a block/captcha page; dump it and stop.
                print('获取帖子列表异常')
                print(res.text)
                break
            for one_tbody in tbody_list:
                post_href = one_tbody.xpath('./tr/th/span[@class="tsubject"]/a/@href')
                if post_href:
                    post_href = 'https://www.discuss.com.hk/' + post_href[0]
                post_title = one_tbody.xpath('./tr/th/span[@class="tsubject"]/a/text()')
                if post_title:
                    post_title = ''.join(post_title)
                print('帖子链接为：', post_href)
                content_list = get_post_info(post_href)
                one_post = {'post_href': post_href, 'post_title': post_title, 'content_list': content_list}
                print(one_post)
                if content_list:
                    print('帖子内容为：', content_list[0])
                    save_data(content_list)
            next_url = html_res.xpath('//div[@class="pagination-buttons"]/a[@class="next"]/@href')
            if next_url:
                url = 'https://www.discuss.com.hk/' + next_url[0]
            else:
                print('获取下一页连接失败')
                break
        except Exception:
            print(traceback.format_exc())
            # Persist whatever was scraped before the failure, then stop.
            save_data(content_list)
            break


def get_post_info(post_url):
    """Fetch every page of a thread and return the cleaned post texts.

    Returns a list of post-body strings with newlines, tabs and square
    brackets stripped. Pages that fail to download or parse are skipped.

    Fixes: the original did `post_url += f'&page=N'` inside the loop, so
    the query string accumulated (page 3 was requested as
    `...&page=1&page=2&page=3`); it also called `.text` on a possibly-None
    response from get_response().
    """
    time.sleep(random.randint(3, 15))
    total_pages = get_total_pages(post_url)
    pages = int(total_pages) if total_pages else 1

    content_list = []
    for page in range(1, pages + 1):
        # Build each page URL from the base URL instead of accumulating params.
        page_url = f'{post_url}&page={page}'
        post_res = get_response(page_url)
        if post_res is None:
            # All retries failed for this page; move on to the next one.
            print('链接请求错误')
            continue
        html_res = etree.HTML(post_res.text)
        if html_res is None:
            print(post_res.text)
            print('链接请求错误')
            continue
        div_list = html_res.xpath('//div[@class="postmessage-content t_msgfont"]')
        for one_div in div_list:
            content = one_div.xpath('./span/text()')
            if content:
                # Join text fragments and strip \n \r \t [ ] in one regex pass.
                content_list.append(pattern.sub('', ''.join(content)))
    return content_list


def save_data(data):
    """Append *data* as one JSON line to this host's data file.

    The file is `<project>/data/<host-ip>-data.tsv`, opened in append mode
    so concurrent runs on different hosts never collide on the same file.
    Empty/None *data* is a no-op (fix: previously the file was still
    opened/created even when there was nothing to write; mode 'a+' was
    also used where plain append suffices).
    """
    if not data:
        return
    project_path = file_util.get_project_path()
    ip_address = net_util.get_host_ip()
    with open(f'{project_path}/data/{ip_address}-data.tsv', 'a', encoding='utf-8') as f:
        f.write(json.dumps(data, ensure_ascii=False))
        f.write('\n')


if __name__ == '__main__':
    # NOTE(review): `session` is created but never used below — get_post_list()
    # issues its own proxied requests via get_response(). Confirm before removing.
    session = get_session()
    # url = 'https://www.discuss.com.hk/forumdisplay.php?fid=204&page=180' # feng shui
    # url ='https://www.discuss.com.hk/forumdisplay.php?fid=212' # chess & card games
    # url ='https://www.discuss.com.hk/forumdisplay.php?fid=417&page=3' # climbing
    # Fallback hard-coded forum URLs (overwritten below by data_access).
    url_list = [
        #  'https://www.discuss.com.hk/forumdisplay.php?fid=931',
        #  'https://www.discuss.com.hk/forumdisplay.php?fid=930',
        #  'https://www.discuss.com.hk/forumdisplay.php?fid=524',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=420',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=996',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=644',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=222',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=220',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=378', # art
        #         'https://www.discuss.com.hk/forumdisplay.php?fid=771',
        #         'https://www.discuss.com.hk/forumdisplay.php?fid=770',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=131',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=329',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=330',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=769',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=683&filter=type&orderby=new_lastpost&ascdesc=DESC&typeids=712%2C714'
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=307',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=308',
        # 'https://www.discuss.com.hk/forumdisplay.php?fid=309',
        'https://www.discuss.com.hk/forumdisplay.php?fid=415&filter=0&orderby=dateline&ascdesc=DESC&page=1'
    ]

    """
    通过传参调用
    eg: python discuss_spider.py 'https://www.discuss.com.hk/forumdisplay.php?fid=415&filter=0&orderby=dateline&ascdesc=DESC&page=1'
    """
    # url = sys.argv[1]
    # get_post_list(session, url)
    # print('当前连接结束时间为', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), url)

    # Partition work by machine number: argv[1] selects this machine's URL range.
    # NOTE(review): crashes with IndexError if no argument is given — presumably
    # always launched with an argument; verify against the deployment script.
    page = sys.argv[1]
    url_list = data_access.get_range_url(int(page))
    # url_list = data_access.get_range_url(1)

    # processed url
    # process_url = serial_util.load_data()
    for one_url in url_list:
        print(one_url)
        try:
            # one_url appears to be a row/tuple from data_access; the URL is
            # its first field, with stray spaces stripped.
            legal_url = one_url[0].replace(' ', '')
            # Skip URLs already recorded as done so the job is resumable.
            if serial_util.is_processed(legal_url):
                print(f"{legal_url} has been processed...")
                continue
            get_post_list(legal_url)
            print('当前连接结束时间为', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), one_url)
            # Mark as processed only after the crawl finished without raising.
            serial_util.append_data(legal_url)
        except Exception as e:
            # Keep going with the remaining URLs even if this one failed.
            print(e)
            pass

    print("所有连接处理完毕!")
