import codecs
import json
import os
import re
import sys
import time
from datetime import datetime

import config_setting
from logger import Logger
from util import format_path

import requests
from lxml import etree

# Browser-like request headers so the site serves normal HTML to the scraper.
headers = {
        'Connection': 'Keep-Alive',
        'Accept': 'text/html, application/xhtml+xml, */*',
        'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36',
    }

# Root directory where cached HTML pages and parsed JSON listings are written.
detail_base_path = "E:/btbbt_data_listing"

# Base URL of the forum site being scraped.
base_url = "https://www.1lou.info"


def process_listing_html(forum, page):
    """Fetch (or load from cache) one listing page of *forum*, parse its thread
    rows, and persist the rows inside the configured date window as JSON.

    Args:
        forum: forum slug, e.g. "forum-3".
        page:  1-based page number; page 1 uses a different URL shape.

    Returns:
        True when the caller should continue with the next page,
        False when the configured date line has been passed (or page <= 0).
    """
    listing = []
    if page <= 0:
        return False  # invalid page number -> stop paging
    if page == 1:
        page_src = f"{base_url}/{forum}.htm"
    else:
        page_src = f"{base_url}/{forum}-{page}.htm?orderby=tid&digest=0"
    persistence_path = f"{detail_base_path}/{config_setting.batch_name}/cache/{forum}/listing_{forum}_{page}.html"
    cache_dir = os.path.dirname(persistence_path)
    if cache_dir:
        os.makedirs(cache_dir, exist_ok=True)
    if os.path.exists(persistence_path):
        print(f"use cached html {persistence_path} from {page_src}")
        with open(persistence_path, 'r', encoding='utf-8') as html_file:
            content = html_file.read()
    else:
        response = requests.request("GET", page_src, timeout=18, headers=headers)
        content = response.content
        with open(persistence_path, "w", encoding='utf-8') as html_file:
            html_file.write(response.text)
        print(f"cached html {page_src} to {persistence_path}")
        time.sleep(0.6)  # throttle requests to be polite to the server

    tree = etree.HTML(content)
    li_array = tree.xpath("//div[@class='card-body']/ul/li")

    count = 0
    select_one_date = None  # last post date seen on this page (drives the stop condition)
    for li in li_array:
        tags = []
        # BUG FIX: text/link/thread_id were previously unbound (NameError, or
        # stale values from the prior row) when a row had no "thread-" anchor.
        text = None
        link = None
        thread_id = "NA"  # renamed from "id" to avoid shadowing the builtin
        href_list = li.xpath("div[@class='media-body']/div[contains(@class, 'subject')]/a")
        for href in href_list:
            if href.attrib['href'].startswith("thread-"):
                text = href.text
                link = base_url + "/" + href.attrib['href']
                match = re.search(r'thread-(\d+)\.html?', link)
                thread_id = str(match.group(1)) if match else "NA"
            elif href.attrib['href'].startswith("forum-"):
                tags.append(href.text)
        if link is None:
            continue  # not a thread row (no "thread-" link) -> nothing to record
        poster_array = li.xpath("descendant::span[contains(@class, 'haya-post-info-username')]/a/text()")
        poster = poster_array[0] if poster_array else "NA"
        date_array = li.xpath("descendant::span[contains(@class, 'haya-post-info-username')]/span/text()")
        if date_array:
            date = date_array[0]
            select_one_date = date
        else:
            # BUG FIX: the old code set date = "NA" and then fed it to
            # strptime below, which raised ValueError. Skip such rows instead.
            print("missing post date, skipped!")
            continue

        # Keep only rows whose post date lies in [end_date_line, start_date_line).
        if config_setting.end_date_line.date() <= datetime.strptime(date, "%Y-%m-%d %H:%M").date() < config_setting.start_date_line.date():
            count += 1
            listing.append({"id": thread_id, "name": text, "forum": forum, "link": link, "tags": tags, "poster": poster, "date": date})
        else:
            print(f"overdue date: {date} skipped!")

    if listing:
        output_path = f"{detail_base_path}/{config_setting.batch_name}/{forum}/{forum}_{page}.json"
        out_dir = os.path.dirname(output_path)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)
        with codecs.open(output_path, mode='w', encoding='utf-8') as f:
            line = json.dumps(listing, indent=2, ensure_ascii=False)
            print(f"json saved to {output_path}...")
            f.write(line)

    # Stop paging once the last dated post on this page is at or before the
    # configured end date line.
    # BUG FIX: select_one_date can be None (empty page / no dated rows);
    # strptime(None, ...) raised TypeError. Keep paging in that case.
    if (config_setting.end_date_line
            and select_one_date is not None
            and datetime.strptime(select_one_date, "%Y-%m-%d %H:%M").date() <= config_setting.end_date_line.date()):
        return False
    return True


if __name__ == "__main__":
    # Create assets/<batch_name>/log in one call (the old code chained three
    # isdir/mkdir checks and used f-strings with no placeholders), then tee
    # stdout into a timestamped log file.
    log_dir = f"assets/{config_setting.batch_name}/log"
    os.makedirs(log_dir, exist_ok=True)
    sys.stdout = Logger(datetime.now().strftime(f"{log_dir}/%Y%m%d_%H%M%S") + ".txt")

    forum_dict = {  # value is the starting page number for each forum
        # "forum-1": 587, # "最新电影" 1567
        # "forum-2": 1743, # "电视剧集" max 2441
        # "forum-3": 571, # "高清电影" max 2676
        # "forum-4": 1718, # "高清剧集" max 2064
        # "forum-9": 57, # "游戏专区" end
        # "forum-17": 46, # "动漫专区" end
        # "forum-8": 173, # "音乐专区" end
    }
    for forum, start_page_num in forum_dict.items():
        # Walk pages from the configured start until the date line is reached
        # (process_listing_html returns False) or the hard page cap is hit.
        for page_num in range(start_page_num, 2065):
            if not process_listing_html(forum=forum, page=page_num):
                break

# response_detail = requests.request("GET", link, timeout=18, headers=headers)
# with open("bbb.html", "w", encoding='utf-8') as html_file:
#     html_file.write(response_detail.text)
#
# print(href_array)