import os
import re
import time
from news_regular_expression import RegularExpression

# Shared, pre-compiled regular-expression bundle used by every parse_* helper below.
regx = RegularExpression()


def __convert_to_date_time(unix_time):
    """Format a Unix timestamp as a ``YYYY-MM-DD HH:MM:SS`` string (UTC)."""
    utc_struct = time.gmtime(unix_time)  # seconds since epoch -> UTC struct_time
    return time.strftime("%Y-%m-%d %H:%M:%S", utc_struct)


def __take_head(elem):
    """Sort-key helper: return the first element of a news tuple."""
    head, *_rest = elem
    return head


def parse_sputniknews(html_f, news_list, url_head):
    """Parse a saved sputniknews HTML page, appending news items to news_list.

    Each appended item is a tuple ``(unix_time, dt_time, title, url)``.
    Entries whose timestamp capture is empty, or whose url/title cannot
    be extracted, are skipped.

    Args:
        html_f: open file object containing the page HTML.
        news_list: output list of tuples, mutated in place.
        url_head: base URL; replaced by the extracted article URL.
    """
    results = regx.SPUTNIKNEWS_DIV_M_WHITE.findall(html_f.read())
    for res in results:
        print(f"sputniknews:{res}")
        _unix_time = 0
        _dt_time = 0
        _title = ""
        _url = url_head
        date_time = regx.SPUTNIKNEWS_UNIX_TIME.search(res)
        if date_time:
            # An empty capture means no usable timestamp -> skip this entry.
            if date_time.group(1) == "":
                continue
            _unix_time = int(date_time.group(1))
            _dt_time = __convert_to_date_time(_unix_time)
            print(f"unix_time: {date_time.group(1)}, date_time: {_dt_time}")
        url_title = regx.SPUTNIKNEWS_URL_TITLE.search(res)
        if not url_title:
            # Fix: the original fell through and still appended an item with
            # an empty title and the bare url_head; skip instead, matching
            # the other parsers, which only append fully-extracted entries.
            print("can't find url&title")
            continue
        _url = url_title.group(1)
        _title = url_title.group(2)
        if _title == "" or _url == "":
            continue
        print(f"url:{_url}, title={_title}")
        news_list.append((_unix_time, _dt_time, _title, _url))


def parse_cls(html_f, news_list, url_head):
    """Parse a saved CLS HTML page and append news tuples to news_list.

    Each appended item is ``(0, 0, title, url)``; the timestamp fields stay
    zero because the matched fragments carry no timestamp.

    Args:
        html_f: open file object containing the page HTML.
        news_list: output list of tuples, mutated in place.
        url_head: URL prefix joined with each extracted relative href.
    """
    # Anchors are accepted only when their class attribute is one of these.
    # A set gives O(1) membership tests inside the loop (was a list).
    wanted_classes = {
        "f-w-b c-ef9524 b-c-222",
        "f-s-15 c-222 b-c-222222 line2 underline-width1",
        "f-w-b c-222 b-c-222",
        "clearfix o-h f-s-23 f-w-b home-article-title home-article-title-small",
        "c-383838 w-100p o-h home-article-rec-list",
        "c-666",
        "c-555",
    }
    html_str = html_f.read()
    results = regx.CLS_FLAG_A.findall(html_str)

    for res in results:
        print(res)
        class_obj = re.search('class="(.*?)"', res[0])
        href_obj = re.search('href="(.*?)"', res[0])
        if class_obj and href_obj:
            print(f"    class: {class_obj.group(1)}")
            print(f"    href: {href_obj.group(1)}")
            if class_obj.group(1) in wanted_classes:
                # Drop the href's first char (presumably a leading '/')
                # before joining to url_head — TODO confirm page markup.
                news_list.append((0, 0, res[1], url_head + href_obj.group(1)[1:]))


def parse_zaobao(html_f, news_list, url_head):
    """Parse a saved zaobao HTML page and append news tuples to news_list.

    Pass 1 collects candidate ``(0, 0, title, url)`` entries from <a> tags;
    pass 2 derives a timestamp from the ``/story<date>-<clock>`` part of each
    URL and appends the final ``(unix_time, dt_time, title, url)`` tuples.

    Args:
        html_f: open file object containing the page HTML.
        news_list: output list of tuples, mutated in place.
        url_head: URL prefix joined with each extracted relative href.
    """
    print(f"zaobao: {html_f}, {news_list}, {url_head}")
    html_str = html_f.read()
    results = regx.CLS_FLAG_A.findall(html_str)
    _temp_list = []  # pass-1 candidates awaiting timestamp extraction

    for res in results:
        print(res)
        _unix_time = 0
        _dt_time = 0
        _title = ""
        _url = url_head
        title_obj = re.search('title="(.*?)"', res[0])
        href_obj = re.search('href="(.*?)"', res[0])
        if title_obj and href_obj:
            _title = title_obj.group(1)
            # Drops the href's first char (presumably a leading '/') before
            # joining — TODO confirm against the saved page markup.
            _url += href_obj.group(1)[1:]
            class_obj = re.search('class="(.*?)"', res[0])
            if class_obj:
                # These classes are excluded; presumably video / layout
                # links rather than articles — verify against the site.
                if (class_obj.group(1) == "article video" or class_obj.group(1) == "article pdb20 line mgb15"
                        or class_obj.group(1) == "category-link"):
                    continue
                a_piece_of_news = tuple([_unix_time, _dt_time, _title, _url])
                _temp_list.append(a_piece_of_news)
            else:
                # No class on the <a> itself: accept only when the inner
                # markup carries one of the known headline classes.
                div_class = re.compile('class="(.*?)"', re.S)
                div_class_obj = div_class.findall(res[1])
                for dco in div_class_obj:
                    print(f"dco: {dco}")
                    if "f18 m-eps" == dco or "m-eps" == dco:
                        a_piece_of_news = tuple([_unix_time, _dt_time, _title, _url])
                        _temp_list.append(a_piece_of_news)
                        break

    for t_news in _temp_list:
        # URLs are expected to contain "/story<YYYYMMDD>-<clock>".
        time_index = t_news[3].rfind("/story")
        # NOTE(review): assert is stripped under -O; raising would be safer.
        assert (time_index != -1)
        time_index += len("/story")
        time_part = t_news[3][time_index:].split('-')
        print(f"time_date:{time_part[0]}, time_clock: {time_part[1]}")
        _year = time_part[0][:4]
        _month = time_part[0][4:6]
        _day = time_part[0][6:]
        # Midnight of the article's date, as a formatted string.
        _dt_time = f"{_year}-{_month}-{_day} 00:00:00"
        t = time.strptime(_dt_time, "%Y-%m-%d %H:%M:%S")
        # NOTE(review): mktime interprets the struct in LOCAL time while
        # __convert_to_date_time formats in UTC — mixed timezones; confirm.
        _unix_time = time.mktime(t)
        # The "clock" fragment is added as a plain seconds offset —
        # presumably only to order same-day items; TODO confirm intent.
        _unix_time += int(time_part[1])
        _dt_time2 = __convert_to_date_time(_unix_time)
        print(f"_dt_time:{_dt_time},_dt_time2:{_dt_time2}, time_clock: {time_part[1]}, _unix_time:{_unix_time}")
        # NOTE(review): _dt_time2 is computed and printed, but the midnight
        # _dt_time is what gets stored — possibly intentional; verify.
        a_piece_of_news = tuple([_unix_time, _dt_time, t_news[2], t_news[3]])
        news_list.append(a_piece_of_news)


def parse_html(html_path, news_dict, time_select, content_list=None):
    """Parse one saved news page and write the sorted items to a .txt file.

    Looks up the site-specific parser ``parse_<title>``, runs it on
    ``<name>.html`` under html_path, sorts the collected items in
    descending order of their leading timestamp, and writes one tuple per
    line to ``<name>.txt``.

    Args:
        html_path: directory holding the saved ``<name>.html`` page.
        news_dict: dict with keys "name" (file stem), "url" (base URL) and
            "title" (suffix selecting the parse_* function).
        time_select: unused here; kept for interface compatibility.
        content_list: optional output list; a fresh list is created when
            None (avoids the mutable-default pitfall).

    Raises:
        KeyError: if no ``parse_<title>`` function exists in this module.
    """
    if content_list is None:
        content_list = []

    html = os.path.join(html_path, news_dict["name"] + ".html")
    _url_head = news_dict["url"]
    # Fix: dispatch through an explicit module-level lookup instead of
    # eval() on a string assembled from input data — no arbitrary code
    # execution, and an unknown title fails fast with KeyError.
    parser = globals()["parse_" + news_dict["title"]]
    # "r" instead of "r+": the file is only read here.
    with open(html, "r", encoding='UTF-8') as html_f:
        parser(html_f, content_list, _url_head)

    if content_list:
        txt = os.path.join(html_path, news_dict["name"] + ".txt")
        with open(txt, "w", encoding="utf-8") as news_f:
            # Newest first: each item starts with its unix timestamp.
            content_list.sort(reverse=True, key=__take_head)
            # join instead of repeated += (quadratic string building)
            news_f.write("".join(str(cl) + "\n" for cl in content_list))
    else:
        _name = news_dict["name"]
        print(f"Found No news from {_name}")
