# --*-- coding:utf-8 --*--
import datetime
import os.path
import random
import time
import urllib.error
import urllib.request

import logging
from bs4 import BeautifulSoup

import emo
import investor
import proxy

# Module logger: INFO-level output mirrored to a log file and the console.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

handler1 = logging.FileHandler('down.py.log')
handler2 = logging.StreamHandler()
for _h in (handler1, handler2):
    _h.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    _h.setFormatter(formatter)
    logger.addHandler(_h)


def load_hs300():
    """Load the HS300 security list from sh300.txt.

    Each non-empty line is expected to be "<code><TAB><name>".  Blank lines
    (e.g. a trailing newline) are skipped instead of crashing the unpack.

    :return: list of investor.Security with code/name populated.
    """
    ret = []
    with open("sh300.txt", "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            code, name = line.split("\t")
            secu = investor.Security()
            secu.code = code
            secu.name = name
            ret.append(secu)
    return ret


def reqNews(url):
    """Fetch a guba post page and parse it into an investor.News.

    :param url: absolute URL of the post detail page.
    :return: investor.News on success, None when fetch or parse fails.
    """
    html = reqHtml(url)
    if html:
        soup = BeautifulSoup(html, "html.parser")
        try:
            return getNews(soup)
        except Exception as e:
            # Page layouts vary; log via the module logger (was a bare print,
            # inconsistent with the rest of the file) and treat as "no news".
            logger.error("%s,%s" % (url, e))


def getNews(soup):
    """Extract address, body text, and timestamp from a parsed post page.

    :param soup: BeautifulSoup of a guba post detail page.
    :return: populated investor.News.
    :raises IndexError: when an expected element class is missing
        (swallowed by reqNews's broad except).
    """
    tag = soup.find_all(class_="address")
    address = tag[0].text

    tag = soup.find_all(class_="newstext")
    news_text = tag[0].text

    # Renamed from "time": the original local shadowed the imported time module.
    tag = soup.find_all(class_="time")
    post_time = tag[0].text

    news = investor.News()
    news.address = address
    news.news_text = news_text
    news.time = post_time
    return news


def guba_html_req_parse(url):
    """Fetch and parse one page of a stock's guba (message board) post list.

    :param url: list-page URL.
    :return: list of investor.Guba (with .author populated), or None when
        the page could not be fetched.
    """
    html = reqHtml(url)
    if not html:
        return None

    soup = BeautifulSoup(html, "html.parser")
    posts = []
    for item in soup.find_all(class_="listitem"):
        title_tag = item.find_all(class_="title")[0]
        author_tag = item.find_all(class_="author")[0]

        author = investor.Investor()
        # Author id is the last path segment of the profile link.
        author.id = author_tag.find("a").get("href").split('/')[-1]
        author.name = author_tag.text

        guba = investor.Guba()
        guba.read = item.find_all(class_="read")[0].text
        # NOTE: 'replay' matches the attribute spelling used on investor.Guba.
        guba.replay = item.find_all(class_="reply")[0].text
        guba.title = title_tag.text
        guba.url = title_tag.find_all("a")[0].get("href")
        guba.time = item.find_all(class_="update")[0].text
        guba.author = author
        posts.append(guba)

    return posts


# Candidate sleep durations in seconds; only referenced by the commented-out
# throttling line in reqHtml below.
sleep_lst = [1, 2]


def reqHtml(url):
    """Fetch the HTML for *url* via the project's proxy module.

    :param url: page URL.
    :return: whatever proxy.request returns — presumably HTML text or a
        falsy value on failure (callers test truthiness); verify in proxy.
    """
    html = proxy.request(url)
    # html = request(url)
    # time.sleep(random.choice(sleep_lst))
    return html


def request(url):
    """Fetch *url* directly with urllib, using a random desktop User-Agent.

    :param url: page URL.
    :return: UTF-8 decoded HTML on success, None on any error.
    """
    user_agents = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36'
    ]

    headers = {"User-Agent": random.choice(user_agents)}
    # Renamed from "request": the original local shadowed this function's name.
    req = urllib.request.Request(url, headers=headers)
    try:
        # Context manager closes the HTTP response even on decode errors
        # (the original leaked the response object).
        with urllib.request.urlopen(req) as response:
            return response.read().decode("utf-8")
    except urllib.error.URLError as e:
        logger.error("%s,%s" % (e, url))
    except TimeoutError as e:
        logger.error("%s,%s" % (e, url))
    except Exception as e:
        logger.error("%s" % e)


def get_guba_page(secu, baseUrl, page):
    """Fetch page *page* of the guba post list for security *secu*."""
    return guba_html_req_parse("%s/list,%s,f_%s.html" % (baseUrl, secu.code, page))


# Sentinel results returned by check()/is_missing_day() to drive the crawl loop:
SKIP = "SKIP"    # post is newer than the target window; keep paging
BREAK = "BREAK"  # post is older than the target window; stop
MATCH = "MATCH"  # post falls inside the target window; save the page

# NOTE(review): appears to be a known page-number bookmark used by test2() — confirm.
PAGE_NO = 11189


def save_page_no(code, page):
    """Persist crawl progress (current page number) to <code>_page.txt.

    :param code: security code used to name the progress file.
    :param page: page number to record (overwrites any previous value).
    """
    path = "%s_page.txt" % code
    with open(path, "w") as f:
        # The original used writelines() on a single string, which writes it
        # character-by-character; write() is the intended call.
        f.write("%s" % page)

def read_page_no(code):
    """Return the saved crawl page for *code*, or 1 when no progress exists.

    :param code: security code whose <code>_page.txt file is consulted.
    :return: stored page number as int, defaulting to 1.
    """
    progress_file = "%s_page.txt" % code
    if os.path.exists(progress_file):
        with open(progress_file, "r") as f:
            first_line = f.readline()
        if first_line:
            return int(first_line)
    return 1

# Lazily-populated cache of "missing" dates, one per line in miss_day.txt.
miss_d = set()


def load_miss_day():
    """Return the set of missing days, loading miss_day.txt on first use."""
    if not miss_d:
        with open("miss_day.txt", "r") as f:
            for raw in f:
                miss_d.add(raw.strip("\n"))
    return miss_d

def get_guba_year(secu, baseUrl, year="2023", day=None):
    """Crawl a security's guba list pages, saving every page whose leading
    post's date is in the miss_day.txt set, stopping once a 2022-dated post
    is seen (is_missing_day returns BREAK).

    Progress is persisted with save_page_no after each page so the crawl can
    resume where it left off.  *year* and *day* are only used by the
    commented-out check() path below.

    :param secu: investor.Security to crawl.
    :param baseUrl: guba site root, e.g. https://guba.eastmoney.com
    :param year: target year (unused on the active is_missing_day path).
    :param day: optional day filter (unused on the active path).
    """
    page = read_page_no(secu.code)  # resume from persisted progress
    guba_lst = get_guba_page(secu, baseUrl, page)
    chk = None

    # NOTE(review): loop only terminates via BREAK; if the site keeps
    # returning non-empty pages that never reach 2022 this runs forever.
    while True:
        if guba_lst:
            # chk = check(baseUrl, year, guba_lst[0], day)
            # Classify the whole page by its first post only.
            chk = is_missing_day(baseUrl,guba_lst[0])
            if chk == SKIP:
                pass
            elif chk == MATCH:
                save_page(secu, baseUrl, year, guba_lst)
            elif chk == BREAK:
                return
        logger.info("%s,page=%s,chk=%s" % (secu, page, chk))
        page = page + 1
        guba_lst = get_guba_page(secu, baseUrl, page)
        save_page_no(secu.code, page)


def get_guba_history(secu, baseUrl, day=None, page=1, deep=1):
    """Page through a security's guba list until a page covering *day* is found.

    :param secu: investor.Security.
    :param baseUrl: guba site root.
    :param day: target "YYYY-MM-DD"; defaults to today's date.  (The old
        default called datetime.now() in the signature, which is evaluated
        once at import time and so froze "today" for the process lifetime.)
    :param page: list page to start from.
    :param deep: recursion depth counter; gives up after 50 pages.
    :return: the matching list of investor.Guba, or None when not found.
    """
    if day is None:
        day = datetime.datetime.now().strftime("%Y-%m-%d")
    if deep > 50:
        return None

    url = "%s/list,%s,f_%s.html" % (baseUrl, secu.code, page)
    guba_lst = guba_html_req_parse(url)

    if guba_lst and is_between_day(day, guba_lst):
        return guba_lst
    return get_guba_history(secu, baseUrl, day, page + 1, deep + 1)


def save_page(secu, baseUrl, year, guba_lst):
    """Append every valid post in *guba_lst* to result/<year>/<code>_<name>.txt.

    Each post's detail page is fetched and sentiment-analysed (emo) before
    being written.

    :param secu: investor.Security the posts belong to.
    :param baseUrl: guba site root, prefixed to each relative post URL.
    :param year: sub-directory name under result/.
    :param guba_lst: list of investor.Guba; may be None or empty.
    """
    path = os.path.join("result", year)
    if not os.path.exists(path):
        os.makedirs(path)

    file = "%s_%s.txt" % (secu.code, secu.name)
    # "with" guarantees the file is closed even if a fetch/parse raises
    # (the original leaked the handle on any exception before f.close()).
    with open(os.path.join(path, file), "a") as f:
        if guba_lst:
            for guba in guba_lst:
                url = baseUrl + guba.url
                news = reqNews(url)
                if news:
                    guba.secu = secu
                    guba.news = news
                    emo.analysis_guba(guba)
                    if is_valid(guba):
                        # Strip characters known to break the platform encoding.
                        line = str(guba).replace('\xa0', '').replace('\xb6', '').replace('\xae', '') + '\n'
                        try:
                            f.write(line)
                        except UnicodeEncodeError as e:
                            logger.error("%s,%s" % (line, e))
                        f.flush()
    logger.info("done,%s" % os.path.join(path, file))


def get_guba_data(secu, baseUrl, day, topN=15, page=1):
    """Save up to *topN* posts for *secu* dated *day* to result/<day>/<code>_<name>.txt.

    :param secu: investor.Security.
    :param baseUrl: guba site root.
    :param day: target date "YYYY-MM-DD".
    :param topN: maximum number of posts to write (falsy disables the cap).
    :param page: list page to start searching from.
    """
    guba_lst = get_guba_history(secu, baseUrl, day=day, page=page)
    if not guba_lst:
        return

    path = os.path.join("result", day)
    if not os.path.exists(path):
        os.makedirs(path)

    file = "%s_%s.txt" % (secu.code, secu.name)
    cnt = 1
    # "with" guarantees the file is closed even if a fetch/parse raises
    # (the original leaked the handle on any exception before f.close()).
    with open(os.path.join(path, file), "w") as f:
        for guba in guba_lst:
            if not is_match_day(day, guba):
                continue
            if topN and cnt > topN:
                break
            url = baseUrl + guba.url
            news = reqNews(url)
            if news:
                guba.secu = secu
                guba.news = news
                emo.analysis_guba(guba)
                if is_valid(guba):
                    # Strip characters known to break the platform encoding.
                    line = str(guba).replace('\xa0', '').replace('\xb6', '').replace('\xae', '') + '\n'
                    try:
                        f.write(line)
                    except UnicodeEncodeError as e:
                        logger.error("%s,%s" % (line, e))
                    f.flush()
                    cnt = cnt + 1
    logger.info("done,%s" % os.path.join(path, file))


def is_valid(guba):
    """Keep only posts whose sentiment word count is at most 100."""
    return guba.senti.words <= 100


def is_between_day(day, guba_lst):
    """True when *day*'s MM-DD part lies between the MM-DD prefixes of the
    page's last and first posts (inclusive)."""
    lower = guba_lst[-1].time[:5]
    upper = guba_lst[0].time[:5]
    target = day[-5:]
    return lower <= target <= upper


def is_missing_day(baseUrl, guba):
    """Classify a post by its detail-page date.

    :return: MATCH when the date is in the miss_day.txt set, BREAK once a
        2022-dated post is reached, None otherwise (including fetch failure).
    """
    news = reqNews(baseUrl + guba.url)
    if not news:
        return None
    day = news.time[:10]
    if day in load_miss_day():
        return MATCH
    if day.startswith("2022"):
        return BREAK
    return None

def check(baseUrl, year, guba, day=None):
    """Classify a post against *year*/*day*: SKIP (too new), MATCH (in range),
    BREAK (too old), or None when the page could not be fetched/classified."""
    news = reqNews(baseUrl + guba.url)
    if is_skip_year(year, news, day):
        return SKIP
    if is_match_year(year, news, day):
        return MATCH
    if is_break_year(year, news):
        return BREAK


def is_skip_year(year, news, day=None):
    """True when the post is newer than the target *year* (or newer than the
    optional *day* cutoff within that year); falsy None otherwise."""
    if not news:
        return None
    post_year = news.time[:4]
    if year < post_year:
        return True
    if year == post_year and day and day < news.time[5:10]:
        return True


def is_match_year(year, news, day=None):
    """True when the post's year equals *year* and it is not past the optional
    *day* cutoff; False when past the cutoff; None when the year differs."""
    if news and year == news.time[:4]:
        return not (day and day < news.time[5:10])


def is_break_year(year, news):
    """True when the post comes from a year earlier than *year*."""
    if not news:
        return None
    if news.time[:4] < year:
        return True


def is_match_day(day, guba):
    """True when *guba*'s MM-DD prefix equals the MM-DD part of *day* (YYYY-MM-DD)."""
    same_day = guba.time[:5] == day[5:]
    if same_day:
        return True


def download_year(code=None, skip=None, skip_day=None):
    """Run the year crawl (get_guba_year) over the HS300 universe.

    :param code: when set, process only this security code.
    :param skip: when set, skip codes <= this value (resume helper).
    :param skip_day: forwarded to get_guba_year as *day*.
    """
    baseUrl = "https://guba.eastmoney.com"
    count = 1
    total_start = time.time()
    for sec in load_hs300():
        if (skip and sec.code <= skip) or (code and sec.code != code):
            continue
        logger.info("processing, %s, %s" % (sec, count))
        started = time.time()
        get_guba_year(sec, baseUrl=baseUrl, year="2023", day=skip_day)
        elapsed = time.time() - started
        count = count + 1
        h, m, s = convert_seconds(elapsed)
        logger.info("done, %s, %s, 耗时=%s:%s:%s" % (sec, count, h, m, s))
    h, m, s = convert_seconds(time.time() - total_start)
    logger.info("All done,耗时=%s:%s:%s" % (h, m, s))


def convert_seconds(seconds):
    """Split a duration in seconds into integer (hours, minutes, seconds)."""
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return int(hours), int(minutes), int(secs)


def download(day=None, page=1, code=None, skip=None):
    """Run the single-day crawl (get_guba_data) over the HS300 universe.

    :param day: target "YYYY-MM-DD"; defaults to today's date.  (The old
        default called datetime.now() in the signature, which is evaluated
        once at import time and so froze "today" for the process lifetime.)
    :param page: list page to start searching from.
    :param code: when set, process only this security code.
    :param skip: when set, skip codes <= this value (resume helper).
    """
    if day is None:
        day = datetime.datetime.now().strftime("%Y-%m-%d")
    baseUrl = "https://guba.eastmoney.com"
    secu_lst = load_hs300()
    i = 1
    for sec in secu_lst:
        if skip and sec.code <= skip:
            continue
        if code and sec.code != code:
            continue
        logger.info("processing, %s, %s, %s" % (day, sec, i))
        get_guba_data(sec, baseUrl=baseUrl, day=day, topN=20, page=page)
        i = i + 1
        logger.info("done, %s, %s, %s" % (day, sec, i))
    logger.info("All done,%s" % day)


def test():
    """Manual check: print history pages for security 000625 around 2024-02-05."""
    secu = investor.Security()
    secu.code = "000625"
    secu.name = "长安汽车"
    baseUrl = "https://guba.eastmoney.com"
    print(get_guba_history(secu, baseUrl, day="2024-02-05", page=3))

def test1():
    """Manual check of the year/day matching helpers against a fixed News."""
    news = investor.News()
    news.address = "b"
    news.news_text = "a"
    news.time = "2023-12-20 12:30:11"
    for helper in (is_match_year, is_skip_year):
        print(helper("2023", news, day="09-02"))

def test2():
    """Manual check: round-trip the page-number progress file."""
    code = "zssh000001"
    save_page_no(code, PAGE_NO)
    print(read_page_no(code))

def test_missing_day():
    # NOTE(review): is_missing_day takes (baseUrl, guba) — calling it with a
    # single date string will raise TypeError; this test looks stale/broken.
    print(is_missing_day("2023-12-28"))


if __name__ == "__main__":
    # Entry point: crawl 2023 "missing day" posts for board code zssh000001.
    # Earlier invocations kept below for reference:
    # test1()
    # download(day="2024-02-05",code="000063")
    # download()
    # download(day="2024-01-31",skip="600029")
    # download(day="2024-03-13")
    # test_missing_day()
    download_year(code="zssh000001")