import requests
from bs4 import BeautifulSoup

from zz import zz_code, zz_dingtalk, zz_log, zz_str
import os

log = zz_log.of()


def run():
    """Entry point: read the keyword config, scrape 0818tuan.com for new
    matching deals and push a DingTalk markdown notification if any exist.
    """
    token = zz_code.env("TUAN0818_TOKEN")
    if zz_str.is_null_or_empty(token):
        log.error('未配置TUAN0818_TOKEN')
        return

    # Keywords are configured as a single ';'-separated string.
    keywords = token.split(";")
    log.info(keywords)

    body = Handle(keywords).run_one()
    if zz_str.is_null_or_empty(body):
        # Nothing new matched — stay silent rather than sending an empty message.
        log.info("无优惠信息")
        return

    log.info(body)
    message = '**0818优惠信息** \n\n' + body + "\n\n"
    zz_dingtalk.send_markdown(message, "0818优惠信息")


# Scraper for 0818tuan.com deal listings.
# NOTE(review): the original comment here was "蒙自源" (a restaurant brand),
# which appears to be a copy-paste leftover from another script.
class Handle:
    """Scrapes 0818tuan.com for deal posts whose titles match any keyword
    in ``token_arr`` and formats new matches as a markdown snippet.

    Already-reported titles are persisted one-per-line in a local text
    file so each deal is only reported once; the file is purged daily
    (see :meth:`delete_file`).
    """

    def __init__(self, token_arr):
        # token_arr: list of keyword strings matched against post titles.
        self.token_arr = token_arr
        # One-line-per-title log of deals already reported.
        # (Removed a dead `if not os.path.exists(...): pass` branch that
        # had no effect.)
        self.file_path = 'api_0818tuan.txt'

    def run_one(self):
        """Fetch the front page and return a markdown snippet describing
        every new matching deal, or '' when there is nothing new."""
        api_url = 'http://www.0818tuan.com'
        html = self.get_html(api_url)
        soup = BeautifulSoup(html, 'html.parser')

        self.delete_file()
        tit_arr = self.get_title_arr()  # titles already reported
        list_group_item = soup.find_all('a', attrs={'class': 'list-group-item'})
        res = []
        for item in list_group_item:
            title = str(item['title'])
            if zz_str.is_null_or_empty(title):
                continue

            title = title.replace("\n", '')
            if not zz_str.contains(title, self.token_arr):  # keyword match
                continue

            if zz_code.arr_find(tit_arr, title):  # skip already-reported titles
                continue

            url = api_url + item['href']
            res.append({
                'title': title,
                'url': url,
            })

        if not res:
            return ''

        log_msg = ''
        for item in res:
            log_msg += f"{item['title']}\n\n[-- 点击查看 --]({item['url']})\n\n"
            self.title_add(item['title'])

        return log_msg

    def get_headers(self):
        """Build request headers; include a User-Agent when configured
        via the USER_AGENT environment setting."""
        headers = {
        }
        user_agent = zz_code.env("USER_AGENT")
        if zz_str.has_any_text(user_agent):
            headers['User-Agent'] = user_agent

        return headers

    # Fetch a page and return its decoded body, or '' on failure.
    # (The previous comment said "签到" / "check-in", which did not match
    # the behavior — this method only performs an HTTP GET.)
    def get_html(self, api_url):
        headers = self.get_headers()

        log.debug(api_url)
        log.debug(headers)
        # Timeout added so a stalled server cannot hang the job forever.
        res = requests.get(api_url, headers=headers, timeout=30)
        if res.content is None or res.status_code != 200:
            return ''
        return res.content.decode('utf-8')

    # Return every previously reported title (one per line in the log file).
    # A missing file on the first run of the day is expected; the error is
    # logged and an empty list-of-one-empty-string is returned, matching
    # the original best-effort behavior.
    def get_title_arr(self):
        config = ""
        try:
            with open(self.file_path, 'r', encoding='utf-8') as file:
                # Single read() is equivalent to joining readlines().
                config = file.read()
        except Exception as ex:
            log.error(ex)

        return config.split("\n")

    # Append a reported title to the log file; `with` guarantees the
    # handle is closed even if the write fails.
    def title_add(self, title):
        with open(self.file_path, "a", encoding="utf-8") as file:
            file.write(title + "\n")

    def delete_file(self):
        """Remove the title log if it was last modified before today, so
        each day starts with a fresh 'already reported' set."""
        if os.path.isfile(self.file_path):
            file_create_date = zz_code.date_from_timestamp(os.path.getmtime(self.file_path))
            if file_create_date < zz_code.date_today():
                os.remove(self.file_path)


# Allow running this module directly as a script.
if __name__ == '__main__':
    run()
