# from win10toast import ToastNotifier
# toast = ToastNotifier()
# toast.show_toast(title="This is a title", msg="This is a message",
#                  icon_path=r"C:\Program Files\Internet Explorer\images\bing.ico", duration=10)


from plyer import notification

# notification.notify(
#     title='测试',
#     message='消息',
#     app_icon=None,
#     timeout=10,
# )

import datetime
import json
import random
import time
from collections import deque
from urllib.parse import quote

import pymysql
import requests
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Filtering configuration: a deal item is pushed only if its title or content
# mentions at least one whitelisted keyword (filter_ok_words) and none of the
# blacklisted phrases (filter_not_words, mostly question-style posts).
filter_ok_words = [
    '白条', '立减金', "还款", "话费", "数字币", "数币", "数字人民币", "和包", "建行生活", "支付宝", "支付", "建行",
    "建行生活", "云闪付", "移动", "联通", "电信", "E卡", "京东E卡", "建行", "和包", "分分卡", "打卡", "签到", "小金库", "地铁",
    "大都会", "盒马", "体验金", "沃钱包", "速度", "大水", "小水", "大毛", "小毛", "手慢无", "1元", "麦当劳", "集卡", "zfb", "ysf",
    "邮储", "邮政", "动卡空间", "中信", "平安", "平安口袋银行", "兴业", "交通", "买单吧", "浦发", "浦大喜奔", "广发", "发现精彩",
    "民生", "全民生活", "e生活", "工银e生活", "工商银行", "工行", "工银兴农通", "招商", "掌上生活", "光大", "阳光惠生活", "中行", "中国银行",
    "中银跨境", "朴朴", "翼支付", "yzf", "抽奖"
]
filter_not_words = ["为什么", "怎么", "为啥", "?", "？", "能不能", "可以吗", "可以不", "好价", "联盟", "吗", "呢"]

search_times = 0  # number of completed polling rounds (incremented in the main loop)
today_count = 0  # running total of messages pushed (never reset in this file)
max_id = 0  # NOTE(review): appears unused in this file — candidate for removal
last_max_id = 0  # highest feed id seen in the previous poll (updated by get_data)
new_data_list = []  # NOTE(review): shadowed by a local in get_data; appears unused at module level
webPrefix = 'http://new.xianbao.fun'  # base URL prepended to relative deal links in notifications
# Double-ended queue of the 20 most recently pushed (title, content) pairs,
# used for similarity-based duplicate suppression
pushed_messages = deque(maxlen=20)


def mysql_db():
    """Open and return a pymysql connection to the `alarm` database.

    Returns:
        A live pymysql Connection; the caller is responsible for closing it.
    """
    # NOTE(review): credentials are hard-coded in source — consider moving
    # them to environment variables or a config file.
    db_params = {
        'host': '101.132.157.214',
        'port': 3306,
        'database': 'alarm',
        'charset': 'utf8',
        'user': 'alarm',
        'password': 'Tian201404293',
    }
    return pymysql.connect(**db_params)


# Load all proxy IPs stored in the database
def get_proxies_from_db(conn):
    """Return every proxy IP from the `proxies` table.

    Args:
        conn: an open DB-API connection (pymysql in production).

    Returns:
        list[str]: the `ip` column of every row in `proxies`.
    """
    cur = conn.cursor()
    try:
        cur.execute("SELECT ip FROM proxies")
        return [row[0] for row in cur.fetchall()]
    finally:
        # The original leaked the cursor; always release it, even if
        # execute/fetch raises.
        cur.close()


def get_data(proxies):
    """Poll the xianbao push feed once through a randomly chosen proxy.

    Args:
        proxies: list of proxy addresses; one is picked at random per call.

    Returns:
        The list of feed items whose id is strictly greater than the id
        high-water mark from the previous poll, or None on any network,
        decode, or payload error. Updates the module-level `last_max_id`
        as a side effect.
    """
    global last_max_id
    url = "http://new.xianbao.fun/plus/json/push.json"
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'Cache-Control': 'max-age=0',
        'Cookie': 'timezone=8',
        'If-Modified-Since': 'Sun, 12 Nov 2023 13:23:59 GMT',
        'If-None-Match': '"6550d1ef-1fd0"',
        'Proxy-Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.68'
    }
    # Single attempt: the old `while True` broke on success and returned on
    # failure, so it never actually looped.
    try:
        proxy = random.choice(proxies)
        response = requests.get(url, headers=headers,
                                proxies={'http': proxy, 'https': proxy},
                                timeout=2)
    except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout,
            requests.exceptions.ReadTimeout):
        return None  # network error: caller treats None as "skip this round"
    # The body may not be valid JSON (proxy error pages, truncated replies).
    try:
        data_list = response.json()
    except ValueError:
        return None
    # Empty payload: nothing to report.
    if not data_list:
        return None
    # Error payloads arrive as a dict, e.g.
    # {'code': '0x01900012', 'message': 'cannot find token param.'}
    if isinstance(data_list, dict):
        return None
    # Normalize ids to int ONCE. The old code stored data_list[0]['id'] (a
    # string) into last_max_id, so the next poll compared int > str
    # (TypeError) and filtered with lexicographic string comparison.
    try:
        ids = [int(item['id']) for item in data_list]
    except (KeyError, TypeError, ValueError):
        return None  # malformed item: treat like any other bad payload
    # Keep only items newer than the previous high-water mark, then advance
    # it to the true maximum (no assumption that the feed is newest-first).
    fresh_items = [item for item, item_id in zip(data_list, ids)
                   if item_id > last_max_id]
    last_max_id = max(ids)
    return fresh_items


# Keyword-based deal filtering
def filter_data(data, filter_ok_words, filter_not_words):
    """Keep items mentioning a whitelisted word and no blacklisted word.

    A word "matches" when it appears in the item's title or content.

    Args:
        data: iterable of dicts with 'title' and 'content' keys.
        filter_ok_words: whitelist — at least one word must match.
        filter_not_words: blacklist — no word may match.

    Returns:
        list: matching items, in their original order.
    """
    def mentions_any(item, words):
        # True when any word occurs in the item's title or content.
        return any(w in item['title'] or w in item['content'] for w in words)

    return [item for item in data
            if mentions_any(item, filter_ok_words)
            and not mentions_any(item, filter_not_words)]


# Bark push notification
def send_bark_notification(title, content, url):
    """Push a message through the Bark notification service.

    Args:
        title: notification title (shown with a hamster emoji prefix).
        content: notification body (shown with a speech-balloon emoji prefix).
        url: relative deal link; `webPrefix` is prepended so tapping the
            notification opens the deal page.

    Returns:
        int: 1 on a successful push (HTTP 200), 0 on any failure.
    """
    bark_key = 'wXMCSCnZVj8NtsKzbjYrq7'
    sound = 'bell'
    # percent-encode the path segments (safe='') so titles containing
    # '/', '?', '#' or '&' cannot break the Bark URL; the old code
    # interpolated them raw. Query values go through `params` for the
    # same reason.
    push_url = (f"https://api.day.app/{bark_key}/"
                f"{quote('🐹 ' + title, safe='')}/"
                f"{quote('💬 ' + content, safe='')}")
    try:
        response = requests.get(
            push_url,
            params={'url': webPrefix + url, 'sound': sound},
            timeout=10,  # don't let one slow push stall the polling loop
        )
        # Only an HTTP 200 counts as a successful push
        return 1 if response.status_code == 200 else 0
    except requests.exceptions.RequestException:
        # Covers ConnectionError (as before) plus the new Timeout case.
        return 0


def send_win10_notification(title, content, url):
    """Show a desktop toast notification via plyer.

    Args:
        title: toast title.
        content: toast body text.
        url: accepted for interface parity with send_bark_notification,
            but not used by the desktop toast.

    Returns:
        int: always 1 (counted as a successful push by the caller).
    """
    toast_kwargs = {
        'title': title,
        'message': content,
        'app_icon': None,
        'timeout': 10,
    }
    notification.notify(**toast_kwargs)
    return 1

################################### Similarity-based duplicate filtering ###################################
def is_similar(str1, str2):
    """Return True when the two strings look like the same message.

    Similarity is the TF-IDF cosine similarity between the two strings;
    anything above 0.6 counts as similar.
    """
    vectorizer = TfidfVectorizer()
    try:
        tfidf_matrix = vectorizer.fit_transform([str1, str2])
    except ValueError:
        # Raised when neither string produces a single token
        # (e.g. both are stop words only) — treat as not similar.
        print("The documents only contain stop words")
        return False
    similarity = cosine_similarity(tfidf_matrix)
    return similarity[0, 1] > 0.6


def filter_similar_messages(title, content):
    """Record and approve a message unless it resembles a recent push.

    Compares the candidate against every (title, content) pair in the
    `pushed_messages` deque. When `content` is empty, only titles are
    compared; otherwise a match on either title or content rejects it.

    Returns:
        bool: True if the message is new (and was appended to the deque),
        False if it is too similar to an already-pushed message.
    """
    for prev_title, prev_content in pushed_messages:
        title_hit = is_similar(title, prev_title)
        if not content:
            if title_hit:
                return False
        elif title_hit or is_similar(content, prev_content):
            return False
    # No recent message matched — remember this one and allow the push.
    pushed_messages.append((title, content))
    return True


if __name__ == '__main__':
    print('😉😉😉来咯来咯他来咯，抓取开始......')
    # The DB connection is only needed to load the proxy pool once at
    # startup; the old code held it open for the lifetime of the process.
    conn = mysql_db()
    try:
        proxies = get_proxies_from_db(conn)
    finally:
        conn.close()

    while True:
        # `data_list` — the old name `list` shadowed the builtin.
        data_list = get_data(proxies)
        print("线报列表list=", data_list)
        if data_list is None:
            # Fetch failed this round; retry immediately with another proxy.
            continue
        nice_list = filter_data(data_list, filter_ok_words, filter_not_words)
        # Push a notification for each surviving item.
        success_count = 0
        for item in nice_list:
            title = item['title']
            content = item['content']
            if not filter_similar_messages(title, content):
                print("😉该条消息相似度较高，可能已经有人发过，本次取消推送......")
                continue  # skip messages similar to something already pushed
            # PC WeChat push was planned for 10:00-19:00 in the first
            # version; currently every push goes through Bark.
            success_count += send_bark_notification(title, content, item['url'])
        today_count = today_count + success_count
        run_log = f"💃【第{search_times + 1}轮】本次拉取：{len(data_list)}条，本次推送：{success_count}条, 今日合计已推送：{today_count}条......"
        print(run_log)
        search_times += 1
        time.sleep(15)