#!/usr/bin/env python
# -*- coding:utf-8 -*-
import schedule
import time
import threading
from http_client import *
from DB import *


def job(rss_url):
    """Poll an RSS feed and persist any new magnet links found in its entries.

    For each feed entry: fetch the linked page, scrape the first magnet
    link, scrape seed/leecher/size info from the entry description, and
    save the result if it is not already in the DB.

    :param rss_url: URL of the RSS feed to poll.
    """
    # Imports kept function-local to match the file's existing style
    # (this function runs inside a scheduler thread).
    import feedparser
    import re
    import uuid
    feed = feedparser.parse(rss_url)
    for entry in feed.entries:
        name = entry.title
        content = do_get(entry.link)
        # Some pages carry no magnet link; the old [0] index raised
        # IndexError and aborted the whole job run. Skip those entries.
        mag_links = re.findall("<a href=\"(magnet.*?)\">", content, re.M)
        if not mag_links:
            continue
        mag_link = mag_links[0]
        # Tuples of (seeds, leechers, size) scraped from the description.
        file_info = re.findall(r"\s+Seeds: (\d+)\n\s+Leechers: (\d+)\n\s+Size: (.*GB)", entry.description, re.M)
        if not check_in_db(name, mag_link):
            uid = str(uuid.uuid4())
            save(uid, name, mag_link, file_info)


def check_in_db(name, mag_link):
    """Return True if a record with the same title and info-hash already exists.

    :param name: entry title to match against column 1 of each DB row.
    :param mag_link: magnet link whose info-hash is compared against the
        info-hash of column 2 of each DB row.
    :return: True if a matching row exists, False otherwise.
    """
    # Hoist the target hash out of the loop; use plain ``==`` instead of
    # the dunder ``__eq__`` calls the original used.
    target_hash = _get_info_hash(mag_link)
    return any(
        info[1] == name and _get_info_hash(info[2]) == target_hash
        for info in fetchall_test()
    )


def _get_info_hash(mag_link):
    return mag_link[20:60]


def save(uid, name, mag_link, file_info,
         webhook_url="http://sc.ftqq.com/webhook/465-5b60051857c0b"):
    """Persist a new magnet-link record and push a webhook notification.

    :param uid: unique record id (uuid4 string).
    :param name: entry title.
    :param mag_link: magnet link to store.
    :param file_info: scraped (seeds, leechers, size) info for the entry.
    :param webhook_url: notification endpoint; defaults to the original
        hard-coded URL for backward compatibility. NOTE(review): this URL
        embeds what looks like a credential token — consider moving it to
        configuration.
    """
    save_test(uid, name, mag_link)
    payload = {
        "TA_action_on": 1,
        "TA_title": name,
        "TA_content": file_info,
        "TA_uuid": uid,
    }
    do_post(webhook_url, payload)


class Rss(threading.Thread):
    """Background thread that polls an RSS feed on a fixed interval.

    Schedules :func:`job` every ``t`` minutes and then services the
    scheduler forever.
    """

    def __init__(self, t, rss_url):
        """Store the poll interval (minutes) and the feed URL."""
        super(Rss, self).__init__()
        self.t = t
        self.rss_url = rss_url

    def run(self):
        """Register the recurring job, then pump the scheduler forever."""
        schedule.every(self.t).minutes.do(job, self.rss_url)
        # Service loop: never returns; one-second tick keeps CPU usage low.
        while True:
            schedule.run_pending()
            time.sleep(1)


if __name__ == "__main__":
    # NOTE(review): "htt://dddd" is an invalid placeholder URL — replace
    # with a real feed before deploying.
    rss = Rss(1, "htt://dddd")
    rss.start()
    # print() with parentheses works under both Python 2 and Python 3;
    # the bare print statement was Python-2-only.
    print("thread run")
