import calendar
from datetime import datetime, timedelta
from time import mktime

import feedparser

from app.extensions import db, scheduler
from app.models import Crawler, Record


def feed_handler(crawler_id):
    """Poll the RSS/Atom feed of one crawler and store any new entries.

    Designed to run as a background scheduler job: it opens the Flask app
    context itself via ``scheduler.app``.

    :param crawler_id: primary key of the ``Crawler`` row whose feed to poll.
    :raises werkzeug.exceptions.NotFound: if no crawler with that id exists
        (via ``get_or_404``).
    """
    # Spoof a desktop-browser User-Agent: some feed hosts reject default clients.
    agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 ' \
            'Safari/537.36 Edg/80.0.361.57 '
    with scheduler.app.app_context():
        cra = Crawler.query.get_or_404(crawler_id)
        file = feedparser.parse(url_file_stream_or_string=cra.source, agent=agent)
        for each in file.entries:
            # Not every feed entry carries a publish date; skip those rather
            # than crash the whole job with an AttributeError.
            published = getattr(each, 'published_parsed', None)
            if published is None:
                continue
            # Deduplicate by title: only store entries not seen before.
            if Record.query.filter_by(title=each.title).first():
                continue
            # feedparser's published_parsed is a UTC struct_time; time.mktime
            # would misread it as *local* time. Use calendar.timegm for a
            # correct UTC epoch, then shift to UTC+8 explicitly (the original
            # hard-coded "+ 28800" seconds).
            dt = datetime.utcfromtimestamp(calendar.timegm(published)) + timedelta(hours=8)
            db.session.add(Record(title=each.title, url=each.link,
                                  crawler_id=cra.id, timestamp=dt))
        cra.update = datetime.now()
        # One commit persists all new records and the update stamp together,
        # instead of committing once per record inside the loop.
        db.session.commit()


if __name__ == '__main__':
    # Ad-hoc manual run: poll crawler #1 once.
    feed_handler(crawler_id=1)
