import re
from flask import Flask, jsonify, send_file
import xml.etree.ElementTree as ET
from io import BytesIO
import feedparser
from flask_cors import CORS
from flask_cors import cross_origin
from dateutil.parser import parse as parse_date
from model import db, Article, Category, RSSSource
import requests
import time
from flask import request
from datetime import datetime, timedelta
import pytz
from sqlalchemy import desc
from flask_socketio import SocketIO, emit
import threading

app = Flask(__name__)
# SocketIO pushes live article updates; CORS limited to the dev front-end origin
socketio = SocketIO(app, cors_allowed_origins="http://localhost:8080")
# Database connection

app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///rssdata.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

# Initialize the SQLite database
db.init_app(app)
# Allow cross-origin requests from any origin for all routes

CORS(app, resources={r'/*': {'origins': '*'}})


# List every active category together with its feeds and unread counts
@app.route('/get-categories', methods=['GET'])
@cross_origin()
def get_categories():
    """Return all non-deleted categories as JSON, each carrying its
    non-deleted RSS sources with an unread-article count per source."""
    result = []
    for category in Category.query.filter(Category.is_deleted == 0).all():
        payload = category.format_category()
        sources = []
        for source in RSSSource.query.filter_by(category_id=category.id, is_deleted=0).all():
            entry = source.serialize()
            # Virtual feeds: 479 counts every unread article, 478 counts
            # unread favorites; a normal feed counts only its own items.
            if source.id == 479:
                entry['item_count'] = Article.query.filter_by(is_read=0, is_deleted=0).count()
            elif source.id == 478:
                entry['item_count'] = Article.query.filter_by(is_read=0, is_deleted=0, is_favorite=1).count()
            else:
                entry['item_count'] = Article.query.filter_by(feed_id=source.id, is_read=0, is_deleted=0).count()
            sources.append(entry)
        payload['items'] = sources
        result.append(payload)
    return jsonify(result)


# def parse_date(date):
#     # Add your custom date parsing logic here if needed
#     return datetime.strptime(date, '%a, %d %b %Y %H:%M:%S %z')

# Refresh: crawl one RSS feed and store the articles not seen before
@app.route('/crawl-articles', methods=['POST'])
@cross_origin()
def crawl_articles():
    """Crawl the feed at ``rss_url`` and insert new articles for ``feed_id``.

    Expects JSON ``{"rss_url": ..., "feed_id": ...}``. Returns a JSON
    message on success, or a JSON ``error`` description on failure.
    """
    data = request.get_json()
    rss_url = data.get('rss_url', '')
    feed_id = data.get('feed_id', '')
    try:
        feed_id = int(feed_id)
    except (TypeError, ValueError):  # TypeError covers a missing/None feed_id
        return jsonify({'error': '无效的文章 ID'})
    print(f"爬取的 RSS 源 ID: {feed_id}")

    if not rss_url:
        return jsonify({'error': '无效的 RSS URL'})

    try:
        feed = feedparser.parse(rss_url)
        # .get() instead of attribute access: feeds legally omit any of
        # these fields, and one sparse entry should not abort the crawl.
        new_articles = [
            {
                'title': entry.get('title'),
                'published_at': entry.get('published'),
                'link': entry.get('link'),
                'description': entry.get('description')
            }
            for entry in feed.entries
        ]
    except Exception as e:
        print(f"从 {rss_url} 爬取文章失败。错误信息：{str(e)}")
        return jsonify({'error': f'从 {rss_url} 爬取文章失败。错误信息：{str(e)}'})

    # Known links as a set: O(1) membership instead of the original
    # O(n) list scan per article.
    existing_urls = {article.link for article in Article.query.all()}
    articles_to_insert = []
    for article in new_articles:
        if article['link'] not in existing_urls:
            articles_to_insert.append(article)
            existing_urls.add(article['link'])  # also dedupe within this feed

    # Persist the new articles
    for article_data in articles_to_insert:
        try:
            published = article_data.get('published_at')
            # Fall back to "now" (UTC) when the feed omits a publish date.
            published_at = parse_date(published) if published else datetime.now(pytz.utc)
            article = Article(
                feed_id=feed_id,
                title=article_data['title'],
                link=article_data['link'],
                description=article_data.get('description', None),
                published_at=published_at,
                is_read=0,
                is_favorite=0,
                created_at=datetime.utcnow(),
                updated_at=datetime.utcnow(),
                is_deleted=0,
                bookmark=0
            )
            db.session.add(article)
        except Exception as e:
            print(f"将文章插入数据库失败。错误信息：{str(e)}")

    try:
        db.session.commit()
        print(f"成功从 {rss_url} 爬取并写入 {len(articles_to_insert)} 篇新文章到数据库！")
        return jsonify({'message': f'成功从 {rss_url} 爬取并写入 {len(articles_to_insert)} 篇新文章到数据库！'})
    except Exception as e:
        print(f"将更改提交到数据库失败。错误信息：{str(e)}")
        db.session.rollback()
        return jsonify({'error': '将更改提交到数据库失败。'})


# Crawl one RSS source and persist its articles
def crawl_rss_articles(rss_source):
    """Fetch ``rss_source.url`` and insert its entries as Article rows.

    Bug fix: the original inserted every entry unconditionally, creating
    duplicate articles on each run; entries whose link already exists in
    the database are now skipped.
    """
    try:
        response = requests.get(rss_source.url, timeout=10, allow_redirects=True)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        print(f"Failed to crawl articles from {rss_source.url}: {e}")
        return

    feed = feedparser.parse(response.text)
    articles = feed.entries

    # Build the duplicate-check set once, not one query per entry.
    existing_links = {article.link for article in Article.query.all()}

    inserted = 0
    for article in articles:
        link = article.get('link', None)
        if link in existing_links:
            continue
        published = article.get('published', None)
        # Fall back to "now" when the feed has no publish date.
        published_at = parse_date(published) if published else datetime.utcnow()
        db.session.add(Article(
            feed_id=rss_source.id,
            title=article.get('title', None),
            link=link,
            description=article.get('description', None),
            published_at=published_at,
            is_read=0,
            is_favorite=0,
            created_at=datetime.utcnow(),
            updated_at=datetime.utcnow(),
            is_deleted=0,
            bookmark=0
        ))
        existing_links.add(link)  # dedupe within this same feed as well
        inserted += 1

    db.session.commit()

    print(f"Successfully crawled {inserted} articles from {rss_source.url}")


# Example: crawl and persist the articles of every stored RSS source
def crawl_and_save_articles():
    """Crawl every RSS source in the database, pausing 1s between feeds
    to avoid hammering remote servers."""
    for source in RSSSource.query.all():
        crawl_rss_articles(source)
        time.sleep(1)


# Normalize text for display: strip markup/control chars, then clip
def truncate_text(text, max_length=20):
    """Remove HTML tags and invisible/control characters from *text* and
    return at most the first *max_length* characters.

    Returns '' when *text* is None.
    """
    if text is None:
        return ''
    # Matches HTML tags plus control characters and zero-width/format
    # code points that would render as garbage in the UI.
    junk = r'<[^>]+>|[\x00-\x1F\x7F-\x9F\xAD\u1680\u180E\u2000-\u200D\u2028\u2029\u202F\u205F\u2060\u3000\uD800-\uDFFF\uFEFF\uFFF9-\uFFFC\uFFFE\uFFFF]'
    cleaned = re.sub(junk, '', text)
    return cleaned[:max_length]


# Format an ISO timestamp as a relative Chinese time string
def format_time_difference(published_time):
    """Turn an ISO-8601 timestamp string into "N天前" / "N小时前" /
    "N分钟前" / "刚刚" relative to now.

    Returns "" for None. Bug fixes: the original computed an unused
    ``timestamp`` local, and subtracting a timezone-aware parsed value
    from a naive ``datetime.now()`` raised TypeError.
    """
    if published_time is None:
        return ""  # no publish date recorded

    published_dt = datetime.fromisoformat(published_time)
    # Compare like with like: pick an aware "now" when the input carries
    # a timezone, otherwise a naive one.
    if published_dt.tzinfo is not None:
        now = datetime.now(published_dt.tzinfo)
    else:
        now = datetime.now()

    diff = now - published_dt
    if diff.total_seconds() < 0:
        return "刚刚"  # clock skew / future timestamp: treat as just-now
    if diff.days > 0:
        return f"{diff.days}天前"
    if diff.seconds >= 3600:
        return f"{diff.seconds // 3600}小时前"
    if diff.seconds >= 60:
        return f"{diff.seconds // 60}分钟前"
    return "刚刚"


# List every article, newest first
@app.route('/get-articles', methods=['GET'])
@cross_origin()
def get_articles():
    """Return all articles ordered by publish date (descending) as JSON.

    Bug fix: a debug loop printed ``article.published_at.timestamp()``
    for every row, which crashed with AttributeError whenever
    ``published_at`` was NULL (the serializer below guards that case);
    the loop is removed.
    """
    articles = Article.query.order_by(desc(Article.published_at)).all()
    article_list = []
    for article in articles:
        article_list.append({
            'id': article.id,
            'feed_id': article.feed_id,
            'title': article.title,
            'link': article.link,
            'description': truncate_text(article.description),
            'published_at': format_time_difference(article.published_at.isoformat() if article.published_at else None),
            'is_read': article.is_read,
            'is_favorite': article.is_favorite,
            'created_at': article.created_at.isoformat(),
            'updated_at': article.updated_at.isoformat(),
            'bookmark': article.bookmark
        })

    return jsonify(article_list)


# Validate that a URL points to a well-formed RSS feed
@app.route('/check-rss', methods=['POST'])
@cross_origin()
def check_rss():
    """Parse the submitted URL and report whether it looks like a feed.

    Returns ``{'isValid': True, 'rssInfo': {...}}`` for a parsable feed,
    ``{'isValid': False, ...}`` otherwise (HTTP 500 on unexpected errors).
    The unused ``link`` local from the original was dropped.
    """
    data = request.get_json()
    url = data.get('url')
    try:
        feed = feedparser.parse(url)

        # A feed without a title is treated as invalid.
        if 'title' in feed.feed:
            title = feed.feed.title
            description = feed.feed.description if 'description' in feed.feed else None
            # The response deliberately echoes the submitted url as 'link'
            # (not feed.feed.link) — that is what the front-end stores.
            return jsonify({'isValid': True, 'rssInfo': {'title': title, 'description': description, 'link': url}})
        else:
            return jsonify({'isValid': False, 'message': 'Invalid RSS source'})

    except Exception as e:
        return jsonify({'isValid': False, 'error': str(e)}), 500


# Detail view for a single RSS source, including its unread count
@app.route('/get_rss_info/<string:id>', methods=['GET'])
@cross_origin()
def get_rss_info(id):
    """Return id/title/url/description/unread-count for one feed.

    Virtual feeds: 479 counts every unread article, 478 counts unread
    favorites. Bug fix: the original crashed with UnboundLocalError
    (``rss`` never assigned) when no source matched ``id``.
    """
    try:
        feed_id = int(id)
    except ValueError:
        return jsonify({'error': 'Invalid article ID'})
    rss_data = RSSSource.query.filter_by(id=feed_id).first()
    if rss_data is None:
        return jsonify({'error': '找不到指定的RSS源'})
    if feed_id == 479:
        count = Article.query.filter_by(is_read=0, is_deleted=0).count()
    elif feed_id == 478:
        count = Article.query.filter_by(is_read=0, is_deleted=0, is_favorite=1).count()
    else:
        count = Article.query.filter_by(feed_id=feed_id, is_read=0, is_deleted=0).count()
    rss = {
        'id': rss_data.id,
        'title': rss_data.title,
        'url': rss_data.url,
        'description': truncate_text(rss_data.description),
        'count': count
    }
    return jsonify(rss)


# Unsubscribe: soft-delete a feed and all of its articles
@app.route('/delete-rss/<string:id>', methods=['GET'])
@cross_origin()
def delete_rss(id):
    """Mark the feed and every article belonging to it as deleted.

    Bug fix: the original dereferenced ``rss.is_deleted`` without
    checking for None and raised AttributeError when the feed did not
    exist; the two commits are also collapsed into one.
    """
    try:
        feed_id = int(id)
    except ValueError:
        return jsonify({'error': 'Invalid feed ID'})
    rss = RSSSource.query.filter_by(id=feed_id).first()
    if rss is None:
        return jsonify({'error': '找不到指定的RSS源'})
    rss.is_deleted = 1
    for article in Article.query.filter_by(feed_id=feed_id).all():
        article.is_deleted = 1
    # One commit covers both the source row and its articles.
    db.session.commit()
    return jsonify({'message': '已成功删除！'})


# Mark a single article as read
@app.route('/update-article-read/<string:id>', methods=['GET'])
@cross_origin()
def update_article_read(id):
    """Set is_read=1 on the article identified by *id*."""
    print(id)
    try:
        article_id = int(id)
    except ValueError:
        return jsonify({'error': 'Invalid article ID'})

    article = Article.query.get(article_id)
    if article is None:
        return jsonify({'error': '找不到指定的文章'})
    article.is_read = 1
    db.session.commit()
    return jsonify({'message': '文章已标记为已读'})


# Toggle an article's favorite flag
@app.route('/update-article-bookmark/<string:id>', methods=['GET'])
@cross_origin()
def update_article_bookmark(id):
    """Flip is_favorite between 0 and 1 for the article *id*."""
    print(id)
    try:
        article_id = int(id)
    except ValueError:
        return jsonify({'error': 'Invalid article ID'})

    article = Article.query.get(article_id)
    if article is None:
        return jsonify({'error': '找不到指定的文章'})
    # Toggle: 1 -> 0, anything else -> 1 (same rule as the original).
    article.is_favorite = 0 if article.is_favorite == 1 else 1
    db.session.commit()
    return jsonify({'message': '更新成功！'})


# Toggle an article's read flag
@app.route('/update-article-read-state/<string:id>', methods=['GET'])
@cross_origin()
def update_article_read_state(id):
    """Flip is_read between 0 and 1 for the article *id*."""
    print(id)
    try:
        article_id = int(id)
    except ValueError:
        return jsonify({'error': 'Invalid article ID'})

    article = Article.query.get(article_id)
    if article is None:
        return jsonify({'error': '找不到指定的文章'})
    # Toggle: 1 -> 0, anything else -> 1 (same rule as the original).
    article.is_read = 0 if article.is_read == 1 else 1
    db.session.commit()
    return jsonify({'message': '文章已更新标记！'})


# Shape one Article row into the JSON dict the front-end expects.
def _serialize_article(article):
    return {
        'id': article.id,
        'feed_id': article.feed_id,
        'title': article.title,
        'link': article.link,
        'description': truncate_text(article.description),
        'published_at': format_time_difference(
            article.published_at.isoformat() if article.published_at else None),
        'is_read': article.is_read,
        'is_favorite': article.is_favorite,
        'created_at': article.created_at.isoformat(),
        'updated_at': article.updated_at.isoformat(),
        'bookmark': article.bookmark
    }


# Articles for one feed (478 = favorites, 479 = all articles)
@app.route('/get-article/<string:id>', methods=['GET'])
@cross_origin()
def get_article(id):
    """Return the article list of a feed, newest first.

    Virtual feed ids: 478 returns non-deleted favorites, 479 returns
    every non-deleted article; any other id filters by feed_id. The
    original repeated the same serialization dict three times; all
    branches now share ``_serialize_article``.
    """
    try:
        feed_id = int(id)
    except ValueError:
        return jsonify({'error': 'Invalid article ID'})

    if feed_id == 478:
        query = Article.query.filter_by(is_deleted=0, is_favorite=1)
    elif feed_id == 479:
        query = Article.query.filter_by(is_deleted=0)
    else:
        query = Article.query.filter_by(feed_id=feed_id, is_deleted=0)
    articles = query.order_by(desc(Article.published_at)).all()

    if articles:
        return jsonify([_serialize_article(article) for article in articles])
    return jsonify({'error': 'Article not found'})


# View function that registers a new RSS source
@app.route('/add-rss-source', methods=['POST'])
@cross_origin()
def add_rss_source():
    """Create an RSSSource row from the posted JSON payload.

    New sources land in the default category (id 56) with no image.
    """
    print("添加rss源")
    payload = request.json
    url = payload.get('url', '')
    print(url)

    source = RSSSource(
        url=url,
        title=payload.get('title'),
        description=payload.get('description'),
        image=None,
        category_id=56,
        created_at=datetime.utcnow(),
        updated_at=datetime.utcnow()
    )
    db.session.add(source)
    db.session.commit()

    return jsonify({'message': 'RSS source added successfully'})


# Export every subscription as a downloadable OPML document
@app.route('/export-opml')
@cross_origin()
def export_opml():
    """Build an OPML file in memory and send it as an attachment."""
    sources = RSSSource.query.all()

    # <opml> root with an empty <head> keeps the document well-formed.
    root = ET.Element('opml', version='1.0')
    ET.SubElement(root, 'head')
    body = ET.SubElement(root, 'body')

    # One <outline> per subscription inside <body>.
    for source in sources:
        ET.SubElement(body, 'outline', text=source.title, type='rss', xmlUrl=source.url)

    # Serialize into an in-memory buffer and serve it.
    buffer = BytesIO()
    ET.ElementTree(root).write(buffer, encoding='utf-8', xml_declaration=True)
    buffer.seek(0)

    return send_file(buffer, download_name='rss_sources.opml', mimetype='application/xml', as_attachment=True)


# 上传文件
# @app.route('/import-opml', methods=['POST'])
# @cross_origin()
# def import_opml():
#     file = request.files['FormDatas']
#     print(file)
#
#     # 解析OPML文件
#     opml_tree = ET.parse(file)
#     opml_root = opml_tree.getroot()
#
#     rss_resources = []
#
#     # 遍历 <outline> 元素，提取标题和链接
#     for outline_elem in opml_root.iter('outline'):
#         title = outline_elem.get('title')
#         url = outline_elem.get('xmlUrl')
#
#         if title and url:
#             rss_resource = RSSSource(title, url)
#             rss_resources.append(rss_resource)
#
#     # 将解析的数据存入数据库
#     for rss_resource in rss_resources:
#         rss_source = RSSSource(
#             url=rss_resource.url,
#             title=rss_resource.title,
#             description='',  # 设置为空字符串或其他默认值
#             image='',  # 设置为空字符串或其他默认值
#             category_id=81,  # 设置为默认值
#             created_at=datetime.utcnow(),
#             updated_at=datetime.utcnow(),
#             is_deleted=False  # 设置为默认值
#         )
#         db.session.add(rss_source)
#
#     db.session.commit()
#
#     return jsonify({'message': 'OPML文件导入成功'})

@app.route('/import-opml', methods=['POST'])
@cross_origin()
def import_opml():
    """Import subscriptions from an uploaded OPML file.

    Reads the 'file' field of the multipart form, extracts every
    <outline> carrying a feed URL, and stores each as an RSSSource in
    the default category (id 56).

    Bug fixes: the original only read the ``title`` attribute, but
    export_opml writes the feed name into ``text`` — so round-tripping
    our own export imported nothing; both attributes are now accepted.
    It also built every RSSSource twice; each is now built once.
    """
    file = request.files['file']  # 'file' matches FormData.append('file', ...) on the front-end

    # Parse the uploaded OPML document
    opml_root = ET.parse(file).getroot()

    new_sources = []
    for outline in opml_root.iter('outline'):
        # Accept 'title' (common in external OPML) or 'text' (what
        # export_opml writes).
        title = outline.get('title') or outline.get('text')
        url = outline.get('xmlUrl')

        if title and url:
            new_sources.append(RSSSource(
                url=url,
                title=title,
                description='',
                image='',
                category_id=56,  # default category
                created_at=datetime.utcnow(),
                updated_at=datetime.utcnow(),
                is_deleted=0
            ))

    db.session.add_all(new_sources)
    db.session.commit()

    return jsonify({'message': 'OPML文件导入成功'})


@app.route('/search-articles', methods=['POST'])
@cross_origin()
def search_articles():
    """Case-insensitive substring search over article titles.

    Expects JSON {"search_text": ...}; returns matching rows under
    the 'table_data' key.
    """
    search_text = request.json.get('search_text', '')

    # ilike gives case-insensitive LIKE matching on the title column.
    matches = Article.query.filter(Article.title.ilike(f'%{search_text}%')).all()

    table_data = [
        {
            'title': article.title,
            'link': article.link,
            'publishedAt': article.published_at,
            'description': truncate_text(article.description)
        }
        for article in matches
    ]

    return jsonify({'table_data': table_data})

def check_rss_updates():
    """Background poller: every 5 minutes re-fetch all active feeds and push
    entries not seen in the previous pass to the front-end over SocketIO.

    Runs forever inside the Flask app context (needed for the DB queries);
    intended to be started in a background thread.
    """
    with app.app_context():
        # Snapshot of active sources taken once at startup; feeds added
        # later are not picked up until the process restarts.
        rss = RSSSource.query.filter_by(is_deleted=False).distinct().all()
        rss_urls = [item.url for item in rss]
        print(rss_urls)
        # Last-seen entries per feed URL; the first pass treats every entry as new.
        prev_entries = {url: [] for url in rss_urls}

        while True:
            print("Job function started")
            for rss_url in rss_urls:
                feed = feedparser.parse(rss_url)
                print(feed)
                new_entries = feed.entries
                # NOTE(review): membership compares feedparser entry dicts by
                # value, so an entry whose fields change will re-notify.
                updated_entries = [entry for entry in new_entries if entry not in prev_entries[rss_url]]
                print(new_entries)

                if updated_entries:
                    # Look up the feed title for this URL from the cached sources
                    rss_title = next((source.title for source in rss if source.url == rss_url), '')

                    # Resolve the category's display name (chname) for this feed
                    category_id = next((source.category_id for source in rss if source.url == rss_url), None)
                    chname = None
                    if category_id is not None:
                        category = Category.query.filter_by(id=category_id).first()
                        if category:
                            chname = category.chname

                    # Attach feed title and category name to each new entry
                    for entry in updated_entries:
                        entry['rss_title'] = rss_title
                        entry['chname'] = chname

                    # Push the new articles to connected clients
                    socketio.emit('new_articles', {'articles': updated_entries})

                prev_entries[rss_url] = new_entries
                print("Job function completed")
            time.sleep(300)  # run again after 5 minutes


# Run check_rss_updates() in a background thread.
# NOTE(review): this starts at import time (before the server runs), and the
# thread is non-daemon, so the process cannot exit while the poll loop is
# alive — confirm both are intended.
rss_thread = threading.Thread(target=check_rss_updates)
rss_thread.start()


if __name__ == '__main__':
    socketio.run(app)

