# -*- coding: utf-8 -*-
# @Time    : 2024/5/11 13:22
# @Author  : Alvin
# @File    : xhs_route.py
import json
import os
from flask import Blueprint, request, jsonify,g
import requests
from ..utils.trace_job_status import check_scrapyd_job_status
from ..utils.scrapyd_job_operation import start_scrapyd_job
from ..utils.get_job_id import get_uuid_str
from ..utils.redis_service import set_progress
from ..utils.db_service import get_db
from concurrent.futures import ThreadPoolExecutor
from flask_jwt_extended import jwt_required, get_jwt_identity
from datetime import datetime
# Blueprint grouping every Xiaohongshu (xhs) related route in this module.
xhs_bp = Blueprint("xhs", __name__)

@xhs_bp.route("/getNoteById", methods=['GET'])
def get_note_by_id():
    """Return a single note by id, crawling it on demand on a cache miss.

    Query params:
        noteId: id of the note to fetch (required).

    Returns:
        200 {'status': 'success', 'data': row} when the note is available,
        400 when noteId is missing,
        404 when the crawl finished but the note still isn't in the table,
        500 when the scrapyd job could not be started or failed.
    """
    note_id = request.args.get('noteId')
    if not note_id:
        # Without an id the query below can never match; fail fast.
        return jsonify({'status': 'error', 'message': 'noteId is required'}), 400

    db = get_db()
    # Serve from the notes table first; only crawl when the note is absent.
    result = db.fetchone("SELECT * FROM notes WHERE note_id = %s", (note_id,))
    if result:
        return jsonify({'status': 'success', 'data': result}), 200

    job_id = get_uuid_str()
    data = {
        # NOTE(review): other routes in this file use project 'scrapy_service';
        # confirm 'scrapy' is intentional for this spider.
        "project": 'scrapy',
        "spider": 'Id',
        "note_id": note_id,
        "job_id": job_id
    }
    # Launch the crawl through scrapyd.
    json_res = start_scrapyd_job(data)
    if json_res['status'] != 'ok':
        return jsonify({'status': 'error', 'message': 'Failed to start job'}), 500

    # The original spawned a single-task ThreadPoolExecutor and immediately
    # blocked on future.result(); calling the status checker directly is
    # behaviorally identical and removes the pointless thread machinery.
    job_status = check_scrapyd_job_status(job_id)
    if job_status['status'] != 'success':
        return jsonify({'status': 'error', 'message': 'Job failed'}), 500

    # Crawl succeeded: the spider should have persisted the note by now.
    result = db.fetchone("SELECT * FROM notes WHERE note_id = %s", (note_id,))
    if result:
        return jsonify({'status': 'success', 'data': result}), 200
    return jsonify({'status': 'error', 'message': 'Note not found'}), 404
@xhs_bp.route('/getSearchResultByKeyword', methods=['GET'])
def get_search_result_by_keyword():
    """Return notes matching a keyword, crawling a page on demand.

    Query params:
        keyword: search term (required).
        pageNum: page to crawl when a crawl is triggered (default 1).
        for_crawl: truthy flag ('1'/'true'/'yes') to force a fresh crawl
            even when cached rows exist.

    Returns:
        200 with the matching note rows, 400 on bad input,
        404 when the crawl produced nothing, 500 on job failure.
    """
    keyword = request.args.get('keyword')
    if not keyword:
        # The original crashed here: '%' + None raises TypeError.
        return jsonify({'status': 'error', 'message': 'keyword is required'}), 400
    page_num = request.args.get('pageNum', default=1, type=int)
    if page_num < 1:
        # The original silently returned None (HTTP 500) for falsy pageNum.
        return jsonify({'status': 'error', 'message': 'pageNum must be >= 1'}), 400
    # BUG FIX: request.args.get(..., type=bool) evaluates bool("false") == True,
    # so ANY non-empty value forced a crawl; parse the flag explicitly.
    for_crawl = request.args.get("for_crawl", default='').lower() in ('1', 'true', 'yes')

    db = get_db()
    # Fuzzy match against previously crawled keyword/note pairs.
    result = db.fetchall("SELECT * FROM keyword_notes WHERE keyword LIKE %s",
                         ('%' + keyword + '%',))
    if result and not for_crawl:
        final_result = [
            db.fetchone("select * From notes WHERE note_id = %s",
                        (row['keyword_note_id'],))
            for row in result
        ]
        return jsonify({'status': 'success', 'data': final_result}), 200

    print(f"正在采集{keyword}_pageNum{page_num}")
    job_id = get_uuid_str()
    # Launch the crawl through scrapyd.
    data = {
        "project": 'scrapy_service',
        "spider": 'note_by_keyword',
        "keyword": keyword,
        "page_num": page_num,
        "job_id": job_id,
    }
    json_res = start_scrapyd_job(data)
    if json_res['status'] != 'ok':
        return jsonify({'status': 'error', 'message': 'Failed to start job'}), 500

    # Block until the crawl finishes; the original wrapped this single call in
    # a throwaway ThreadPoolExecutor and immediately waited on the future.
    job_status = check_scrapyd_job_status(job_id)
    if job_status['status'] != 'success':
        return jsonify({'status': 'error', 'message': 'Job failed'}), 500

    result = db.fetchall("select * From keyword_notes WHERE keyword = %s", (keyword,))
    if result:
        return jsonify({'status': 'success', 'data': result}), 200
    return jsonify({'status': 'error', 'message': 'Note for keyword not found'}), 404

@xhs_bp.route('/getNotesByPosterId', methods=['GET'])
def get_notes_by_poster_id():
    """Return every cached note for a publisher id.

    Query params:
        posterId: publisher id to look up (required).

    Returns 200 with the note rows, 400 when posterId is missing, or 500
    (with a hint to create a crawl task) when the publisher is unknown.
    """
    poster_id = request.args.get('posterId')
    if not poster_id:
        return jsonify({'status': 'error', 'message': 'posterId is required'}), 400
    db = get_db()
    result = db.fetchall("select * From publisher_notes WHERE publisher_id = %s", (poster_id,))
    if result:
        # Resolve each publisher_note link to the full note row.
        final_result = [
            db.fetchone("select * From notes WHERE note_id = %s",
                        (row['publisher_note_id'],))
            for row in result
        ]
        return jsonify({'status': 'success', 'data': final_result}), 200
    # BUG FIX: the message previously leaked debugging residue
    # ("ps aux | grep twistd") to the client; removed.
    # TODO(review): a missing publisher is arguably 404, not 500 — kept as 500
    # in case the frontend matches on it.
    return jsonify({'status': 'error', 'data': "库中没有该发布者，去任务中心发起请求后再查看吧!"}), 500


@xhs_bp.route('/crawl_notes_by_keyword', methods=['POST'])
@jwt_required()
def crawl_notes_by_keyword():
    """Create an asynchronous crawl task for notes matching a keyword.

    Expects a JSON body of the form {"params": {"inputData": <keyword>}}.
    Records the task in task_records, seeds its progress in redis, and
    returns immediately without waiting for the crawl to finish.

    Returns 200 on task creation, 400 on bad input or dispatch failure.
    """
    username = get_jwt_identity()

    # Guard the payload shape: the original indexed args['params']['inputData']
    # directly and 500'd on a missing/odd body.
    args = request.get_json(silent=True) or {}
    keyword = (args.get('params') or {}).get('inputData')
    if not keyword:
        return jsonify({'status': 400, 'data': '创建失败!'}), 400

    # Resolve the user BEFORE launching the job so a bad identity doesn't
    # leave an orphaned crawl running (the original looked up the user after
    # the job had already started and crashed on a missing row).
    db = get_db()
    user_row = db.fetchone("select user_id from users where username = %s", (username,))
    if user_row is None:
        return jsonify({'status': 400, 'data': '创建失败!'}), 400
    user_id = user_row['user_id']

    job_id = get_uuid_str()
    # Launch the crawl through scrapyd; "jobid" overrides scrapyd's default
    # auto-generated id so our task_id and scrapyd's job id match.
    data = {
        "project": 'scrapy_service',
        "spider": 'note_by_keyword',
        "keyword": keyword,
        "page_num": 1,
        "job_id": job_id,
        "jobid": job_id  # override scrapyd's default uuid
    }
    job_status = start_scrapyd_job(data)
    if job_status['status'] != 'ok':
        return jsonify({'status': 400, 'data': '创建失败!'}), 400

    # Seed the progress entry so the task center can poll it right away.
    set_progress(job_id, {'status': 'pending'})
    formatted_now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    db.insert_one(table="task_records", data={
        'task_id': job_id,
        'start_time': formatted_now,
        'status': 'pending',
        'task_name': keyword,
        'params': json.dumps(args, ensure_ascii=False),
        'user_id': user_id
    })

    return jsonify({'status': 200, 'data': '任务创建成功!'}), 200





@xhs_bp.route('/crawl_note_by_id', methods=['GET'])
def crawl_note_by_id():
    """Create an asynchronous crawl task for a single note id.

    Query params:
        noteId: note id to crawl (required).
        apiFile / apiFunction: stored with the task record for callback use.

    Returns 200 on task creation, 400 on bad input or dispatch failure.
    """
    note_id = request.args.get('noteId')
    if not note_id:
        # Don't dispatch a spider with a NULL target.
        return jsonify({'status': 400, 'data': '创建失败!'}), 400
    api_file = request.args.get('apiFile')
    api_function = request.args.get('apiFunction')

    job_id = get_uuid_str()
    # Launch the crawl through scrapyd; "jobid" overrides scrapyd's default
    # auto-generated id so our task_id and scrapyd's job id match.
    data = {
        "project": 'scrapy_service',
        "spider": 'Id',
        "note_id": note_id,
        "page_num": 1,
        "job_id": job_id,
        "jobid": job_id  # override scrapyd's default uuid
    }
    job_status = start_scrapyd_job(data)
    if job_status['status'] != 'ok':
        return jsonify({'status': 400, 'data': '创建失败!'}), 400

    db = get_db()
    # Seed the progress entry so the task center can poll it right away.
    set_progress(job_id, {'status': 'pending'})
    formatted_now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # NOTE(review): unlike the sibling crawl routes, this record stores no
    # user_id/params — confirm that's intentional for this endpoint.
    db.insert_one(table="task_records", data={
        'task_id': job_id,
        'start_time': formatted_now,
        'status': 'pending',
        'task_name': note_id,
        "api_file": api_file,
        "api_function": api_function,
    })

    return jsonify({'status': 200, 'data': '任务创建成功!'}), 200


@xhs_bp.route('/crawl_notes_by_poster_id', methods=['POST'])
# @jwt_required()  # auth temporarily disabled; see user_id TODO below
def crawl_notes_by_poster_id():
    """Create an asynchronous crawl task for all notes of a publisher.

    Expects a JSON body of the form {"params": {"inputData": <poster_id>}}.
    Records the task in task_records, seeds its progress in redis, and
    returns immediately without waiting for the crawl to finish.

    Returns 200 on task creation, 400 on bad input or dispatch failure.
    """
    # Guard the payload shape: the original indexed args['params']['inputData']
    # directly and 500'd on a missing/odd body.
    args = request.get_json(silent=True) or {}
    poster_id = (args.get('params') or {}).get('inputData')
    if not poster_id:
        return jsonify({'status': 400, 'data': '创建失败!'}), 400

    job_id = get_uuid_str()
    # Launch the crawl through scrapyd; "jobid" overrides scrapyd's default
    # auto-generated id so our task_id and scrapyd's job id match.
    data = {
        "project": 'scrapy_service',
        "spider": 'note_by_poster_id',
        "poster_id": poster_id,
        "job_id": job_id,
        "jobid": job_id  # override scrapyd's default uuid
    }
    job_status = start_scrapyd_job(data)
    if job_status['status'] != 'ok':
        return jsonify({'status': 400, 'data': '创建失败!'}), 400

    db = get_db()
    # Seed the progress entry so the task center can poll it right away.
    set_progress(job_id, {'status': 'pending'})
    formatted_now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    db.insert_one(table="task_records", data={
        'task_id': job_id,
        'start_time': formatted_now,
        'status': 'pending',
        'task_name': poster_id,
        'params': json.dumps(args, ensure_ascii=False),
        # TODO(review): hard-coded placeholder from the disabled JWT path —
        # restore @jwt_required() and look the user up like
        # crawl_notes_by_keyword does.
        'user_id': 2
    })

    return jsonify({'status': 200, 'data': '任务创建成功!'}), 200




