# -*- coding: UTF-8 -*-
"""
Created on 2017年12月1日
@author: Leo
"""

# 内部库
import os

# 第三方库
from flask import jsonify
from flask_restful import Resource, reqparse

# 项目内部库
from scl_spider_backend.common.mgo import MgoConf


class SpiderSearch(Resource):
    """REST resource serving library-search results.

    On a cache hit the cached MongoDB documents are returned directly; on a
    miss the Scrapy spiders are launched to populate the cache, after which
    the database is queried again.
    """

    def __init__(self):
        # MongoDB connection helper
        self.mgo = MgoConf()

        # Request parser shared by the endpoints; accepts values from either
        # the query string or a JSON body.
        self.get_parser = reqparse.RequestParser()
        self.get_parser.add_argument('searchName', location=['args', 'json'], type=str)
        self.get_parser.add_argument('pn', location=['args', 'json'], type=int)
        self.get_parser.add_argument('t_id', location=['args', 'json'], type=str)

    def run_process(self, spider_id, spider_name, command, name):
        """
        Execute a crawl command and record its status in MongoDB.
        :param spider_id: spider task id
        :param spider_name: spider name
        :param command: shell command line to execute
        :param name: search keyword the crawl was started for
        """
        # SECURITY: `command` embeds user-supplied text (searchName) and is
        # handed to a shell — this is vulnerable to command injection.
        # Prefer subprocess.run([...], shell=False) with an argument list;
        # kept as-is here because the raw command string is also persisted
        # to MongoDB below and callers pass a pre-built string.
        os.system(command)
        self.mgo.insert_data(data={"spiderId": spider_id,
                                   "spiderName": spider_name,
                                   "command": command,
                                   "status": False,
                                   "name": name},
                             db_name="tb_spiderStatus")

    def construct_task(self, json_res):
        """Build and run one `scrapy crawl` command per configured spider.

        :param json_res: parsed request args with 'searchName', 'pn', 't_id'
        """
        spider_list = ["GZ_Lib", 'school_library']
        for spider_name in spider_list:
            command = \
                r"scrapy crawl {} -a text={} -a page={}".format(spider_name,
                                                                json_res['searchName'],
                                                                str(json_res['pn']))
            self.run_process(json_res['t_id'], spider_name, command, json_res['searchName'])

    def post(self):
        """Return cached search results, crawling first on a cache miss."""
        # Parse the request arguments.
        args = self.get_parser.parse_args()
        search_name = args['searchName']
        page_num = args['pn']

        # BUG FIX: 'pn' is parsed with type=int, so a missing value is None —
        # never the empty string the old code compared against (which let
        # str(None) leak into the DB query).  Default any falsy value to page 1.
        if not page_num:
            page_num = 1

        # Query the MongoDB cache.
        def search_from_db(name, pn):
            return self.mgo.find_data(db_name="tb_library",
                                      query={"searchName": name,
                                             "searchPage": str(pn)},
                                      page_num=0)

        data = search_from_db(name=search_name, pn=page_num)

        if data['data']:
            # Cache hit — return the stored documents.
            return jsonify(data)

        # Cache miss: locate the sibling "scl_spider" project directory and
        # run the spiders from there (scrapy must be invoked from its project
        # root; a relative lookup avoids hard-coding an absolute path).
        current_path = os.getcwd()
        parent_path = os.path.abspath(os.path.join(current_path, os.pardir))
        # Raises IndexError if the project directory is missing, matching the
        # previous filter(...)[0] behavior.
        target = [file for file in os.listdir(parent_path) if file == "scl_spider"][0]
        # BUG FIX: os.path.join is portable; the old '"\\"' concatenation
        # only worked on Windows.
        # NOTE(review): os.chdir mutates process-wide state — confirm the
        # server runs single-threaded, or pass cwd to the subprocess instead.
        os.chdir(os.path.join(parent_path, target))

        # Run the Scrapy spiders, then re-query the now-populated cache.
        self.construct_task(json_res=args)
        return jsonify(search_from_db(name=search_name, pn=page_num))
