# -*- coding: UTF-8 -*-
"""
Created on 2018年5月4日
@author: Leo
"""

# Standard library
import os
import json
import platform
import multiprocessing
from subprocess import Popen, PIPE, getstatusoutput

# Third-party libraries
from flask import jsonify
from flask_restful import Resource, reqparse

# Project-internal libraries
from scl_spider_backend.common.mgo import MgoConf


class SpiderArticle(Resource):
    """REST resource that launches Scrapy spiders for a task and records
    each launch in MongoDB."""

    # Spiders launched for every task.  ("OpenChina" was removed from the
    # original list and is kept here only as a historical note.)
    SPIDER_LIST = ["Juejin", "Jianshu", "SegmentFault"]

    def __init__(self):
        # Database connection object (MongoDB wrapper).
        self.mgo = MgoConf()

        # Request parser; arguments may arrive either in the query string
        # ('args') or in a JSON body ('json').
        self.get_parser = reqparse.RequestParser()
        self.get_parser.add_argument('text', location=['args', 'json'], type=str)
        self.get_parser.add_argument('tp', location=['args', 'json'], type=int)
        self.get_parser.add_argument('t_id', location=['args', 'json'], type=str)

    @staticmethod
    def return_center(system: str, path: str, msg: str, code: int):
        """
        Build the uniform JSON response envelope.

        :param system: operating-system name (e.g. "Windows")
        :param path: working directory reported to the caller
        :param msg: human-readable status message
        :param code: application status code
        :return: Flask JSON response
        """
        return jsonify({"System": system,
                        "WorkPath": path,
                        "msg": msg,
                        "code": code})

    def run_process(self, spider_id, spider_name, command):
        """
        Run one spider command and persist a launch record.

        NOTE(security): ``command`` embeds user-supplied request text and is
        executed through the shell via ``os.system`` (the Windows ``start``
        builtin requires a shell).  Input should be sanitised upstream.

        :param spider_id: task id the spider run belongs to
        :param spider_name: name of the Scrapy spider
        :param command: full shell command used to start the spider
        """
        os.system(command)
        # Record the launch; "status": False marks the run as not finished.
        self.mgo.insert_data(data={"spiderId": spider_id,
                                   "spiderName": spider_name,
                                   "command": command,
                                   "status": False},
                             db_name="tb_spiderStatus")

    def construct_task(self, json_res):
        """
        Build and launch one crawl command per configured spider.

        :param json_res: parsed request arguments with keys 'text', 'tp'
                         (total pages) and 't_id' (task id)
        """
        for spider_name in self.SPIDER_LIST:
            command = r"start scrapy crawl {} -a text={} -a total_page={} -a t_id={}".format(
                spider_name,
                json_res['text'],
                json_res['tp'],
                str(json_res['t_id']))
            self.run_process(json_res['t_id'], spider_name, command)

    def post(self):
        """
        Launch the configured spiders for the task described by the request.

        Switches the working directory to the sibling ``scl_spider`` Scrapy
        project (so ``scrapy crawl`` resolves the right project) and, on
        Windows, starts one detached process per spider.

        :return: JSON envelope describing the outcome
        """
        # Parse and validate the request arguments.
        args = self.get_parser.parse_args()

        # Locate the sibling "scl_spider" Scrapy project relative to the
        # current working directory (avoids a hard-coded absolute path).
        parent_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
        if "scl_spider" not in os.listdir(parent_path):
            # Fail gracefully instead of crashing with an IndexError.
            return self.return_center(platform.system(), parent_path,
                                      "scl_spider project not found", 200)
        # os.path.join keeps this portable; the original hard-coded "\\",
        # which broke the Linux branch below.
        os.chdir(os.path.join(parent_path, "scl_spider"))

        system = platform.system()
        if system == "Windows":
            # Launch the Scrapy spiders.
            self.construct_task(json_res=args)
            return self.return_center(str(system), str(os.getcwd()), "Success", 200)
        elif system == "Linux":
            # Spider launching is not implemented for Linux yet; the
            # directory switch above still succeeds, so report Success.
            return self.return_center(str(system), str(os.getcwd()), "Success", 200)
        else:
            return self.return_center("Unknown System", "", "Failed", 200)
