#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@create Time:2018-11-22

@author:Brook
"""
import re
from datetime import datetime
from functools import lru_cache
from itertools import groupby
from json import JSONDecodeError

import requests


class ScrapydClient:
    """Thin HTTP client for the Scrapyd web-service JSON API."""

    # Seconds before an HTTP call to scrapyd is abandoned; without a timeout
    # a dead/unreachable node would hang the caller forever.
    TIMEOUT = 10

    def __init__(self, ip, port):
        self.base_url = "http://%s:%s" % (ip, port)

    def is_valid(self):
        """Return True if the scrapyd service answers its status endpoint.

        Any network failure, timeout, or non-JSON reply is reported as an
        unavailable service (False) instead of raising.
        """
        # BUG FIX: the endpoint is /daemonstatus.json, not /daemonstatus;
        # the old path got a 404 HTML page, so .json() always raised and
        # this method could never return True.
        url = self.base_url + "/daemonstatus.json"
        try:
            rsp = requests.get(url, timeout=self.TIMEOUT)
            d = rsp.json()
        except (requests.RequestException, ValueError):
            # RequestException covers connection errors *and* timeouts;
            # ValueError covers every JSONDecodeError variant (json,
            # simplejson, requests' own) since they all subclass it.
            return False
        return d.get("status") == "ok"

    def schedule_scrapy(self, post_data):
        """POST *post_data* to /schedule.json and return the decoded reply."""
        url = self.base_url + "/schedule.json"
        rsp = requests.post(url, data=post_data, timeout=self.TIMEOUT)
        return rsp.json()

    def list_projects(self):
        """Return the list of project names deployed on this node."""
        url = self.base_url + "/listprojects.json"
        rsp = requests.get(url, timeout=self.TIMEOUT)
        return rsp.json()["projects"]

    def list_spiders(self, project):
        """Return the spider names of *project*."""
        url = self.base_url + "/listspiders.json"
        # params= URL-encodes the project name; the old %-interpolation
        # broke on names containing '&', spaces, etc.
        rsp = requests.get(url, params={"project": project}, timeout=self.TIMEOUT)
        return rsp.json()["spiders"]

    def list_jobs(self, project):
        """Return the latest jobs of *project*, grouped by spider.

        Returns:
            dict: ``{spider_name: [(job_id, start_time, spider_name), ...]}``
            holding at most the three most recently started jobs (running
            or finished) per spider, newest first.
        """
        url = self.base_url + "/listjobs.json"
        rsp = requests.get(url, params={"project": project}, timeout=self.TIMEOUT)
        data = rsp.json()
        # .get(..., []) keeps a partial reply from raising KeyError.
        jobs = [(job["id"], job["start_time"], job["spider"])
                for job in data.get("running", []) + data.get("finished", [])]

        def start_dt(job):
            # scrapyd timestamps look like "2018-11-22 10:20:30.123456";
            # strip the fractional seconds before parsing.  Raw string fixes
            # the invalid-escape warning of the old "\..*$" literal.
            return datetime.strptime(re.sub(r"\..*$", "", job[1]),
                                     "%Y-%m-%d %H:%M:%S")

        results = {}
        # groupby requires its input pre-sorted by the same key (spider name).
        for spider, group in groupby(sorted(jobs, key=lambda j: j[-1]),
                                     key=lambda j: j[-1]):
            results[spider] = sorted(group, key=start_dt, reverse=True)[:3]
        return results

    def get_logurl(self, project, spider, logid):
        """Build the URL of a job's log file on this node."""
        return self.base_url + "/logs/%s/%s/%s.log" % (project, spider, logid)

    @classmethod
    @lru_cache(5)
    def get_cli(cls, ip, port):
        """Memoised constructor: one shared client per (ip, port) pair."""
        return cls(ip, port)

    

if __name__ == "__main__":
    # Demo: fetch the recent jobs of the "news" project from one node.
    host, http_port = "192.168.2.12", "6800"

    client = ScrapydClient.get_cli(host, http_port)
    print(client.list_jobs("news"))
