# coding=UTF-8
# author=suemi
# created at 16/5/13

import os

from entity.Article import Article
from utils.Constants import DATA_DIR, DATETIME_FORMAT, DATE_FORMAT
import datetime



class ArticleIterator(object):
    """
    Lazy-loading iterator over stored Article files.

    Articles are only read from disk (via PageUtil.load) when the
    iterator is advanced, so creating an iterator over a large result
    set is cheap.
    """

    def __init__(self, pathList):
        # :param pathList: list of file paths pointing at Article JSON files
        self.paths = pathList
        self.count = 0              # index of the next path to load
        self.size = len(pathList)

    def __iter__(self):
        return self

    def next(self):
        """
        Load and return the next Article.

        :return: the Article loaded from the next path (may be None if
                 PageUtil.load finds no file at that path)
        :raises StopIteration: when all paths have been consumed
        """
        if self.count < self.size:
            self.count += 1
            return PageUtil.load(self.paths[self.count - 1])
        raise StopIteration()

    # Python 3's iterator protocol calls __next__; alias it so the class
    # works unchanged under both Python 2 and Python 3.
    __next__ = next

    def hasNext(self):
        """
        :return: True if at least one more article remains to be read
        """
        return self.count < self.size


class PageUtil(object):
    """
    Storage, statistics, reading and lookup of crawled pages.

    Implements a builder-style fluent interface, e.g.::

        PageUtil.condition('20160519', 3).source('netease').cursor(100).limit(50).iterator()
    """

    def __init__(self, crawledAt, delta):
        # :param crawledAt: date string formatted per DATE_FORMAT (e.g. '20160519')
        # :param delta: number of days, counting backwards from crawledAt, to include
        self.crawledAt = crawledAt
        self.delta = delta
        self.start = 0          # cursor offset into the matched page list
        self.site = None        # optional source-site filter
        self.limitNum = None    # optional cap on the number of pages returned

    @staticmethod
    def save(article):
        """
        Persist a single Article as JSON under DATA_DIR/<date>[/<site>]/.

        :param article: Article providing crawledAt, site, identify() and toJson()
        """
        path = DATA_DIR + "/" + article.crawledAt.strftime(DATE_FORMAT)
        if article.site is not None:
            path += "/" + article.site
        if not os.path.exists(path):
            os.makedirs(path)
        path += "/" + article.identify() + ".json"
        with open(path, 'w') as writer:
            writer.write(article.toJson())

    @staticmethod
    def load(filePath):
        """
        Read an Article back from a JSON file.

        :param filePath: path of the JSON file to read
        :return: the deserialized Article, or None if the file does not exist
        """
        if not os.path.isfile(filePath):
            return None
        with open(filePath, "r") as fh:
            return Article().fromJson(fh.read())

    @staticmethod
    def find(article):
        """
        Compute the storage path for an article.

        :param article: Article instance
        :return: the path if the corresponding file exists, otherwise None
        """
        path = DATA_DIR + "/" + article.crawledAt.strftime(DATE_FORMAT)
        if article.site is not None:
            path += "/" + article.site
        path += "/" + article.identify() + ".json"
        return path if os.path.isfile(path) else None

    @staticmethod
    def condition(crawledAt, delta=1):
        """
        Builder entry point: select the date range to operate on.

        :param crawledAt: most recent crawl date, formatted per DATE_FORMAT
        :param delta: how many days backwards from crawledAt to include
        :return: a new PageUtil builder
        """
        return PageUtil(crawledAt, delta)

    def limit(self, count):
        """
        Cap the number of pages returned.

        :param count: maximum number of pages; must be >= 0
        :return: self (fluent)
        :raises ValueError: if count is negative
        """
        if count < 0:
            raise ValueError("Page Num cannot be less than 0")
        self.limitNum = count
        return self

    def cursor(self, start):
        """
        Set the starting offset within the selected range.

        :param start: zero-based index of the first page to return
        :return: self (fluent)
        """
        self.start = start
        return self

    def source(self, source):
        """
        Restrict results to one crawl source (site sub-directory).

        :param source: site name, e.g. 'netease'
        :return: self (fluent)
        """
        self.site = source
        return self

    def list(self):
        """
        :return: the selected page paths, honouring cursor() and limit()
        :raises IndexError: if the cursor lies beyond the candidate list
        """
        candidates = self.all()
        if self.start > len(candidates):
            raise IndexError
        size = len(candidates) if self.limitNum is None else self.limitNum
        return candidates[self.start:min(self.start + size, len(candidates))]

    def read(self):
        """
        :return: every selected page, loaded eagerly as an Article object
        """
        return [PageUtil.load(page) for page in self.list()]

    def iterator(self):
        """
        :return: a lazy iterator producing Article objects for the selection
        """
        return ArticleIterator(self.list())

    def all(self):
        """
        :return: paths of all pages selected by condition() and source(),
                 sorted ascending by the datetime encoded in each file name
        """

        def fetchDir(dirPath):
            # Recursively collect every file path under dirPath.
            if os.path.isfile(dirPath):
                return [dirPath]
            candidates = [dirPath + "/" + name for name in os.listdir(dirPath)]
            results = [c for c in candidates if os.path.isfile(c)]
            for sub in candidates:
                if os.path.isdir(sub):
                    results += fetchDir(sub)
            return results

        def timestamp(path):
            # File names look like '<DATETIME>_<suffix>.json'; the datetime
            # part (everything before the last '_') parses with DATETIME_FORMAT.
            stem = "_".join(path.split("/")[-1].split("_")[:-1])
            return datetime.datetime.strptime(stem, DATETIME_FORMAT)

        def validate(fileName):
            # Keep only files whose name carries a parseable datetime prefix.
            try:
                timestamp(fileName)
            except ValueError:
                return False
            return True

        specified = datetime.datetime.strptime(self.crawledAt, DATE_FORMAT)
        dateList = [(specified - datetime.timedelta(i)).strftime(DATE_FORMAT)
                    for i in range(self.delta)]
        dirList = [DATA_DIR + "/" + d for d in dateList]
        if self.site is not None:
            dirList = [d + "/" + self.site for d in dirList]
        dirList = [d for d in dirList if os.path.isdir(d)]
        paths = []
        for d in dirList:
            paths += fetchDir(d)
        paths = [p for p in paths if validate(p)]
        # BUG FIX: the original passed a cmp-style lambda that returned a
        # bool (1 or 0, never negative), so list.sort could never order the
        # paths correctly -- and the cmp form does not exist on Python 3.
        # Sorting by a datetime key is correct on both.
        paths.sort(key=timestamp)
        return paths
