#!/usr/bin/python3
import os, time, json, copy
try:
    import regex as re
except ImportError:
    import re
from urllib.request import urlretrieve

"""
根据automerge.json中的规则合并多个书源，规则见下面json注释
"""

"""json

基本信息：
url -string: 书源链接
name -string: 该书源的标记，不要重复

修改信息，从上到下执行：
excludes -["regex key1", "regex key2"]: 正则字符串列表，任意组名或源名匹配任意key则跳过该源的添加
renames -{"regex key": "value"}: 字典，组名匹配key的正则则用value值替换匹配的组名，源名匹配key则添加名为value的新组
default -string: 如果组是空的则添加默认组
group -string: 不管组是不是空的都添加该分组
updates -{"regex key": {"name": "value"}}: 字典，源名匹配key则用对应字典更新该源

"""

def compile(list):
    """Pre-compile regex rule keys ahead of time for speed.

    list -- either a dict whose keys are regex strings (returns a dict
            mapping compiled patterns to the original values) or a list
            of regex strings (returns a list of compiled patterns).

    NOTE: the parameter keeps its historical name `list` (it shadows the
    builtin) to avoid breaking any keyword-argument callers.
    """
    if isinstance(list, dict):
        return {re.compile(key): value for key, value in list.items()}
    return [re.compile(pattern) for pattern in list]


class Srcs(list):
    """A collection of book sources (a list of source dicts).

    Built from one rule entry of automerge.json: downloads (and caches)
    the source file, then applies the excludes / renames / default /
    group / updates rules described in the module docstring above.
    """
    def __init__(self, srcs=None):
        """Initialize from one rule dict; stays empty when srcs is falsy.

        srcs -- rule dict with keys url, name, excludes, renames,
                default, group, updates (see module docstring).
                Default changed from a mutable `{}` to None to avoid the
                shared-mutable-default pitfall; behavior is unchanged.
        """
        super().__init__()
        if not srcs:
            return
        try:
            # Basic info
            url = srcs['url']
            name = srcs['name']
            # Cache file path; make sure it carries a .json extension.
            temp = os.path.join(cache, 
                               (name if '.json' == os.path.splitext(name)[-1].lower() 
                                     else name + '.json'))
            
            # Modification rules (regex keys pre-compiled for speed)
            excludes = compile(srcs['excludes'])
            renames = compile(srcs['renames'])
            default = srcs['default']
            group = srcs['group']
            updates = compile(srcs['updates'])
        except Exception as e:
            print('Init failed, because {}'.format(e))
            return
        if url:
            try:
                print('\nStart init {}'.format(name))
                if not os.path.exists(temp):
                    print('Update {}...'.format(name))
                    urlretrieve(url, temp)
                with open(temp, 'r', encoding='UTF-8') as f:
                    obj = json.load(f)
                    
                # Iterate backwards so pop(i) never shifts unvisited items.
                for i in range(len(obj) - 1, -1, -1):
                    # Robustness fix: tolerate sources whose group field is
                    # missing or None (previously this raised and aborted
                    # the whole file via the outer except).
                    groups = (obj[i].get('bookSourceGroup') or '').split(",")
                    # excludes attribute: drop the source when any group
                    # name or the source name matches any exclude pattern.
                    is_popped = False
                    for key in excludes:
                        for k in range(len(groups)):
                            if key.search(groups[k]):
                                print('{}:  --  "{}"'.format(name, obj[i]['bookSourceName']))
                                obj.pop(i)
                                is_popped = True
                                break
                        if is_popped:
                            break
                        if key.search(obj[i]['bookSourceName']):
                            print('{}:  --  "{}"'.format(name, obj[i]['bookSourceName']))
                            obj.pop(i)
                            is_popped = True
                            break
                    if is_popped:
                        continue
                    
                    # renames attribute: a group match rewrites the group
                    # name in place; a source-name match appends a new group.
                    for key in renames:
                        for k in range(len(groups)):
                            if key.search(groups[k]):
                                if renames[key]:
                                    print('{}: in "{}", group "{}" -> "{}"'.format(name, 
                                          obj[i]['bookSourceName'], groups[k], renames[key]))
                                else:
                                    # Empty replacement effectively removes the group.
                                    print('{}: in "{}", groups  --  "{}"'.format(
                                          name, obj[i]['bookSourceName'], groups[k]))
                                groups[k] = key.sub(renames[key], groups[k])
                        if key.search(obj[i]['bookSourceName']):
                            print('{}: in "{}", groups  ++  "{}"'.format(
                                  name, obj[i]['bookSourceName'], renames[key]))
                            groups.append(renames[key])
                    
                    # default attribute: only applied when no non-empty group remains.
                    if default and len(list(filter(lambda x: x, groups))) == 0:
                        print('{}: in "{}", groups  ++  "{}" (default)'.format(
                              name, obj[i]['bookSourceName'], default))
                        groups.append(default)
                    
                    # group attribute: always appended when configured.
                    if group:
                        print('{}: in "{}", groups  ++  "{}" (group)'.format(
                              name, obj[i]['bookSourceName'], group))
                        groups.append(group)
                    
                    # updates attribute: merge the update dict into matching sources.
                    for key in updates:
                        if key.search(obj[i]['bookSourceName']):
                            obj[i].update(updates[key])
                            print('{}: in "{}",  +=  "{}"'.format(name, 
                                  obj[i]['bookSourceName'], str(updates[key])))
                    
                    # De-duplicate (keeping first-seen order) and drop empty names.
                    groups = list(filter(lambda x: x, sorted(set(groups), key=groups.index)))
                    obj[i]['bookSourceGroup'] = ','.join(groups)
                self.extend(obj)
            except Exception as e:
                print('init "{}" failed, because {}'.format(name, e))

    
    def concat(self, srcs):
        """Append every source of srcs whose URL is not already present.

        URLs are compared with the http/https scheme stripped.  The
        order field (customOrder for Legado sources, serialNumber
        otherwise) is renumbered to the insertion position.  Returns
        self to allow chaining.
        """
        pattern = re.compile("^https?://")
        # Snapshot of the normalized URLs present *before* this call.
        # Like the original deepcopy snapshot, duplicates inside srcs
        # itself are intentionally not deduplicated against each other;
        # a set lookup replaces the old O(n*m) scan plus deepcopy.
        seen = {pattern.sub("", i['bookSourceUrl']) for i in self}
        for src in srcs:
            if pattern.sub("", src['bookSourceUrl']) in seen:
                continue
            if 'customOrder' in src:
                src['customOrder'] = len(self)
            else:
                src['serialNumber'] = len(self)
            self.append(src)
        return self


def checkCache(path=None):
    """Ensure the cache directory exists and purge stale cache files.

    path -- cache directory to check; defaults to the module-level
            `cache` (backward compatible: existing callers pass nothing).

    A cached source file that has not been modified for more than 3 days
    is deleted so it will be re-downloaded on the next run.
    """
    path = cache if path is None else path
    if not os.path.exists(path):
        os.mkdir(path)
    for entry in os.listdir(path):
        file = os.path.join(path, entry)
        # Stale means: last modification more than 3 days (in seconds) ago.
        if os.path.isfile(file) and time.time() - os.path.getmtime(file) > 3 * 24 * 3600:
            os.remove(file)
            print('"{}" has not been updated for 3 days. Deleted it.'.format(file))


# Script entry: clean the cache, read the merge rules, merge every
# configured source set, and write the combined result to sources.json.
cache = '.cache'
checkCache()
result = Srcs()
try:
    with open("automerge.json", "r", encoding='utf8') as f:
        print('Reading config...')
        jsonData = json.load(f)
except Exception as e:
    # Fix: the original fell through here and crashed below with a
    # NameError on jsonData; without a config there is nothing to do.
    # (Also fixed message typo "you" -> "your".)
    print('Error: {}, check your json file'.format(e))
    raise SystemExit(1)

print('Downloading and concatting...')
for obj in jsonData:
    result.concat(Srcs(obj))
# Write the merged source list (Srcs is a list subclass, so it
# serializes directly).
with open("sources.json", "w", encoding='utf8') as f:
    print('Writing sources...')  # fixed typo "Writting"
    json.dump(result, f, indent=2, ensure_ascii=False)
print('All finished.')
