#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import json
from pathlib import Path
from requests_html import HTMLSession

# OSChina session cookie value; empty until get_session() prompts for it.
oscid = ''
# JSON cache file for the scraped documentation, kept in the user's home
# directory. Path.home() / name replaces the original
# Path(str(Path.joinpath(...))) round-trip, which built the exact same
# path through two redundant conversions.
DB_PATH = Path.home() / '.osccache.json'

def get_session():
    '''
    Build an HTMLSession whose requests carry the oscid cookie.

    Prompts the user for the oscid on first use and stores it in the
    module-level global so later calls reuse it.

    return: requests_html.HTMLSession
    '''
    global oscid
    if not oscid:
        oscid = input('Please input oscid: ')

    sess = HTMLSession()
    sess.headers['Cookie'] = 'oscid=' + oscid
    return sess

def get(url):
    '''
    Fetch the given url with an authenticated session via HTTP GET.

    parameters:
        url: str
    return: requests.Response
    raises: Exception when the response status code is not 200
    '''
    response = get_session().get(url)
    if response.status_code != 200:
        raise Exception('Response status code is not 200')
    return response

def get_links(baseurl):
    '''
    Collect the second-level heading links found in the page sidebar.

    parameters:
        baseurl: str
    return: list of str (baseurl joined with each sidebar href)
    '''
    page = get(baseurl)
    anchors = page.html.find('#sidebar a')
    return [baseurl + anchor.attrs['href'] for anchor in anchors]

def get_content(url):
    '''
    Fetch the article content of the page at the given url.

    parameters:
        url: str
    return: dict with keys 'doc', 'url', 'title' and 'content'
            (the original docstring claimed str, but a dict has
            always been returned)
    '''
    # Compiled once and hoisted out of the helper; the raw string fixes
    # the invalid escape sequence '\s' produced as a warning in recent
    # Python versions.
    whitespace = re.compile(r'\s')

    def text(elements):
        # Concatenate the text of all matched elements, stripping every
        # whitespace character.
        return ''.join(whitespace.sub('', e.text) for e in elements)

    r = get(url)
    table = r.html.find('#article_area .page-entry table')
    title = r.html.find('#page-title')
    doc = r.html.find('#logo-wrap')

    return {
            'doc': text(doc),
            'url': url,
            'title': text(title),
            'content': text(table)
            }

def get_url_content_pair(baseurls):
    '''
    Resolve the second-level links under each base url and fetch the
    page content behind every one of them.

    parameters:
        baseurls: list of str
    return: list of dicts (one per fetched page)
    '''
    links = []
    for base in baseurls:
        links.extend(get_links(base))
    return [get_content(link) for link in links]

def read_db():
    '''
    Load the cached url/content pairs from DB_PATH.

    return: list (the JSON-decoded cache contents)
    raises: Exception when the cache file does not exist
    '''
    if DB_PATH.exists():
        # The context manager guarantees the handle is closed even if
        # json.loads raises (the original leaked the handle in that
        # case). UTF-8 is a safe superset of the ASCII that json.dumps
        # writes by default in build_db.
        with open(DB_PATH, 'r', encoding='utf-8') as f:
            return json.loads(f.read())
    raise Exception("File doesn't exist")

def build_db(baseurls):
    '''
    Fetch the content behind every link under the given base urls and
    write the result to the cache file as JSON.

    parameters:
        baseurls: list of str
    return: list of dicts (the same value that was written)
    '''
    contents = get_url_content_pair(baseurls)
    # open(..., 'w') creates the file itself, so the original
    # exists()/touch() dance was unnecessary; the context manager also
    # guarantees the handle is closed if json.dumps raises.
    with open(DB_PATH, 'w', encoding='utf-8') as f:
        f.write(json.dumps(contents))
    return contents

def search(contents, string):
    '''
    Search the cached pairs for the given string (used as a regex).

    parameters:
        contents: list of dicts with 'title' and 'content' keys
        string: str (regular-expression pattern)
    return: list of matching pairs, content-only matches first and then
            title matches (the original docstring claimed dict | None,
            but a list has always been returned)
    '''
    # Compile once instead of running re.search twice per pair.
    pattern = re.compile(string)
    title_results = []
    content_results = []
    for pair in contents:
        # The original `(t_match and c_match) or t_match` reduces to
        # just `t_match`: a title hit wins regardless of the content.
        if pattern.search(pair['title']):
            title_results.append(pair)
        elif pattern.search(pair['content']):
            content_results.append(pair)
    return content_results + title_results

def read_or_build_db():
    '''
    Return the cached contents, building the cache on first run.

    return: list of url/content dicts
    '''
    try:
        return read_db()
    except Exception:
        # The original `while not contents` retry loop spun forever when
        # the cache legitimately decoded to an empty list (read_db keeps
        # succeeding with a falsy value). A single read-then-build
        # attempt keeps the normal behavior without that hang.
        print("Cache file doesn't exist.")
        print('Building cache, this will take a few minutes...')
        contents = build_db(urls)
        print('Build completed.')
        return contents

# Documentation base URLs to index.
urls = (
        # shopping module
        'http://doc.oschina.net/toozan.mp.api.shop',
        # mall module
        'http://doc.oschina.net/toozan.mp.api.mall',
        # user module
        # NOTE: the trailing comma below was missing in the original, so
        # implicit string concatenation silently fused this URL with the
        # consignee URL into one bogus 7th entry and both modules were
        # never indexed.
        'http://doc.oschina.net/toozan.mp.api.user',
        # consignee (shipping address) module
        'http://doc.oschina.net/toozan.mp.api.consignee',
        # main-store interface
        'http://doc.oschina.net/toozan.multi.mi.main',
        # shopping module (multi-store)
        'http://doc.oschina.net/toozan.multi.mi.shop',
        # mall module (multi-store)
        'http://doc.oschina.net/toozan.multi.mi.mall',
        # user module (multi-store)
        'http://doc.oschina.net/toozan.multi.mi.user'
)

if __name__ == '__main__':
    try:
        contents = read_or_build_db()
        # Interactive query loop; Ctrl-C exits.
        while True:
            string = input('query> ')

            # '\build' forces a cache rebuild instead of a search.
            if string == '\\build':
                print('Rebuild cache, this will take a few minutes...')
                contents = build_db(urls)
                print('Build completed.')
                continue

            results = search(contents, string)
            if not results:
                print('No match results.')
                continue
            for pair in results:
                print(pair['doc'] + ': ' + pair['title'])
                print(pair['url'])
                print('')
    except KeyboardInterrupt:
        print('\nbye.')
