import inspect
import logging
import os
import sys
import requests
import json
import time
import gzip
import datetime
import multiprocessing

# HTTP timeout (seconds) for every request made by this script.
TIMEOUT = 200

# Example workflow for fetching "page" records;
# the "click" and "biz" endpoints work the same way.

# Number of documents the API returns per request.
DOCS_PER_REQ = 100

# Template URL for fetching a batch of pages starting at a given id.
CONTENT_URL = 'http://weixindata.pullword.com:12345/page/%s?auth_usr=youkayun'
# Size of the multiprocessing worker pool.
N_PROCESS = 25
# Approximate number of result lines accumulated per output .gz file.
Line_PER_FILE = N_PROCESS * DOCS_PER_REQ * 1


def lineno():
    """Return the source line number of the call site (used in log messages)."""
    caller = inspect.currentframe().f_back
    return caller.f_lineno


# 获取page范围
# Fetch the valid page id range from the remote API.
def get_page_range():
    """Return (min_id, max_id) of available pages as ints, or None on failure.

    Any problem -- non-200 HTTP status, an API-reported error field, or a
    network/parse exception -- is logged and printed, and None is returned
    so the caller can retry later.
    """
    url = 'http://weixindata.pullword.com:12345/page/range?auth_usr=youkayun'
    try:
        r = requests.get(url, timeout=TIMEOUT)
        if r.status_code == 200:
            rst = json.loads(r.text)
            if 'error' in rst:
                # Carry the API's error text so the log line below is useful
                # (previously an empty Exception() was raised and the message lost).
                raise Exception("API error: %s" % rst.get('error'))
            # smallest and largest page ids currently available
            return int(rst['minid']), int(rst['maxid'])
        else:
            # Non-200 HTTP status.
            msg = "Error get url: %s with code: %d" % (url, r.status_code)
            logging.error(msg)
            print(msg)
            raise Exception(msg)
    except Exception as e:
        msg = "Error with exception %s " % str(e)
        logging.error(msg)
        print(msg)
        return None


# 获取page内容
# Fetch one batch of page records starting at the given id.
def get_page_content(page_start_id):
    """Fetch the batch of pages beginning at *page_start_id*.

    Returns a list of (page_id, url, title, content, ts) tuples on success,
    or None on any failure (logged with the source line number).
    """
    # Use the module-level template; previously the same literal was inlined here.
    req_url = CONTENT_URL % page_start_id
    items = None
    try:
        r = requests.get(req_url, timeout=TIMEOUT)
        if r.status_code == 200:
            items = json.loads(r.text)
            rst = []
            # 'pages' is an array of records mirroring the page table columns.
            for item in items['pages']:
                # Keep a distinct name for each field; the original bound
                # item['url'] to the name `url`, shadowing the request URL.
                rst.append((item['id'], item['url'], item['title'],
                            item['content'], item['ts']))
            return rst
        else:
            # Non-200 HTTP status; keep the message on the exception so the
            # handler below logs something meaningful.
            msg = "Error get url: %s with code: %d" % (req_url, r.status_code) + " in line " + str(lineno())
            logging.error(msg)
            raise Exception(msg)
    except Exception as e:
        # Dump the raw payload (or None) to aid debugging malformed responses.
        print(items)
        msg = "Error with exception %s " % str(e) + " in line " + str(lineno())
        logging.error(msg)
        return None


def preprocess(x):
    """Flatten *x* to one line: each line break becomes a single space.

    "\r\n" is handled first so a CRLF pair collapses to one space, not two.
    """
    for newline in ("\r\n", "\r", "\n"):
        x = x.replace(newline, ' ')
    return x


def write_gzip(file, content):
    """Append *content* (str), UTF-8 encoded, to the gzip file at path *file*.

    The file is created if missing. A context manager guarantees the handle
    is closed even when the write raises (the original leaked it on error).
    """
    with gzip.open(file, 'ab') as f:
        f.write(bytes(content, 'UTF-8'))


def write_conf(conf_file, cont):
    """Replace the contents of *conf_file* with the string *cont*."""
    with open(conf_file, 'w+') as handle:
        handle.write(cont)


def read_conf(conf_file):
    """Read resume state from *conf_file*, reconciled with the live id range.

    The conf file holds one line "max_id,start_id,end_id,step" (the format
    written by write_conf callers); field 0 is ignored in favor of the
    freshly fetched range.

    Returns (id_begin, max_id, min_id, id_end) as ints.

    NOTE(review): get_page_range() returns None on failure, which would raise
    a TypeError at the unpack below -- confirm callers tolerate the crash.
    """
    with open(conf_file) as fd:
        # Fresh range from the API; overrides the stale max_id stored on disk.
        min_id, max_id = get_page_range()
        conf = fd.readline().split(",")
        id_begin = int(conf[1])
        id_end = int(conf[2])

        # Never resume below the smallest id the server still serves.
        id_begin = max(id_begin, int(min_id))

    return id_begin, int(max_id), int(min_id), id_end


# page主流程样例（需要额外增加异常处理）
# Main per-batch page flow (extra error handling still needed).
def get_page_100(start_id):
    """Download one batch of pages beginning at *start_id*.

    Returns (lines, length, timestamp, end_id): the rows joined as
    tab-separated fields / newline-separated records, the row count, the
    first truthy ts found in the batch (0 when the batch is empty), and the
    next id to resume from.
    """
    batch = get_page_content(str(start_id))
    length = len(batch) if batch else 0
    lines = ''
    timestamp = 0
    end_id = start_id + DOCS_PER_REQ
    if length == 0:
        # Empty batch: data has not reached this id yet, back off briefly.
        time.sleep(0.1)
    else:
        rows = []
        for record in batch:
            rows.append('\t'.join([preprocess(str(field)) for field in record]))
        lines = '\n'.join(rows)
        # Pick the first truthy ts in the batch as the representative timestamp.
        for record in batch:
            timestamp = record[-1]
            if timestamp:
                break

        # Resume after the largest id actually seen.
        end_id = max([int(record[0]) for record in batch])
        if end_id == start_id:
            end_id += length
    print("%s %s %s" % (start_id, length, timestamp))
    return lines, length, timestamp, int(end_id)


def start_process():
    """Pool initializer: announce which worker process is starting."""
    worker = multiprocessing.current_process()
    print('Starting ', worker.name)


def download_multi_process(pool, start_id, max_id, save_dir, conf, num_per_file=Line_PER_FILE):
    """Download batches in parallel and append them to one gzip output file.

    Runs up to num_per_file / (N_PROCESS * DOCS_PER_REQ) rounds. Each round
    fans N_PROCESS batch requests out over *pool*, concatenates the rows,
    and appends them to a timestamp-named .gz file under *save_dir*.
    Progress ("max_id,start_id,end_id,step") is persisted to *conf* after
    every round. Returns the next start_id to resume from.
    """

    # Rounds needed so one output file holds roughly num_per_file lines.
    n = int(num_per_file / N_PROCESS / DOCS_PER_REQ)

    save_file = ""

    for i in range(0, n):
        if max_id - start_id < DOCS_PER_REQ:
            # Less than one full batch remains -- stop and let the caller re-check.
            msg = "Data overhead!"
            print(msg)
            logging.error(msg)
            break
        content = ""
        # pool = multiprocessing.Pool(processes=N_PROCESS, initializer=start_process, )
        # One starting id per worker, spaced DOCS_PER_REQ apart.
        start_ids = [start_id + i * DOCS_PER_REQ for i in range(0, N_PROCESS)]
        pool_outputs = pool.map(get_page_100, start_ids)

        lengths = []
        timestamps = []
        end_ids = []
        # Each worker returns (lines, length, timestamp, end_id); see get_page_100.
        for d in pool_outputs:
            lines, length, timestamp, end_id = d
            content += lines + '\n'
            timestamps.append(timestamp)
            lengths.append(length)
            end_ids.append(end_id)

        # Earliest truthy timestamp across the round names the output file.
        timestamps = [t for t in timestamps if t]
        if timestamps:
            timestamp = min(timestamps)
        else:
            timestamp = None

        # Furthest id actually seen decides where to resume.
        end_ids = [t for t in end_ids if t]
        if end_ids:
            end_id = max(end_ids)
        else:
            end_id = start_id + DOCS_PER_REQ

        lengths = [t for t in lengths if t]
        if lengths:
            length = max(lengths)
        else:
            length = 0

        print('3 %d' % start_id)

        # Whole window empty: skip ahead, persist progress, write no file.
        if not timestamp:
            start_id += DOCS_PER_REQ
            write_conf(conf, "%d,%d,%d,%d" % (max_id, start_id, end_id, DOCS_PER_REQ))
            continue
        if not save_file:
            # Name the output file after the round's earliest timestamp.
            date = datetime.datetime.fromtimestamp(int(timestamp)).strftime("%Y%m%d_%H%M%S") + ".gz"
            save_file = save_dir + date

        # Advance the cursor: past the data when we got some, by a batch when
        # there is room, otherwise by a single id.
        if length > 0:
            start_id = end_id
        elif length == 0 and max_id - start_id > DOCS_PER_REQ:
            start_id += DOCS_PER_REQ
        else:
            start_id += 1
        print('4 %d ' % start_id)

        try:
            if content:
                write_gzip(save_file, content)
        except Exception as e:
            msg = 'Error: ' + str(e) + " in line " + str(lineno())
            logging.error(msg)
            print(msg)

        try:
            # NOTE(review): the literal 100 here looks like a hard-coded
            # DOCS_PER_REQ -- confirm before changing.
            write_conf(conf, "%d,%d,%d,%d" % (max_id, start_id, end_id + 100, DOCS_PER_REQ))
        except Exception as e:
            msg = "Exception %s" % str(e) + " in line " + str(lineno())
            logging.error(msg)

    return start_id


def download_wx_page_all(save_dir, conf, pool):
    """Repeatedly run multi-process download rounds until the id range is
    nearly exhausted or a fixed round cap is hit."""
    start_id, max_id, min_id, end_id = read_conf(conf)
    start_id = max(min_id, start_id)
    max_rounds = 100

    rounds = 0
    # Stop once fewer than Line_PER_FILE ids remain before max_id.
    while rounds < max_rounds and max_id >= start_id + Line_PER_FILE:
        start_id = download_multi_process(pool, start_id, max_id, save_dir, conf, Line_PER_FILE)
        rounds += 1


def keep_one_instance():
    """Exit with status 1 if another process is already running this script.

    Scans `ps aux` output for command lines containing this file's name,
    excluding the grep itself. NOTE(review): substring matching is fragile --
    any unrelated process whose command line mentions the file name counts.
    """
    file = os.path.basename(__file__)
    # os.popen().read() already yields the full text; split into process lines.
    processes = os.popen("ps aux | grep %s" % file).read().split("\n")
    # Drop the grep process itself before counting.
    candidates = [line for line in processes if "grep" not in line]
    n = sum(file in line for line in candidates)
    if n > 1:
        msg = "another process is running %s" % file
        print(msg)
        # sys.exit instead of the site-provided exit(): works even when the
        # `site` module's interactive helpers are unavailable.
        sys.exit(1)


if __name__ == "__main__":
    # Refuse to start if another instance of this script is already running.
    keep_one_instance()
    # CLI: <conf_file> <save_dir> <log_dir>
    conf = sys.argv[1]
    save_dir = sys.argv[2]
    log_dir = sys.argv[3]

    # Today's date (YYYY-MM-DD) tags the log file name.
    timestamp = str(datetime.datetime.today())[0:10]
    pool = multiprocessing.Pool(processes=N_PROCESS, initializer=start_process, )

    logfile = log_dir + '/' + os.path.basename(__file__).split('.')[0] + '-' + timestamp + '.log'
    # write logs
    logging.basicConfig(filename=logfile, level=logging.ERROR, format='%(asctime)s %(levelname)s:%(message)s')

    download_wx_page_all(save_dir, conf, pool)
    # No further tasks will be submitted; wait for workers to finish.
    pool.close()
    pool.join()
