import inspect
import logging
import os
import sys
import requests
import json
import time
import gzip
import datetime

# Example of the page-fetching workflow.
# The "click" and "biz" endpoints work the same way.
# Number of records accumulated into each gzip output file.
LINE_PER_FILE = 20000
# Number of documents the API returns per request (id step size).
DOCS_PER_REQ = 1000
# HTTP request timeout, in seconds.
TIMEOUT = 300
# Account name passed as the auth_usr query parameter.
USER_NAME = "youkayun"
# Endpoint reporting the min/max available record ids.
RANGE_URL = 'http://weiboapi.pullword.com:22345/weibo/range?auth_usr=%s'
# Endpoint returning record content starting at a given id.
CONTENT_URL = 'http://weiboapi.pullword.com:22345/weibo/%s?auth_usr=%s'
# Field names extracted from each record, in output column order.
TERMS = ['id', 'crawler_time', 'crawler_time_stamp', 'is_retweet', 'user_id', 'nick_name', 'tou_xiang', 'user_type',
         'weibo_id', 'weibo_content', 'zhuan', 'ping', 'zhan', 'url', 'device', 'locate', 'time', 'time_stamp',
         'r_user_id', 'r_nick_name', 'r_user_type', 'r_weibo_id', 'r_weibo_content', 'r_zhuan', 'r_ping', 'r_zhan',
         'r_url', 'r_device', 'r_location', 'r_time', 'r_time_stamp', 'pic_content']


def lineno():
    """Return the source line number of the caller's frame."""
    caller_frame = inspect.currentframe().f_back
    return caller_frame.f_lineno


# Fetch the available page id range
def get_page_range():
    """Query the range endpoint and return (min_id, max_id) as ints.

    Returns:
        (int, int) on success, or None on any failure (non-200 status,
        in-band API error, network/JSON problems).  Failures are logged.
    """
    url = RANGE_URL % USER_NAME
    try:
        # Fetch the range document over HTTP.
        r = requests.get(url, timeout=TIMEOUT)
        if r.status_code != 200:
            # Non-200 HTTP status: carry the detail in the exception so
            # the handler below logs a meaningful message (the original
            # raised a bare Exception() and logged an empty one).
            raise Exception("Error get url: %s with code: %d" % (url, r.status_code))
        rst = json.loads(r.text)
        if 'error' in rst:
            # The API signals failure in-band via an 'error' key.
            raise Exception("API error for url %s: %s" % (url, rst['error']))
        # Smallest and largest available record ids.
        return int(rst['minid']), int(rst['maxid'])
    except Exception as e:
        msg = "Error with exception %s " % str(e)
        logging.error(msg)
        return None


# Fetch page content
def get_content(start_id):
    """Fetch one batch of records starting at *start_id*.

    Args:
        start_id: first record id of the batch (str or int).

    Returns:
        list of record dicts on success, or None on any failure
        (non-200 status, network/JSON problems).  Failures are logged.
    """
    url = CONTENT_URL % (start_id, USER_NAME)
    try:
        # Fetch the batch over HTTP.
        r = requests.get(url, timeout=TIMEOUT)
        if r.status_code != 200:
            # Carry the detail in the exception so the handler below
            # logs a meaningful message (the original raised a bare
            # Exception() and logged an empty one).
            raise Exception("Error get url: %s with code: %d" % (url, r.status_code))
        items = json.loads(r.text)
        # The payload carries the record array under the 'weibo' key.
        return list(items['weibo'])
    except Exception as e:
        msg = "Error with exception %s " % str(e)
        logging.error(msg)
        return None


def preprocess(x):
    """Collapse CR/LF line breaks in *x* into single spaces."""
    # Order matters: "\r\n" must be handled first so a Windows line
    # break becomes one space, not two.
    for line_break in ("\r\n", "\r", "\n"):
        x = x.replace(line_break, ' ')
    return x


def write_gzip(file, content):
    """Append *content* (str) to the gzip file *file* as UTF-8.

    Uses a context manager so the handle is closed even if the write
    raises (the original leaked the open handle on error).
    """
    with gzip.open(file, 'ab') as f:
        f.write(bytes(content, 'UTF-8'))


def write_conf(conf_file, cont):
    """Overwrite *conf_file* with the text *cont*."""
    fd = open(conf_file, 'w+')
    try:
        fd.write(cont)
    finally:
        fd.close()


def read_conf(conf_file):
    """Read saved crawl state and combine it with the server-side range.

    The conf line format is "max_id,start_id,end_id,docs_per_req"
    (as written by write_conf in download()); only fields 1 and 2
    are used here.

    Args:
        conf_file: path to the checkpoint file.

    Returns:
        (id_begin, max_id, min_id, id_end) as ints, where id_begin is
        clamped to be no smaller than the server's minimum id.

    Raises:
        TypeError: if get_page_range() fails and returns None (same
        behavior as the original unpacking).
    """
    # Read the checkpoint first so the file handle is not held open
    # across the network round-trip inside get_page_range().
    with open(conf_file) as fd:
        conf = fd.readline().split(",")
    id_begin = int(conf[1])
    id_end = int(conf[2])

    min_id, max_id = get_page_range()
    # Never resume below the oldest id the server still has.
    id_begin = max(id_begin, int(min_id))

    return id_begin, int(max_id), int(min_id), id_end


# Example main flow for pages (additional exception handling still needed)
def get_data(start_id):
    """Fetch one batch of records starting at *start_id*.

    Returns a tuple (lines, length, timestamp, end_id):
      lines     -- tab-separated TERMS fields, one record per line ("" if empty)
      length    -- number of records fetched (0 on empty batch or fetch failure)
      timestamp -- first non-empty 'crawler_time_stamp' found, else 0
      end_id    -- id to continue from on the next call
    """
    # Fetch the range
    # min_id, max_id = get_page_range()
    # start_id = max(int(min_id), int(start_id))

    # Fetch the page content
    rst = get_content(str(start_id))
    length = len(rst) if rst else 0
    end_id = start_id + 1
    lines = ""
    timestamp = 0
    # If rst is empty, the data has not yet been updated up to this id
    if length == 0:
        time.sleep(1.0)
    else:
        # One tab-separated output line per record, in TERMS column order.
        lines = '\n'.join(['\t'.join([preprocess(str(s[s1])) for s1 in TERMS]) for s in rst])
        # Take the first record that carries a crawl timestamp.
        for k in rst:
            timestamp = k.get('crawler_time_stamp')
            if timestamp:
                break
        # Continue from the largest id seen in this batch; if the batch
        # did not advance past start_id, skip ahead by its length.
        end_id = max([int(x['id']) for x in rst])
        if end_id == start_id:
            end_id += length
    print("%s %s " % (start_id, length))

    # time.sleep(0.7)
    # sleep for a while
    print(timestamp)
    return lines, length, timestamp, int(end_id)


def download(start_id, max_id, save_dir, conf, num_per_file=1000):
    """Fetch up to *num_per_file* records starting at *start_id* and
    append them to one gzip file under *save_dir*.

    Progress is checkpointed to *conf* after each batch in the format
    "max_id,start_id,end_id,DOCS_PER_REQ" (consumed by read_conf).
    Returns the next start_id to resume from.
    """
    # Number of batches needed to fill one output file.
    n = int(num_per_file / DOCS_PER_REQ)

    content = ""
    save_file = ""

    for i in range(0, n):
        lines, length, timestamp, end_id = get_data(start_id)
        print('3 %d' % start_id)
        if not timestamp:
            # Empty batch: advance the cursor (bounded by max_id),
            # checkpoint, and try the next batch.
            if start_id < max_id:
                start_id += DOCS_PER_REQ
                write_conf(conf, "%d,%d,%d,%d" % (max_id, start_id, end_id, DOCS_PER_REQ))
            continue
        if not save_file:
            # Name the output file after the first batch's crawl time;
            # timestamp appears to be in milliseconds (divided by 1000).
            date = datetime.datetime.fromtimestamp(int(timestamp)/1000).strftime("%Y%m%d_%H%M%S") + ".gz"
            save_file = save_dir + date

        try:
            # Checkpoint before accumulating the batch into memory.
            write_conf(conf, "%d,%d,%d,%d" % (max_id, start_id, end_id, DOCS_PER_REQ))
            content += lines + '\n'
        except Exception as e:
            msg = "Exception %s" % str(e)
            logging.error(msg)

        start_id += DOCS_PER_REQ
        # if max_id - start_id > 100:
        #     start_id += 100
        # else:
        #     start_id += 1 if length == 0 else length
        # if length > 0:
        #     start_id = end_id
        # elif length == 0 and max_id - start_id > DOCS_PER_REQ:
        #     start_id += DOCS_PER_REQ
        # else:
        #     start_id += 1
        # print('4 %d ' % start_id)
    # Flush everything collected in this call to a single gzip file.
    try:
        write_gzip(save_file, content)
    except Exception as e:
        msg = 'Error: ' + str(e)
        logging.error(msg)
        print(msg)
    return start_id


def download_all(save_dir, conf):
    """Resume from the conf checkpoint and run 25 download passes."""
    start_id, max_id, min_id, end_id = read_conf(conf)
    # Resume from the checkpointed end_id, but never below the
    # server's minimum id.
    start_id = max(min_id, end_id)
    for _ in range(25):
        start_id = download(start_id, max_id, save_dir, conf, LINE_PER_FILE)


def keep_one_instance():
    """Exit if another instance of this script is already running.

    Counts `ps aux` output lines that mention this file name, excluding
    the grep process itself; more than one match means another copy is
    active, so the process exits with status 1.
    """
    file = os.path.basename(__file__)
    # NOTE(review): relies on `ps aux | grep`, so this is POSIX-only and
    # can false-positive on unrelated command lines containing the name.
    lines = os.popen("ps aux | grep %s" % file).read().split("\n")
    n = sum(file in line for line in lines if "grep" not in line)
    if n > 1:
        msg = "another process is running %s" % file
        print(msg)
        # sys.exit instead of the site-module exit(), which is not
        # guaranteed to exist in all environments.
        sys.exit(1)


if __name__ == "__main__":
    # Abort early if another copy of this crawler is already running.
    keep_one_instance()
    # CLI usage: script.py <conf_file> <save_dir> <log_dir>
    conf = sys.argv[1]
    save_dir = sys.argv[2]
    log_dir = sys.argv[3]

    # Timestamped log file name derived from this script's base name.
    log_timestamp = str(datetime.datetime.today())
    logfile = log_dir + '/' + os.path.basename(__file__).split('.')[0] + '-' + log_timestamp + '.log'
    # write logs
    logging.basicConfig(filename=logfile, level=logging.ERROR, format='%(asctime)s %(levelname)s:%(message)s')

    download_all(save_dir, conf)
