#! /usr/bin/env python3
# -*- coding: utf-8 -*-

from urllib import request
from urllib import error
from urllib import parse
from http import cookiejar
import sys
import json
import re
import os
import time
import csv_billing

# Global configuration; populated in run_pa() via load_config() from config.json.
cfg = {}

def load_config():
    """Load and return the JSON configuration stored next to this script.

    Returns:
        dict: parsed contents of ``config.json`` from the script's directory.

    Raises:
        OSError: if the file cannot be opened.
        json.JSONDecodeError: if the file is not valid JSON.
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    # Context manager guarantees the handle is closed even if parsing fails
    # (the original leaked the handle on a json.load error).
    with open(os.path.join(base_dir, "config.json"), "r", encoding="utf-8") as fp:
        return json.load(fp)


def get_opener():
    """Log in to the site described by the global ``cfg`` and return an
    opener carrying the resulting session cookies.

    Reads ``cfg['url']`` (login endpoint) and ``cfg['login_form']``
    (form fields to POST).

    Returns:
        urllib.request.OpenerDirector: opener with the login cookie jar
        attached; returned even if the login request fails (best effort).
    """
    url = cfg['url']
    login_form_data = cfg['login_form']
    login_post_data = parse.urlencode(login_form_data).encode('utf-8')
    user_agent = r'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36'
    # Fixed header name: the original sent "User-Agnet", which servers ignore.
    head = {'User-Agent': user_agent, 'Connection': 'keep-alive'}
    cookie = cookiejar.CookieJar()
    cookie_support = request.HTTPCookieProcessor(cookie)
    opener = request.build_opener(cookie_support)
    login_req = request.Request(url=url, data=login_post_data, headers=head)
    try:
        opener.open(login_req)
    except error.URLError as e:
        # Best effort: report the failure but still hand back the opener.
        if hasattr(e, 'code'):
            print("HTTPError:%d" % e.code)
        elif hasattr(e, 'reason'):
            print("URLError:%s" % e.reason)
    return opener


def get_out_str_list(obj):
    """Fetch the Cacti tree page and return its folder-definition lines.

    Args:
        obj: dict with key ``opener`` (logged-in urllib opener).

    Returns:
        list[str]: every line of the page containing ``insFld`` (one per
        tree node); empty list when the request fails.
    """
    opener = obj['opener']
    path = 'graph_view.php?action=tree&tree_id=0&leaf_id='
    url = cfg['url'] + path
    # Fixed: `data` was previously undefined after a failed request,
    # causing a NameError on the re.findall below.
    data = ''
    try:
        rtn = opener.open(url)
        data = rtn.read().decode('utf-8')
    except error.URLError as e:
        if hasattr(e, 'code'):
            print("HTTPError:%d" % e.code)
        elif hasattr(e, 'reason'):
            print("URLError:%s" % e.reason)
    return re.findall(r".*insFld.*", data, re.M)


def get_data(obj):
    """GET ``cfg['url'] + obj['path']`` through the supplied opener.

    Args:
        obj: dict with keys ``opener`` (logged-in urllib opener) and
            ``path`` (URL path + query relative to ``cfg['url']``).

    Returns:
        bytes | None: raw response body, or None when the request fails.
    """
    opener = obj['opener']
    path = obj['path']
    url = cfg['url'] + path
    # Fixed: `data` was previously undefined after a failed request,
    # causing a NameError on the return below.
    data = None
    try:
        rtn = opener.open(url)
        data = rtn.read()
    except error.URLError as e:
        if hasattr(e, 'code'):
            print("HTTPError:%d" % e.code)
        elif hasattr(e, 'reason'):
            print("URLError:%s" % e.reason)
    return data

def get_ou_num(obj):
    """Extract the tree-depth number from a string starting with ``ouNN``.

    Args:
        obj: dict with key ``sh``, one line from the tree page.

    Returns:
        int | None: NN when ``sh`` starts with ``ou<digits>``, else None.
    """
    # Capture group replaces the original span-arithmetic slicing.
    match = re.search(r'^ou(\d+)', obj['sh'])
    if match is None:
        return None
    return int(match.group(1))

def get_ou_name(obj):
    """Extract the folder display name from a ``gFld("name", ...`` line.

    Args:
        obj: dict with key ``sh``, one line from the tree page.

    Returns:
        str | None: the name with '/' mapped to '_' (so it is safe as a
        directory name) and surrounding whitespace stripped; None when
        the line contains no ``gFld("...",`` fragment.
    """
    # Greedy group keeps the original behavior of matching up to the
    # LAST '",' on the line.
    match = re.search(r'gFld\("(.*)",', obj['sh'])
    if match is None:
        return None
    return match.group(1).replace('/', '_').strip()
    

def get_ou_path(obj):
    """Extract the graph-page query path from a tree line.

    Args:
        obj: dict with key ``sh``, one line from the tree page.

    Returns:
        str | None: the quoted path with ';' converted back to '&'
        (the page uses ';' as the query separator), else None.
    """
    # Capture group replaces the original span-arithmetic slicing.
    match = re.search(r', "(.*)"\)\)', obj['sh'])
    if match is None:
        return None
    return match.group(1).replace(';', '&')


# Download one graph image (PNG).
def get_pic(obj):
    """Download the PNG graph for one graph id into the current directory.

    Args:
        obj: dict with keys ``name`` (output file stem), ``job_id``
            (Cacti local_graph_id), ``end_time`` (unix timestamp, int-able),
            ``opener`` (logged-in opener) and ``offset`` (days of history).

    Side effects:
        Writes ``<name>.png`` in the current working directory.
    """
    name = obj['name']
    job_id = obj['job_id']
    end_time = obj['end_time']
    opener = obj['opener']
    offset = obj['offset']

    query = {
        'local_graph_id': job_id,
        'view_type': "tree",
        'rra_id': '0',
        # Window: `offset` whole days ending at `end_time`.
        'graph_start': str(int(end_time) - 3600 * 24 * offset),
        'graph_end': end_time,
    }
    path = 'graph_image.php?' + parse.urlencode(query)
    gdt = get_data({'opener': opener, 'path': path})
    # `with` guarantees the handle is closed even if the write fails
    # (the original leaked it on a write error).
    with open(name + '.png', 'wb+') as fp:
        fp.write(gdt)

# Download one graph's data export (CSV) and run billing over it.
def get_excl(obj):
    """Download the CSV export for one graph id and post-process it.

    Args:
        obj: same keys as ``get_pic`` -- ``name``, ``job_id``,
            ``end_time``, ``opener``, ``offset``.

    Side effects:
        Writes ``<name>.csv`` in the current working directory and feeds
        it to ``csv_billing.main_run``.
    """
    name = obj['name']
    job_id = obj['job_id']
    end_time = obj['end_time']
    opener = obj['opener']
    offset = obj['offset']

    query = {
        'local_graph_id': job_id,
        'view_type': "tree",
        'rra_id': '0',
        # Window: `offset` whole days ending at `end_time`.
        'graph_start': str(int(end_time) - 3600 * 24 * offset),
        'graph_end': end_time,
    }
    path = 'graph_xport.php?' + parse.urlencode(query)
    gdt = get_data({'opener': opener, 'path': path})
    filename = name + '.csv'
    # `with` guarantees the handle is closed even if the write fails
    # (the original leaked it on a write error).
    with open(filename, 'wb+') as fp:
        fp.write(gdt)
    csv_billing.main_run({'filename': filename})


def get_obj(obj):
    """Scan a graph page for graph images and download each one's PNG + CSV.

    Args:
        obj: dict with keys ``data`` (page HTML as str), ``end_time``
            (unix timestamp), ``offset`` (days) and ``opener``.
    """
    data = obj['data']
    end_time = obj['end_time']
    offset = obj['offset']
    opener = obj['opener']
    # One line per image whose URL contains 'graphimage'.
    for line in re.findall(r".*graphimage.*", data, re.M):
        # Graph id: local_graph_id=NNN in the image URL.
        id_match = re.search(r'local_graph_id=(\d+)', line)
        if id_match is None:
            continue
        graph_id = id_match.group(1)
        # Display name from the alt attribute (greedy, matching the
        # original span-based slicing).
        name_match = re.search(r"alt='(.*)'></a>", line)
        if name_match is None:
            continue
        graph_name = name_match.group(1).replace('/', '_').replace(' ', '')
        job = {'name': graph_name, 'job_id': graph_id, 'end_time': end_time,
               'opener': opener, 'offset': offset}
        get_pic(job)
        get_excl(job)


def ergodic_tree(obj):
    """Walk the tree lines, mirroring the tree as a directory hierarchy.

    For each tree line it derives a depth (``ouNN``), a folder name and a
    page path, chdir-ing up/down so the current working directory tracks
    the node's depth, then scrapes that node's graph page.

    Args:
        obj: dict with keys ``str_list_tree`` (lines from
            get_out_str_list), ``opener`` (logged-in opener),
            ``end_time`` (unix timestamp) and ``offset`` (days).

    Side effects:
        Creates directories and changes the process working directory.
    """
    str_list_tree = obj['str_list_tree']
    opener = obj['opener']
    end_time = obj['end_time']
    offset = obj['offset']
    now_num = -1  # depth of the directory we are currently inside
    for i, sh in enumerate(str_list_tree):
        ounum = get_ou_num({'sh':str_list_tree[i]})
        ouname = get_ou_name({'sh':str_list_tree[i]})
        oupath = get_ou_path({'sh':str_list_tree[i]})
        # Skip lines that are not complete tree-node definitions.
        if ounum == None or ouname == None or oupath == None:
            continue
        if now_num < ounum:
            # Going deeper: create/enter the child directory.
            if not os.path.exists(ouname):
                os.mkdir(ouname)
            os.chdir(ouname)
            now_num = ounum
        elif now_num > ounum:
            # Going shallower: climb one level per depth difference, plus
            # one more to leave the current node, then enter the sibling.
            os.chdir('..')
            ii = now_num - ounum
            while ii:
                os.chdir('..')
                ii = ii - 1
            now_num = ounum
            if not os.path.exists(ouname):
                os.mkdir(ouname)
            os.chdir(ouname)
        else:
            # Same depth: step out of the current directory into a sibling.
            os.chdir('..')
            if not os.path.exists(ouname):
                os.mkdir(ouname)
            os.chdir(ouname)
        # Fetch this node's graph page.
        data = get_data({'opener':opener, 'path':oupath})
        # Download its graphs (PNG) and exports (CSV).
        get_obj({'data':data.decode('utf-8'), 'end_time':end_time, 'offset':offset, 'opener':opener})


def run_pa(obj):
    """Top-level scrape: log in, walk the graph tree, save PNGs and CSVs.

    Args:
        obj: dict with keys ``end_time`` ('YYYY-MM-DD' date string),
            ``offset`` (days of history, int-able) and ``directory``
            (output root path).

    Side effects:
        Sets the module-global ``cfg`` and changes the working directory.
    """
    end_time = obj['end_time']
    offset = int(obj['offset'])
    directory = obj['directory']
    tm_list = time.strptime(end_time, "%Y-%m-%d")
    end_time = int(time.mktime(tm_list))

    # Load the configuration for the whole module.
    global cfg
    cfg = load_config()
    # Ensure the output root exists, then work inside it.
    # (Fixed: the original nested everything below inside the `if`, so the
    # scrape silently did nothing when the directory already existed.)
    if not os.path.exists(directory):
        os.makedirs(directory)
    os.chdir(directory)
    # Log in and get a cookie-carrying opener.
    opener = get_opener()
    # Fetch the list of tree nodes to scrape.
    str_list_tree = get_out_str_list({'opener':opener})
    # Walk the tree: build directories, download graphs and tables.
    ergodic_tree({'str_list_tree':str_list_tree, 'opener':opener, 'end_time':end_time, 'offset':offset})

def main_pa():
    """CLI entry point: validate the three arguments and start the scrape.

    Expects ``sys.argv`` = [script, end-date YYYY-MM-DD, offset-days, dir].
    Prints a usage hint and exits with -1 on any malformed argument.
    """
    usage = "请输入正确参数，如：./pa.py 2017-01-05 7  ./"
    # Exactly three user arguments are required.
    if len(sys.argv) != 4:
        print(usage)
        sys.exit(-1)
    end_time, offset, directory = sys.argv[1], sys.argv[2], sys.argv[3]
    # End date must contain a YYYY-MM-DD shaped substring.
    if re.search(r'\d{4}-\d{2}-\d{2}', end_time) is None:
        print(usage)
        sys.exit(-1)
    # Offset must contain at least one digit.
    if re.search(r'\d+', offset) is None:
        print(usage)
        sys.exit(-1)
    run_pa({'end_time': end_time, 'offset': offset, 'directory': directory})

# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main_pa()
