import time
from operator import itemgetter 
from itertools import groupby 
from urllib.parse import urlparse,parse_qsl 

import requests as rq
import pandas as pd
from sqlalchemy import create_engine
# Shared connection pool for the server_log MySQL database.
# NOTE(review): credentials are hard-coded in source — move them to an
# environment variable or config file before sharing this script.
engine = create_engine("mysql+pymysql://server_log:qriUTu5iNt3IKuwMsxGW@192.168.229.18:5029/server_log", max_overflow=5)


# Fetch JSON data from the given URL
def rq_data(url, payload):
    """GET *url* with query parameters *payload* and return the decoded JSON body."""
    return rq.get(url, params=payload).json()

# Reshape a list of row-dicts into a DataFrame
def format_list_to_dataframe(lst):
    """Build a DataFrame from *lst* (one dict per row), one column per key."""
    columns = {}
    for record in lst:
        for key, value in record.items():
            columns.setdefault(key, []).append(value)
    return pd.DataFrame(columns)

# Export a DataFrame to the csv/ directory
def export_csv(df, name):
    """Write *df* to csv/<name>.csv, comma-separated, without the index."""
    target = 'csv/' + name + ".csv"
    df.to_csv(target, index=False, sep=',')

# Order-preserving dedupe
def uniq(lst):
    """Return the items of *lst* with duplicates removed, keeping first-seen order.

    Improvement over the original: uses a seen-set for O(n) dedup of
    hashable items (uids/ips/ids here are scalars) instead of an O(n^2)
    ``item in list`` scan; unhashable items fall back to the linear scan
    so any input the original accepted still works.
    """
    seen = set()
    unhashable_seen = []
    result = []
    for item in lst:
        try:
            if item in seen:
                continue
            seen.add(item)
        except TypeError:
            # item is unhashable (e.g. a list) — linear-scan fallback
            if item in unhashable_seen:
                continue
            unhashable_seen.append(item)
        result.append(item)
    return result

# Dedupe a list of dicts by one key
def uniq_list(lst, key):
    """Return dicts from *lst* deduplicated on ``dic[key]``, keeping the first occurrence.

    Improvement over the original: tracks seen key values in a set
    (O(1) membership) instead of a list (O(n) scan per row).
    Key values must be hashable — they are ints/strings in this script.
    """
    seen = set()
    res = []
    for dic in lst:
        key_val = dic[key]
        if key_val not in seen:
            seen.add(key_val)
            res.append(dic)
    return res

def pd_col_list(df, col):
    """Return the values of column *col* of *df* as a plain Python list."""
    column_values = df[col].values
    return column_values.tolist()

# Join strings for a SQL IN(...) clause
def str_list_to_str(lst):
    """Wrap each string in double quotes and join with commas (SQL IN-list literal).

    An empty *lst* yields ``'""'``, matching the original behavior.
    NOTE(review): values are interpolated, not parameterized — SQL
    injection risk if any value can contain a double quote.
    """
    return '"%s"' % '","'.join(lst)

def num_list_to_str(lst):
    """Join a list of numbers into a comma-separated string."""
    return ','.join(map(str, lst))

# Run a query and fetch the rows
def select_data(sql):
    """Execute *sql* on the module-level engine and return rows as a list of dicts.

    NOTE(review): ``engine.execute`` is the legacy SQLAlchemy 1.x API —
    confirm the installed SQLAlchemy version before upgrading.
    """
    result = engine.execute(sql)
    return [dict(zip(row.keys(), row.values())) for row in result.fetchall()]

def valid_custom_id_filter_fun(custom_id):
    """Return True for positive customer ids; 0 and negatives are invalid."""
    if custom_id > 0:
        return True
    return False


# Month/day of the log partition being analysed.
month = '201807'
day = '2018-07-20'

# NOTE(review): each triple-quoted block below is a disabled pipeline
# stage (fetch -> export to csv). They appear to have been run once to
# build cached csv files, then switched off in favour of reading those
# files back (the commented-out pd.read_csv lines) — confirm before
# re-enabling, as they hit live services and the production database.
'''
#从服务器请求数据
ly_list = rq_data('http://u.2958.cn/statistic/invalid.php',{'day':day,'type':1,'filter':1})
ly_list_df = format_list_to_dataframe(ly_list)
export_csv(ly_list_df,'ly_invalid')
'''

# ly_list_df = pd.read_csv('ly_invalid.csv')

'''
uids =  uniq(pd_col_list(ly_list_df,'uid'))
his_sql = "SELECT ip,uid,page_url AS fromurl,refer_page_url AS reffer_url,gid AS custom_id,display_res AS screen,CONCAT_WS(' ',os_name,os_major) AS os,CONCAT_WS(' ',browser_name,browser_major) AS browser,device_vender AS device,datetime AS time,extra07 AS iswx FROM DB_ALL_PV_%s WHERE day='%s' AND uid IN (%s)" % (month,day,str_list_to_str(uids))
his_list = select_data(his_sql)
his_list_df = format_list_to_dataframe(his_list)
export_csv(his_list_df,'his_list')
'''

# his_list_df = pd.read_csv('his_list.csv')

'''
#从服务器获取客户信息
custom_ids = list(filter(valid_custom_id_filter_fun,uniq(pd_col_list(ly_list_df,'custom_id')+pd_col_list(his_list_df,'custom_id'))))
custom_list = rq_data('http://gballs.2958.cn/api/Seo/custom.php',{'key':'POHdrVr7S4Mr7o9','function':'getCustoms','custom_id':num_list_to_str(custom_ids)})
custom_list_df = format_list_to_dataframe(uniq_list(custom_list['data'],'custom_id'))
export_csv(custom_list_df,'custom_list')
'''

# custom_list_df = pd.read_csv('custom_list.csv')

'''
#从数据库获取ip地址信息
ips = uniq(pd_col_list(ly_list_df,'ip')+pd_col_list(his_list_df,'ip'))
ip_sql = "SELECT ip,province,city FROM geolocation_log WHERE ip IN(%s) GROUP BY ip" % str_list_to_str(ips)
ip_address_list = select_data(ip_sql)
ip_address_list_df = format_list_to_dataframe(ip_address_list)
export_csv(ip_address_list_df,'ip_address')
'''

# ip_address_list_df = pd.read_csv('ip_address.csv')

'''
ly_data_df = pd.merge(pd.merge(ly_list_df,custom_list_df),ip_address_list_df)
export_csv(ly_data_df,'ly_data')

his_data_df = pd.merge(pd.merge(his_list_df,custom_list_df),ip_address_list_df)
export_csv(his_data_df,'his_data')
'''

# Second-level TLD suffixes that need an extra label to identify a site
# (e.g. 'example.com.cn' rather than just 'com.cn').
exclude_topdomains = ['com.cn','org.cn','net.cn']

def df_time_to_timestamp(row):
    """Convert row['time'] ('YYYY-MM-DD HH:MM:SS', interpreted in local time) to a Unix timestamp."""
    parsed = time.strptime(row['time'], "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(parsed))


def parse_url(url):
    """Split *url* into scheme, hostname (netloc) and a dict of query parameters."""
    parts = urlparse(url)
    return {
        'scheme': parts.scheme,
        'hostname': parts.netloc,
        'query': dict(parse_qsl(parts.query)),
    }

def df_fromurl_to_hostname(row):
    """Return the hostname part of row['fromurl']."""
    return parse_url(row['fromurl'])['hostname']

def df_hostname_to_domain(row, exclude=None):
    """Return the registrable domain of row['hostname'].

    Bug fix: the original indexed with ``lst[len-2]`` etc., subtracting
    from the builtin ``len`` instead of the local length ``l`` — a
    guaranteed TypeError for any hostname with 3+ labels (which is
    presumably why the caller at module level is commented out).

    A host with fewer than three labels is returned unchanged; otherwise
    the last two labels are used, extended to three when the two-label
    suffix is a compound TLD listed in *exclude*.

    exclude -- compound-TLD suffixes; defaults to the module-level
               ``exclude_topdomains`` (backward compatible).
    """
    if exclude is None:
        exclude = exclude_topdomains
    host = row['hostname']
    labels = host.split('.')
    if len(labels) < 3:
        return host
    domain = labels[-2] + '.' + labels[-1]
    if domain in exclude:
        return labels[-3] + '.' + domain
    return domain

def df_fromurl_to_sid(row):
    """Return the 'sid' (or, failing that, 'Sid') query parameter of row['fromurl'], else ''."""
    query = parse_url(row['fromurl'])['query']
    for candidate in ('sid', 'Sid'):
        if candidate in query:
            return query[candidate]
    return ''

def df_fromurl_to_JJID(row):
    """Return the 'JJID' query parameter of row['fromurl'], or '' when absent."""
    query = parse_url(row['fromurl'])['query']
    return query.get('JJID', '')

# Load the cached message-leave ("ly") data and derive per-row features:
# a Unix timestamp, the referring hostname, and the sid/JJID tracking
# parameters pulled from the fromurl query string.
ly_data_df = pd.read_csv('ly_data.csv')
ly_data_df.insert(loc=0,column='timestamp',value=ly_data_df.apply(df_time_to_timestamp,axis='columns'))
ly_data_df.insert(loc=0,column='hostname',value=ly_data_df.apply(df_fromurl_to_hostname,axis='columns'))
# ly_data_df.insert(loc=0,column='domain',value=ly_data_df.apply(df_hostname_to_domain,axis='columns'))
ly_data_df.insert(loc=0,column='sid',value=ly_data_df.apply(df_fromurl_to_sid,axis='columns'))
ly_data_df.insert(loc=0,column='JJID',value=ly_data_df.apply(df_fromurl_to_JJID,axis='columns'))

# Drop columns not needed for the analysis.
# NOTE(review): the positional ``1`` is the axis argument; passing axis
# positionally to DataFrame.drop is deprecated in newer pandas — prefer
# ``axis=1`` (or ``columns=[...]``) when touching this line.
ly_data_df = ly_data_df.drop(['type','custom_id','siteinfo_id','id','time','ip','industry_id2','industry_id'],1)
ly_data_list = ly_data_df.to_dict(orient='records')

# NOTE(review): debug scratch — prints the label count of the first
# row's hostname; looks left over from developing df_hostname_to_domain.
lst = ly_data_list[0]['hostname'].split('.')
l = len(lst)
print(l)
'''
#把留言数据按照uid分组
ly_data_list.sort(key=itemgetter('uid'))
lstg = groupby(ly_data_list,itemgetter('uid')) 

#将分组结果解析为字典
ly_data_dict = dict([(key,{ 'lst':list(group) }) for key,group in lstg])
'''

'''
his_data_df = pd.read_csv('his_data.csv')
his_data_df.insert(loc=0,column='timestamp',value=his_data_df.apply(df_time_to_timestamp,axis='columns'))
his_data_list = his_data_df.to_dict(orient='records')

#把历史访问数据按照uid分组
his_data_list.sort(key=itemgetter('uid'))
lstg = groupby(his_data_list,itemgetter('uid')) 

#将分组结果解析为字典
his_data_dict = dict([(key,list(group)) for key,group in lstg])

'''

# NOTE(review): the disabled block below groups messages and history by
# uid and matches each message against that uid's visit history; it
# shadows the stdlib ``time`` module with a local variable and hard-codes
# ``i==3`` — both look like leftovers from debugging. Confirm intent
# before re-enabling.
'''
res = {}
uids_ly_count = {}

#迭代每一条留言
for i,ly in enumerate(ly_data_list):
    id = ly['id']
    uid = ly['uid']
    fromurl = ly['fromurl']
    time = ly['time']
    ts = ly['timestamp']
    
    #查找对应的历史记录
    if (uid in his_data_dict) and (i==3):
        for his in his_data_dict[uid]:
            if his['url'] in fromurl:
                print(ts-his['timestamp'])
'''
