import math
import random
import time
from operator import itemgetter 
from itertools import groupby 
from urllib.parse import urlparse,parse_qsl
import pandas as pd
import numpy as np
import requests as rq
from sqlalchemy import create_engine

# Connection pool for the server_log MySQL database (shared by select_data).
# NOTE(review): credentials are hard-coded in the DSN — consider moving them
# to environment variables or a config file outside version control.
engine = create_engine("mysql+pymysql://server_log:qriUTu5iNt3IKuwMsxGW@192.168.229.18:5029/server_log", max_overflow=5)


# De-duplicate a flat list.
def uniq(lst):
    """Return the items of *lst* with duplicates dropped, first occurrence wins.

    Membership is tested with ==, so items need not be hashable.
    """
    out = []
    for item in lst:
        if item not in out:
            out.append(item)
    return out

# De-duplicate a list of dicts by one key.
def uniq_list(lst, key):
    """Return dicts from *lst* whose value at *key* has not been seen before.

    Order is preserved; the first dict carrying each key value is kept.
    """
    seen = []
    result = []
    for item in lst:
        value = item[key]
        if value in seen:
            continue
        seen.append(value)
        result.append(item)
    return result

# Extract one DataFrame column as a plain Python list.
def pd_col_list(df, col):
    """Return column *col* of DataFrame *df* as a list of Python values."""
    return df[col].values.tolist()

# --- time conversion helpers ---
def datetime_to_timestamp(dt):
    """Convert a local 'YYYY-MM-DD HH:MM:SS' string to a Unix timestamp (int)."""
    parsed = time.strptime(dt, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(parsed))

def day_to_timestamp(day):
    """Convert a local 'YYYY-MM-DD' date string to a Unix timestamp (int)."""
    parsed = time.strptime(day, "%Y-%m-%d")
    return int(time.mktime(parsed))

def timestamp_to_day(ts):
    """Format Unix timestamp *ts* as a local 'YYYY-MM-DD' date string."""
    local = time.localtime(ts)
    return time.strftime("%Y-%m-%d", local)

def timestamp_to_datetime(ts):
    """Format Unix timestamp *ts* as a local 'YYYY-MM-DD HH:MM:SS' string."""
    local = time.localtime(ts)
    return time.strftime("%Y-%m-%d %H:%M:%S", local)

def timestamp_to_month(ts):
    """Format Unix timestamp *ts* as a local 'YYYYMM' month string."""
    local = time.localtime(ts)
    return time.strftime("%Y%m", local)

def timestamp_to_hour(ts):
    """Format Unix timestamp *ts* as a zero-padded local hour string ('00'-'23')."""
    local = time.localtime(ts)
    return time.strftime("%H", local)

def timestame_to_week(ts):
    """Format Unix timestamp *ts* as the local weekday name (e.g. 'Monday').

    NOTE(review): the name keeps its original 'timestame' typo — sibling
    helpers call it under this spelling.
    """
    local = time.localtime(ts)
    return time.strftime("%A", local)

def day_to_month(day):
    """Convert a 'YYYY-MM-DD' date string to a 'YYYYMM' month string."""
    ts = day_to_timestamp(day)
    return timestamp_to_month(ts)

def day_to_week(day):
    """Convert a 'YYYY-MM-DD' date string to its weekday name."""
    ts = day_to_timestamp(day)
    return timestame_to_week(ts)

def datetime_to_day(dt):
    """Convert a 'YYYY-MM-DD HH:MM:SS' string to its 'YYYY-MM-DD' date part."""
    ts = datetime_to_timestamp(dt)
    return timestamp_to_day(ts)

def datetime_to_hour(dt):
    """Convert a 'YYYY-MM-DD HH:MM:SS' string to its hour as an int (0-23)."""
    ts = datetime_to_timestamp(dt)
    return int(timestamp_to_hour(ts))

def datetime_to_week(dt):
    """Convert a 'YYYY-MM-DD HH:MM:SS' string to its weekday name."""
    ts = datetime_to_timestamp(dt)
    return timestame_to_week(ts)


def is_Chinese(word):
    """Return True if any character of *word* falls in the CJK Unified
    Ideographs block (U+4E00..U+9FFF)."""
    return any('\u4e00' <= ch <= '\u9fff' for ch in word)

# URL parsing.
def parse_url(url):
    """Split *url* into its scheme, hostname, query (as a dict) and path."""
    parts = urlparse(url)
    return {
        'scheme': parts.scheme,
        'hostname': parts.netloc,
        'query': dict(parse_qsl(parts.query)),
        'path': parts.path,
    }

def find_query_field(q, fields):
    """Return the value in mapping *q* for the first name in *fields* present
    there, or NaN when none of the names match."""
    return next((q[name] for name in fields if name in q), np.nan)

def find_query_kwd(q):
    """Return the first value of mapping *q* that contains Chinese text
    (per is_Chinese), or NaN when *q* is empty or no value qualifies."""
    if q:
        for value in q.values():
            if is_Chinese(value):
                return value
    return np.nan


def df_datetime_to_timestamp(row):
    """Row-apply helper: the 'datetime' column as a Unix timestamp."""
    return datetime_to_timestamp(row['datetime'])

def df_datetime_to_day(row):
    """Row-apply helper: the 'datetime' column as a 'YYYY-MM-DD' date."""
    return datetime_to_day(row['datetime'])

def df_datetime_to_hour(row):
    """Row-apply helper: the 'datetime' column's hour as an int."""
    return datetime_to_hour(row['datetime'])

def df_merge_other_to_phone(row):
    """Row-apply helper: the 'phone' column, falling back to 'other' when
    'phone' is null."""
    phone = row['phone']
    return row['other'] if pd.isnull(phone) else phone

def df_filter_topic(row):
    """Row-apply helper: replace the placeholder topic '未找到此广告标题'
    ("ad title not found") with NaN; otherwise return the topic unchanged.

    Bug fix: the original else-branch evaluated row['topic'] without
    returning it, so every real topic was silently mapped to None.
    """
    if row['topic'] == '未找到此广告标题':
        return np.nan
    return row['topic']

def df_fromurl_to_host(row):
    """Row-apply helper: the hostname of 'fromurl', or NaN when it is null."""
    url = row['fromurl']
    if pd.isnull(url):
        return np.nan
    return parse_url(url)['hostname']

# Two-label public suffixes: when a host ends in one of these, the suffix
# alone is not a registrable domain, so one extra label must be kept.
exclude_topdomains = ['com.cn','org.cn','net.cn']

def df_hostname_to_domain(row):
    """Row-apply helper: reduce the 'host' column to its registrable domain.

    Hosts with fewer than three labels are returned unchanged. Otherwise the
    last two labels are kept — or three, when the two-label tail is a public
    suffix listed in exclude_topdomains (e.g. 'com.cn').
    Returns NaN for a null host.
    """
    host = row['host']
    if pd.isnull(host):
        return np.nan
    labels = host.split('.')
    if len(labels) < 3:
        return host
    domain = '.'.join(labels[-2:])
    if domain in exclude_topdomains:
        return '.'.join(labels[-3:])
    return domain

def df_fromurl_to_sid(row):
    """Row-apply helper: the source id (sid/Sid/sourceid) from the query
    string of 'fromurl'; NaN when the url is null or carries no such field."""
    url = row['fromurl']
    if pd.isnull(url):
        return np.nan
    query = parse_url(url)['query']
    return find_query_field(query, ['sid','Sid','sourceid'])

def df_fromurl_to_JJID(row):
    """Row-apply helper: the JJID parameter from the query string of
    'fromurl'; NaN when the url is null or has no JJID."""
    url = row['fromurl']
    if pd.isnull(url):
        return np.nan
    query = parse_url(url)['query']
    return find_query_field(query, ['JJID'])

def df_fromurl_to_mediaid(row):
    """Row-apply helper: the mediaid parameter from the query string of
    'fromurl'; NaN when the url is null or has no mediaid."""
    url = row['fromurl']
    if pd.isnull(url):
        return np.nan
    query = parse_url(url)['query']
    return find_query_field(query, ['mediaid'])

def df_fromurl_to_kwd(row):
    """Row-apply helper: the first Chinese-text query value of 'fromurl'
    (treated as the search keyword); NaN when the url is null or none found."""
    url = row['fromurl']
    if pd.isnull(url):
        return np.nan
    query = parse_url(url)['query']
    return find_query_kwd(query)

def df_fromurl_to_path(row):
    """Row-apply helper: 'fromurl' reduced to an http URL of host + path,
    with the query string stripped; NaN when the url is null."""
    url = row['fromurl']
    if pd.isnull(url):
        return np.nan
    parts = parse_url(url)
    return 'http://' + parts['hostname'] + parts['path']

def df_referer_to_fkwd(row):
    """Row-apply helper: the first Chinese-text query value of 'referer';
    NaN when the referer is null or carries no Chinese value."""
    referer = row['referer']
    if pd.isnull(referer):
        return np.nan
    query = parse_url(referer)['query']
    return find_query_kwd(query)

def df_yesno_to_int(row):
    """Row-apply helper: map the 'yesno' flag to 1 when it equals -1,
    otherwise 0. A null 'yesno' counts as 0."""
    raw = row['yesno']
    flag = 0 if pd.isnull(raw) else int(raw)
    return 1 if flag == -1 else 0


# Hostnames of the 2958 platform, mobile (WAP) sites.
host_2958_wap = ['m.2958.cn','m.78go78.com','m.zszhandian.com','m.wangluozs.com','m.jiamengzs.com','wxapp.2958.cn','m.redian360.com','m.join178.cn',
'm.ab040.com','m.ab470.com','me.2958.cn','m.2958.org.cn','me.2958.org.cn','m.59xiang.com','m.2958.net.cn','wap.2958.net.cn','m.kilimanur.com',
'm.zhaoshangzs.com','me.2958.net.cn','jpsj.89178.cn','hs.2958.cn','3g.zlseo.com','m.ww698.com','m.yanse123.com','m.tata588.com','m.qudaocy.com',
'wap.yanse123.com','wap.tata588.com']

# Hostnames of the 2958 platform, desktop (PC) sites.
host_2958_pc = ['www.2958.cn','jp.2958.cn','sj.2958.cn','msj.2958.cn','sj.2958.org.cn','www.2958.net.cn','www.2958.org.cn','xm.redian360.com',
'www.ww698.com','www.kilimanur.com','www.ab040.com','fc.ww698.com','cyw.qudaocy.com','bd.zszhandian.com','bd.southpawsman.com','www.59xiang.com',
'ww.ab470.com','jiu.zlseo.com','jiu.78my78.cn','icy.zlseo.com','hot.s22.cn','hot.dolaws.net','hot.ab040.com','go.ab040.com','www.tata588.com',
'fc.tata588.com','cye.yanse123.com','cy.zlseo.com','www.redian360.com']

# Hostnames of the SEO platform, mobile (WAP) sites.
host_seo_wap = ['m.hot.36578.com','m.shang360.com','m.cy.78.cn','wap.zf.36578.com','m.hao.qudao.com','wap.china.qudao.com','m.hot.23.cn',
'm.news.89178.com','wap.sj.89178.com','wap.cy.89178.com','wap.9355.com','m.cy.2958.cn','wap.08956.com']

# Hostnames of the SEO platform, desktop (PC) sites.
host_seo_pc = ['hot.36578.com','www.shang360.com','top.shang360.com','ask.shang360.com','qiye.shang360.com','news.shang360.com','canyin.shang360.com',
'fuzhuang.shang360.com','shipin.shang360.com','jiaju.shang360.com','jc.shang360.com','jiaoyu.shang360.com','meirong.shang360.com','baojian.shang360.com',
'yule.shang360.com','huanbao.shang360.com','keji.shang360.com','qita.shang360.com','cy.78.cn','zf.36578.com','hao.qudao.com','china.qudao.com',
'hot.23.cn','news.89178.com','sj.89178.com','www.08956.com','cy.89178.com','www.9355.com','cy.2958.cn']

def df_host_to_plat(row):
    """Row-apply helper: classify the 'host' column as platform '2958' or
    'seo' via the module-level host lists; NaN for null or unknown hosts."""
    host = row['host']
    if pd.isnull(host):
        return np.nan
    if host in host_2958_wap or host in host_2958_pc:
        return '2958'
    if host in host_seo_wap or host in host_seo_pc:
        return 'seo'
    return np.nan


def df_day_to_week(row):
    """Row-apply helper: the 'day' column ('YYYY-MM-DD') as a weekday name."""
    return day_to_week(row['day'])

# Fetch JSON data from the given address.
def rq_data(url, payload):
    """GET *url* with query parameters *payload* and return the parsed JSON body."""
    response = rq.get(url, params=payload)
    return response.json()

# Convert a list of dicts into a DataFrame.
def format_list_to_dataframe(lst):
    """Build a DataFrame from a list of dicts by accumulating values per key.

    NOTE(review): assumes every dict carries the same keys — uneven keys
    would produce unequal column lengths and make the constructor raise.
    """
    columns = {}
    for record in lst:
        for key, value in record.items():
            columns.setdefault(key, []).append(value)
    return pd.DataFrame(columns)

# Join strings for use inside a SQL IN(...) clause.
def str_list_to_str(lst):
    """Join *lst* as a double-quoted, comma-separated string: '"a","b"'.

    NOTE(review): values are not escaped — only feed trusted strings into
    SQL built with this helper.
    """
    return '"{}"'.format('","'.join(lst))

def num_list_to_str(lst):
    """Join the numbers in *lst* as a comma-separated string: '1,2,3'."""
    return ','.join(map(str, lst))

# Query the database.
def select_data(sql):
    """Execute *sql* on the module-level engine and return all rows as a
    list of dicts (column name -> value)."""
    result = engine.execute(sql)
    return [dict(zip(row.keys(), row.values())) for row in result.fetchall()]

def get_custom_list(custom_ids):
    """Fetch customer records for *custom_ids* from the Seo custom API and
    return them as a DataFrame, deduplicated by custom_id."""
    payload = {'key':'POHdrVr7S4Mr7o9','function':'getCustoms','custom_id':num_list_to_str(custom_ids)}
    data = rq_data('http://gballs.2958.cn/api/Seo/custom.php', payload)
    return format_list_to_dataframe(uniq_list(data['data'], 'custom_id'))

def get_ip_addr_list(ips):
    """Look up province/city for each ip in geolocation_log and return a
    DataFrame.

    NOTE(review): the IN() clause is string-built (no parameterization) —
    ips are expected to come from our own logs, not user input.
    """
    ip_sql = "SELECT ip,province,city FROM geolocation_log WHERE ip IN(%s) GROUP BY ip" % str_list_to_str(ips)
    rows = select_data(ip_sql)
    return format_list_to_dataframe(rows)

def get_his_list(day, uids):
    """Fetch the page-view history for *uids* on *day* from that month's
    DB_ALL_PV_<YYYYMM> table and return it as a DataFrame.

    NOTE(review): the query is string-built — day/uids are expected to be
    trusted values derived from our own data.
    """
    month = day_to_month(day)
    his_sql = "SELECT ip,uid,page_url AS fromurl,refer_page_url AS reffer_url,gid AS custom_id,display_res AS screen,CONCAT_WS(' ',os_name,os_major) AS os,CONCAT_WS(' ',browser_name,browser_major) AS browser,device_vender AS device,datetime AS time,extra07 AS iswx FROM DB_ALL_PV_%s WHERE day='%s' AND uid IN (%s)" % (month, day, str_list_to_str(uids))
    rows = select_data(his_sql)
    return format_list_to_dataframe(rows)

def valid_uid_filter_fun(uid):
    """Filter predicate: keep uid values that are non-null and not the
    literal string 'undefined'."""
    return not (pd.isnull(uid) or uid == 'undefined')

def chunk(l, n):
    """Split list *l* into consecutive slices of length *n*; the final
    slice may be shorter."""
    return [l[start:start + n] for start in range(0, len(l), n)]

# Export a DataFrame to CSV.
def export_csv(df, name):
    """Write *df* to '<name>.csv' in the working directory, comma-separated,
    without the index column."""
    df.to_csv(name + ".csv", sep=',', index=False)

def format_ly_data():
    """Load the raw 2958 guestbook dump and normalize it.

    Reads '2958_guestbook_tmp.csv' (gbk-encoded, ';'-separated), renames
    columns to the pipeline's names, derives url/time/flag fields, drops
    unused columns, and returns only rows from 2018-07-01 to 2018-07-31.
    """
    df = pd.read_csv('2958_guestbook_tmp.csv', sep=';', encoding='gbk')

    df.rename(columns={'cookie':'uid','guestip':'ip','datetime1':'datetime','siteinfo_id':'r_sid','JJID':'r_JJID','province':'r_province'},inplace=True)

    df['topic'] = df.apply(df_filter_topic, axis=1)
    df['phone'] = df.apply(df_merge_other_to_phone, axis=1)

    # Fields derived from fromurl.
    df['host'] = df.apply(df_fromurl_to_host, axis=1)
    df['domain'] = df.apply(df_hostname_to_domain, axis=1)
    df['sid'] = df.apply(df_fromurl_to_sid, axis=1)
    df['JJID'] = df.apply(df_fromurl_to_JJID, axis=1)
    df['mediaid'] = df.apply(df_fromurl_to_mediaid, axis=1)
    # df['kwd'] = df.apply(df_fromurl_to_kwd,axis=1)
    # Fields derived from referer.
    # df['fkwd'] = df.apply(df_referer_to_fkwd,axis=1)

    # Fields derived from datetime.
    df['timestamp'] = df.apply(df_datetime_to_timestamp, axis=1)
    df['day'] = df.apply(df_datetime_to_day, axis=1)
    df['hour'] = df.apply(df_datetime_to_hour, axis=1)

    df['yesno'] = df.apply(df_yesno_to_int, axis=1)

    # Fix: pass axis as a keyword — the positional form was deprecated in
    # pandas 1.0 and removed in 2.0.
    df = df.drop(['type','banner_id','other','email','address','zipcode','typename','superid','checkcode','approve','datetime0','is_read','gyesno','version','rid','call_state','gbook_id','return_visit_time','add_time','main_id','plat','referer'], axis=1)

    return df[(df['day']>='2018-07-01') & (df['day']<='2018-07-31')]

# --- One-off pipeline stages -------------------------------------------------
# Each stage below was run once and cached its result to a CSV; finished
# stages are disabled (commented out or wrapped in string literals) and
# later stages read the cached CSVs back instead of recomputing.

# Stage 1: normalize the guestbook dump and cache it as ly_data.csv.
# ly_df = format_ly_data()

# export_csv(ly_df,'ly_data')
# Stage 2 (disabled): fetch each day's visit history for the uids seen in
# the cached leads, cache as his_data.csv.
'''
ly_df = pd.read_csv('ly_data.csv')
lst = ly_df.to_dict(orient='records')
lst.sort(key=itemgetter('day'))
lstg = groupby(lst,itemgetter('day'))#按日期分组
lstg_dict = dict([(key,list(group)) for key,group in lstg])

df_lst = []

for day in lstg_dict:
    print(day,'start')
    uids = list(filter(valid_uid_filter_fun,uniq([ly['uid'] for ly in lstg_dict[day]])))
    sdf = get_his_list(day,uids)
    print(day,'end')
    df_lst.append(sdf)

his_df = pd.concat(df_lst,ignore_index=True)

export_csv(his_df,'his_data')
'''

# Inputs for stages 3-5 (reloaded from the stage 1/2 caches when enabled).
# ly_df = pd.read_csv('ly_data.csv')
# his_df = pd.read_csv('his_data.csv')
# Stage 3 (disabled): fetch customer records for all referenced custom_ids.
'''
custom_ids = uniq(pd_col_list(ly_df,'custom_id'))+uniq(pd_col_list(his_df,'custom_id'))
custom_df = get_custom_list(custom_ids)
export_csv(custom_df,'custom_data')
'''
# custom_df = pd.read_csv('custom_data.csv')
# Stage 4 (disabled): geolocate all referenced ips.
'''
ips = uniq(pd_col_list(ly_df,'ip'))+uniq(pd_col_list(his_df,'ip'))
id_addr_df = get_ip_addr_list(ips)
export_csv(id_addr_df,'ip_addr_data')
'''
# id_addr_df = pd.read_csv('ip_addr_data.csv')
# Stage 5 (disabled): join leads/history with customer + geo data and cache
# the full tables.
'''
df = pd.merge(pd.merge(ly_df,custom_df),id_addr_df)
df = df.drop(['industry_id','industry_id2'],1)
export_csv(df,'ly_data_full')
'''
'''
df = pd.merge(pd.merge(his_df,custom_df),id_addr_df)
df = df.drop(['industry_id','industry_id2'],1)
export_csv(df,'his_data_full')
'''

# Final stage: enrich the fully-joined leads table and inspect one random row.
df = pd.read_csv('ly_data_full.csv')
print(df.columns)
#['id', 'custom_id', 'topic', 'guestname', 'phone', 'content', 'r_sid',
# 'ip', 'fromurl', 'datetime', 'yesno', 'uid', 'r_JJID', 'r_province',
# 'host', 'useragent', 'domain', 'sid', 'JJID', 'mediaid', 'timestamp',
# 'day', 'hour', 'custom_name', 'big_industry_name',
# 'small_industry_name', 'province', 'city']


df['content'] = df['content'].fillna('unknown')

# Platform classification.
df['plat'] = df.apply(df_host_to_plat, axis=1)
df['plat'] = df['plat'].fillna('unknown')
# Visited address: pure http path, without query parameters.
df['path'] = df.apply(df_fromurl_to_path, axis=1)
df['week'] = df.apply(df_day_to_week, axis=1)

df['r_province'] = df['r_province'].fillna('unknown')
df['r_JJID'] = df['r_JJID'].fillna('unknown')
df['uid'] = df['uid'].fillna('undefined')
df['sid'] = df['sid'].fillna(0)
df['JJID'] = df['JJID'].fillna('unknown')
df['mediaid'] = df['mediaid'].fillna('unknown')

# Fix: pass axis as a keyword — the positional form was deprecated in
# pandas 1.0 and removed in 2.0.
df = df.drop(['topic','fromurl','useragent'], axis=1)

print(uniq(pd_col_list(df,'custom_name')))

# Print one random record as a sanity check; guard against an empty frame,
# where random.randint(0, -1) would raise ValueError.
lst = df.to_dict(orient='records')
l = len(lst)
if l:
    idx = random.randint(0, l - 1)
    print(l, idx)
    print(lst[idx])