#coding:utf-8
# -*- coding: utf-8 -*-

from selenium import webdriver
from config.config import systemconfig,monitorconfig
from decorator.decorators import loginreqired,cookiesreqired
import requests
import json
import urllib
from requests.cookies import RequestsCookieJar
from utils import timeutils
import datetime,time
import gc

#################################### Logging ##############################################
# Module-level logger for the monitoring spider.
from utils import logutils
logger=logutils.getlogger(__file__)
######################################################################################

# Module-level shared state: the singleton spider instance plus the
# alias-job lookup dict and ordered key list, both repopulated by
# getAvailableItem().
spider=None
availablealiasjobdict=dict()
availablealiasjoblist=list()

#####################################################################################
#爬虫工具
class Htmlspider():
    """PhantomJS + requests hybrid crawler.

    The headless browser performs the interactive login; its cookies are then
    copied into a requests session so subsequent API calls can be made over
    plain HTTP without driving the browser.
    """
    # Class-level defaults kept for backward compatibility; real values are
    # assigned per instance in __init__.
    sysconfig = None
    browser = None
    httpsession = requests.session()
    httpsession.cookies = None
    csrftoken = None

    def __init__(self, loginurl, username, passwd):
        """Create the headless browser, a dedicated HTTP session and timeouts.

        :param loginurl: URL of the login page.
        :param username: account used to log in.
        :param passwd:   password for the account.
        """
        self.loginurl = loginurl
        self.username = username
        self.passwd = passwd
        # BUGFIX: give every instance its own session instead of sharing the
        # mutable class-level session across all instances.
        self.httpsession = requests.session()
        self.httpsession.cookies = None
        self.sysconfig = systemconfig()
        self.browser = webdriver.PhantomJS(executable_path=self.sysconfig.executable_path)
        # Window size: pages may lay out differently below desktop resolution.
        self.browser.set_window_size(1920, 1080)
        # Timeouts come from the class-level config, as in the original code.
        self.browser.set_page_load_timeout(systemconfig.page_load_timeout)
        # BUGFIX: the script timeout was previously passed to
        # set_page_load_timeout() a second time; it belongs to
        # set_script_timeout().
        self.browser.set_script_timeout(systemconfig.script_timeout)

    def dologin(self):
        """Drive the browser through the login form.

        :raises Exception: when the login click fails, or when the resulting
            browser session id looks too short to be valid.
        """
        logger.info('********************开始登录****************************')
        self.browser.get(self.loginurl)
        time.sleep(2)
        self.browser.find_element_by_id('id_username').send_keys(self.username)
        self.browser.find_element_by_id('id_password').send_keys(self.passwd)
        # Click the login button.
        loginbtn = self.browser.find_element_by_id('login')
        try:
            loginbtn.click()
        except Exception as e:
            # Clear any half-baked cookies so a retry starts clean.
            self.browser.delete_all_cookies()
            self.httpsession.cookies = None
            raise Exception('登录失败:{}'.format(e))
        # Heuristic sanity check: a valid session id is at least as long as a
        # known-good 32-character sample.
        if len(self.browser.session_id) < len('d7brgn1k8xfd3dczb0xgttw7mzdzlsxm'):
            raise Exception('登录失败')

        time.sleep(2)
        logger.debug('登录成功')
        logger.debug('self.browser.get_cookies()={}'.format(self.browser.get_cookies()))
        logger.info('********************登录操作完成****************************')

    @loginreqired
    def gethtml(self, url=None):
        """Load *url* in the browser and return the rendered HTML as UTF-8 bytes.

        :param url: page to open; must be non-empty.
        :return: page source encoded as UTF-8 bytes, or None when loading fails.
        :raises Exception: when url is falsy.
        """
        logger.info("****************************** gethtml ************************************")
        logger.info("url={}".format(url))
        if not url:
            raise Exception('传入的url为空！')
        logger.info('打开页面 {}'.format(url))
        try:
            self.browser.get(url)
        except Exception as e:
            logger.exception(e)
            # Abort the hung load so the browser stays usable for later calls.
            self.browser.execute_script('window.stop()')
            return None
        return self.browser.page_source.encode('utf-8')

    def loadcookie(self):
        """Copy the browser cookies into the requests session via cookies.txt.

        Also caches the csrftoken for later POSTs. The JSON round-trip through
        cookies.txt preserves the original on-disk side effect.
        """
        logger.info("****************************** 开始加载cookies ************************************")

        self.csrftoken = self.browser.get_cookie('csrftoken')['value']
        cookies = self.browser.get_cookies()
        logger.debug("cookies={}".format(cookies))
        with open("cookies.txt", "w") as fp:
            json.dump(cookies, fp)

        jar = RequestsCookieJar()
        with open("cookies.txt", "r") as fp:
            for cookie in json.load(fp):
                jar.set(cookie['name'], cookie['value'])
        # NOTE(review): verify=False disables TLS certificate checking; keep
        # only while targeting the internal 10.x host.
        self.httpsession.verify = False
        self.httpsession.cookies = jar

    def newloadcookie(self):
        """Copy the browser cookies into the requests session without touching disk.

        Unlike loadcookie() this also forwards the cookie attributes that
        requests understands (domain/path/secure), not just name and value.
        """
        logger.info("****************************** 开始新加载cookies ************************************")

        self.csrftoken = self.browser.get_cookie('csrftoken')['value']
        cookies = self.browser.get_cookies()

        jar = RequestsCookieJar()
        for cookie in cookies:
            # BUGFIX: the original inner loop's condition
            # (k=='name' or k=='value' or 'expires') was always truthy, so no
            # attribute was ever forwarded -- and jar.set(k, v) would have
            # created bogus cookies named 'domain'/'path'/... anyway. Pass the
            # supported attributes as keyword arguments instead; selenium-only
            # keys such as 'httpOnly'/'expiry' are skipped because
            # RequestsCookieJar.set() rejects unknown keywords.
            attrs = {k: v for k, v in cookie.items() if k in ('domain', 'path', 'secure')}
            jar.set(cookie['name'], cookie['value'], **attrs)
        self.httpsession.verify = False
        self.httpsession.cookies = jar

    @cookiesreqired
    def doGet(self, url=None, headers=None, paramList=None):
        """GET *url* with the cookie-laden session and return the response text.

        :param url:       target URL.
        :param headers:   optional header dict; a browser-like default is used
                          when missing/empty. If the caller supplies an
                          'X-CSRFToken' header its value is replaced with the
                          cached token.
        :param paramList: unused; kept for signature symmetry with doPost().
        :return: response body text, or None on request failure.
        """
        logger.info("******************************doGet************************************")
        logger.info("url={}".format(url))
        if not headers:
            headers = {
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
                'Accept-Encoding': 'gzip, deflate',
                'Accept-Language': 'zh-CN,zh;q=0.9',
                # BUGFIX: header name/value contained stray spaces
                # ('Cache - Control': 'no - cache'), which requests rejects.
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Host': '10.133.235.155',
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.17 Safari/537.36'
            }
        elif 'X-CSRFToken' in headers:
            headers['X-CSRFToken'] = self.csrftoken

        try:
            # The dead 'pdata' branch from the original was removed: GET
            # requests here never carry a body.
            res = self.httpsession.get(url, headers=headers)
        except Exception as e:
            logger.error('Error code:{}'.format(str(e)))
            return None
        return res.text

    @cookiesreqired
    def doPost(self, url=None, headers=None, paramList=None):
        """POST url-encoded *paramList* to *url* and return the response text.

        :param url:       target URL.
        :param headers:   optional header dict; a browser-like default
                          (including the cached X-CSRFToken) is used when empty.
        :param paramList: sequence of (key, value) pairs, url-encoded as body.
        :return: response body text, or None on request failure.
        """
        logger.info("****************************** doPost ************************************")
        logger.info("url={}".format(url))

        if not headers:
            headers = {
                'Accept': '*/*',
                'Accept-Encoding': 'gzip, deflate',
                'Accept-Language': 'zh-CN,zh;q=0.9',
                'Content-Length': '0',
                # BUGFIX: header name/value contained stray spaces
                # ('Cache - Control': 'no - cache'), which requests rejects.
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'Host': '10.133.235.155',
                'From-Api': 'ng-webapp',
                'Language': 'zh-cn',
                'Origin': 'http://10.133.235.155',
                'Referer': 'http://10.133.235.155/zh-cn/entry/',
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.17 Safari/537.36',
                'X-CSRFToken': self.csrftoken
            }

        # BUGFIX: the original left 'pdata' unbound whenever paramList was None
        # (or the 'Content-Length' key was absent), crashing with
        # UnboundLocalError before the request was even attempted.
        pdata = None
        if paramList is not None:
            pdata = urllib.parse.urlencode(paramList, encoding='UTF-8', safe='+').encode('Utf-8')
            if 'Content-Length' in headers:
                headers['Content-Length'] = str(len(pdata))

        try:
            res = self.httpsession.post(url, headers=headers, data=pdata)
        except Exception as e:
            # BUGFIX: logger.error('Error code:', str(e)) passed the message as
            # a %-format argument with no placeholder, so nothing was logged.
            logger.error('Error code:{}'.format(str(e)))
            return None
        return res.text

    def close(self):
        """Release the browser and the HTTP session."""
        if self.browser:
            self.browser.quit()
        if self.httpsession:
            self.httpsession.close()

    def __del__(self):
        # Best-effort cleanup: attributes may already be torn down during
        # interpreter shutdown, so swallow any error instead of raising in gc.
        try:
            if self.browser:
                self.browser.quit()
                logger.debug('浏览器关闭！')
            if self.httpsession:
                self.httpsession.close()
                logger.debug('httpsession关闭！')
        except Exception:
            pass

def init():
    """Create the module-level spider and prime its cookies by visiting the
    entry page once (the login decorator kicks in on first access)."""
    global spider
    spider = Htmlspider(loginurl='http://10.133.235.155/zh-cn/entry/', username='monitor', passwd='monitor')
    # Hit the home page once so that cookies are established.
    spider.gethtml('http://10.133.235.155/zh-cn/entry/')

#获取所有可检测项
def getAvailableItem():
    """Fetch every available (spv, capability) combination from the backend.

    Repopulates the module-level availablealiasjobdict (job key
    'alias->alias' -> internal (spv_name, cap_name)) and
    availablealiasjoblist (the same keys, in insertion order), creating the
    global spider on demand.

    :return: (availablealiasjobdict, availablealiasjoblist)
    :raises Exception: when the backend request fails.
    """
    global spider
    if not spider:
        init()
    logger.info("******************************获取所有可检测项************************************")

    availablealiasjobdict.clear()
    availablealiasjoblist.clear()

    # Pull every data combination from the backend API.
    jsonstr = spider.doGet('http://10.133.235.155/api-backend/caps/?format=json')
    if not jsonstr:
        # BUGFIX: doGet() returns None on failure; json.loads(None) would
        # raise a confusing TypeError instead of a clear error.
        raise Exception('获取可检测项失败')
    jsondata = json.loads(jsonstr)

    # (display spv name, display full name) -> (internal spv name, internal cap name)
    availableitemdict = {
        (item['spv_display_name'].strip(), item['full_display_name'].strip()):
            (item['spv_name'].strip(), item['name'].strip())
        for item in jsondata
    }

    # Re-key by the configured aliases, falling back to the display names.
    alias = monitorconfig.aliasdict
    availableitemaliasdict = {
        (alias.get(k[0], k[0]), alias.get(k[1], k[1])): v
        for k, v in availableitemdict.items()
    }

    # Flatten the alias tuples into 'spv->cap' job keys.
    for k, v in availableitemaliasdict.items():
        jobkey = '->'.join([k[0], k[1]])
        availablealiasjobdict[jobkey] = v
        availablealiasjoblist.append(jobkey)

    logger.debug('可用的availablealiasjobdict为：{}'.format(availablealiasjobdict))
    logger.debug('可用的availablealiasjoblist为：{}'.format(availablealiasjoblist))

    return availablealiasjobdict, availablealiasjoblist

#根据查询的joblist查询数据,starttimestamp和endtimestamp为统计时间
def querydata(joblist=None,starttimestamp=None,endtimestamp=None,jobtypelist=None):
    """Submit one backend stat task per (job, search type) pair, poll each
    task until it reports done, and collect the results.

    :param joblist:        keys into the module-level availablealiasjobdict
                           ('alias->alias' strings); required.
    :param starttimestamp: statistics window start timestamp; required.
    :param endtimestamp:   statistics window end timestamp; required.
    :param jobtypelist:    search types to run; defaults to all keys of
                           monitorconfig.searchtypedict.
    :return: dict mapping (jobitem, app_name, cap_name, jobtype) to the
             result value, or 'NA' when the backend returned nothing;
             None when a required argument is missing.
    """
    logger.info("****************************** 进入查询任务 ************************************")

    global spider
    if not spider:
        init()

    if not joblist:
        logger.error('未输入joblist')
        return None
    if not starttimestamp or not endtimestamp:
        logger.error('未输入starttimestamp或endtimestamp')
        return None
    if not jobtypelist:
        jobtypelist=monitorconfig.searchtypedict.keys()


    # Template POST body as an ordered list of pairs. Entries at indices
    # 0, 1, 8, 9 and 10 are overwritten per job/search type below, so the
    # order of these appends must not change.
    postDataList = []
    postDataList.append(('spvName', 'app15'))
    postDataList.append(('capName', 'cap3'))
    postDataList.append(('earliest', starttimestamp))
    postDataList.append(('latest', endtimestamp))
    postDataList.append(('metric', 1))
    postDataList.append(('ip_dst', ''))
    postDataList.append(('dim_trans_type', ''))
    postDataList.append(('dim_sub_trans_type', ''))
    postDataList.append(('search', 'trans_count_snapshot'))
    postDataList.append(('app_name', 'app15'))
    postDataList.append(('cap_name', 'cap3'))

    # Phase 1: create one backend task per (job, search type) combination and
    # record the returned job id for later polling.
    jobiddict=dict()
    for jobitem in joblist:
        spvName = availablealiasjobdict[jobitem][0]
        capName = availablealiasjobdict[jobitem][1]
        app_name = availablealiasjobdict[jobitem][0]
        cap_name = availablealiasjobdict[jobitem][1]
        postDataList[0]=('spvName', spvName)
        postDataList[1]=('capName', capName)
        postDataList[9] = ('app_name', app_name)
        postDataList[10] = ('cap_name', cap_name)
        for jobtype in jobtypelist:
            postDataList[8]=('search', jobtype)
            # NOTE(review): doPost() may return None on request failure, which
            # would make json.loads() raise -- confirm the caller tolerates that.
            jsonstr = spider.doPost(url='http://10.133.235.155/zh-cn/apc/stat/task/{}/{}/'.format(app_name,cap_name), paramList=postDataList)
            logger.debug(jsonstr)
            jsondata = json.loads(jsonstr)
            jobid = jsondata['data']['job_id']
            jobiddict[(jobitem,app_name,cap_name,jobtype)]=jobid
            logger.debug('{}->{}'.format((jobitem, app_name, cap_name, jobtype), jobid))

    # for k,v in jobiddict.items():
    #     print(k,'->',v)

    logger.debug('##########################开始最终抓取数据########################################')
    # Final output: (jobitem, app_name, cap_name, jobtype) -> value or 'NA'.
    outputdatadict=dict()

    # Phase 2: poll each recorded job id until the task is done and extract
    # the first result row.
    # Current timestamp, used as a cache-buster query parameter.
    timestamp = timeutils.datetime2timestamp(datetime.datetime.now())
    for jobkey,jobid in jobiddict.items():
        jobitem,app_name,cap_name,jobtype=jobkey
        url = 'http://10.133.235.155/zh-cn/apc/stat/task/{}/{}/{}/?_={}'.format(app_name,cap_name,jobid, timestamp)
        headers = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Content-Type': 'application/json',
            'From-Api': 'ng-webapp',
            'Host': '10.133.235.155',
            'Language': 'zh-cn',
            'Referer': 'http://10.133.235.155/zh-cn/entry/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
            'X-CSRFToken': ''
        }
        jsonstr = spider.doGet(url=url, headers=headers)
        jsondata = json.loads(jsonstr)
        logger.debug(jsondata)
        # Backend not ready for this task: record 'NA' and move on.
        if not jsondata['ok']:
            logger.debug('{}->{}'.format(jobkey, 'NA'))
            outputdatadict[jobkey] ='NA'
            continue
        # Poll until the task reports completion.
        # NOTE(review): 'timestamp' is not refreshed inside this loop and
        # there is no retry limit -- an unfinished task would poll forever.
        while jsondata['data']['isDone'] == False:
            url = 'http://10.133.235.155/zh-cn/apc/stat/task/{}/{}/{}/?_={}'.format(app_name, cap_name, jobid,timestamp)
            jsonstr = spider.doGet(url=url, headers=headers)
            jsondata = json.loads(jsonstr)
            time.sleep(1)
        logger.debug(jsondata)
        # Extract the metric named after the search type ('_snapshot' suffix
        # stripped) from the first result row, or 'NA' when empty.
        if len(jsondata['data']['result'])>0:
            logger.debug('{}->{}'.format(jobkey, jsondata['data']['result'][0][jobtype.replace('_snapshot', '')]))
            outputdatadict[jobkey] = jsondata['data']['result'][0][jobtype.replace('_snapshot', '')]
        else:
            logger.debug('{}->{}'.format(jobkey, 'NA'))
            outputdatadict[jobkey] = 'NA'
    return outputdatadict




def getData(joblist=None,starttime=None,endtime=None):
    """Query monitoring data for *joblist* within [starttime, endtime].

    :param joblist:   job keys ('alias->alias'); defaults to monitorconfig.joblist.
    :param starttime: 'YYYY-MM-DD HH:MM' string; defaults to '2019-07-31 07:00'.
    :param endtime:   'YYYY-MM-DD HH:MM' string; defaults to '2019-07-31 17:00'.
    :return: dict of (jobitem, app_name, cap_name, jobtype) -> value (see querydata()).
    :raises Exception: when a supplied time string cannot be parsed.
    """
    global  spider
    if not joblist:
        joblist = monitorconfig.joblist
    # Parse the statistics window; empty/None inputs fall back to the defaults.
    try:
        startdatetime = datetime.datetime.strptime(starttime or '2019-07-31 07:00', '%Y-%m-%d %H:%M')
        enddatetime = datetime.datetime.strptime(endtime or '2019-07-31 17:00', '%Y-%m-%d %H:%M')
        starttimestamp = timeutils.datetime2timestamp(startdatetime)
        endtimestamp = timeutils.datetime2timestamp(enddatetime)
    except Exception as e:
        # BUGFIX: the original had an unreachable 'return' after this raise.
        raise Exception('日期格式错误{}'.format(e))

    if not spider:
        init()
    # Refresh the available job combinations before querying.
    getAvailableItem()
    # Run the actual queries.
    outputdatadict=querydata(joblist=joblist,starttimestamp=starttimestamp,endtimestamp=endtimestamp)

    for k,v in outputdatadict.items():
        logger.debug('{}->{}'.format(k,v))
    return outputdatadict

def generate_talbedata(joblist=None, starttime=None, endtime=None):
    """Build a report table from getData(): a list of rows whose first row is
    monitorconfig.tabletitles and whose later rows are one per job item.

    NOTE(review): 'talbedata' looks like a typo for 'tabledata', but the name
    is kept because external callers may depend on it.

    :param joblist:   forwarded to getData(); defaults handled there.
    :param starttime: forwarded to getData() ('YYYY-MM-DD HH:MM').
    :param endtime:   forwarded to getData() ('YYYY-MM-DD HH:MM').
    :return: list of rows (lists of strings) suitable for table rendering.
    """
    logger.debug('##########################进入表格数据生成########################################')
    logger.debug('joblist={}'.format(joblist))
    logger.debug('starttime={}'.format(starttime))
    logger.debug('endtime={}'.format(endtime))
    datadict = getData(joblist, starttime, endtime)
    datalist = [monitorconfig.tabletitles]
    # Group results by job item: (alias, alias) tuple -> {column title -> formatted value}.
    tmpdict=dict()
    for k,v in datadict.items():
        # k is (jobitem, app_name, cap_name, jobtype); split the 'a->b' job
        # item back into its two alias parts.
        tmptp=tuple(k[0].split('->'))
        if tmptp not in tmpdict:
            tmpdict[tmptp] = dict()
        itemdatadict=tmpdict[tmptp]

        # Format each metric according to its search type; 'NA' passes through.
        if k[3] == 'trans_count_snapshot':
            if v == 'NA':
                itemdatadict[monitorconfig.searchtypedict[k[3]]] = 'NA'
            else:
                itemdatadict[monitorconfig.searchtypedict[k[3]]] = '{:.0f}'.format(v)
        if k[3] == 'succ_rate_snapshot':
            if v == 'NA':
                itemdatadict[monitorconfig.searchtypedict[k[3]]] = 'NA'
            else:
                itemdatadict[monitorconfig.searchtypedict[k[3]]] = '{:.2f}%'.format(v)
        if k[3] == 'duration_snapshot':
            if v == 'NA':
                itemdatadict[monitorconfig.searchtypedict[k[3]]] = 'NA'
            else:
                itemdatadict[monitorconfig.searchtypedict[k[3]]] = '{:.0f}'.format(v)
        if k[3] == 'rr_rate_snapshot':
            if v != 'NA':
                itemdatadict[monitorconfig.searchtypedict[k[3]]] = '{:.2f}%'.format(v)
                # A response rate under 90% is flagged as impacting users, with
                # a remediation plan chosen per known job item.
                if float(v) < 90.0:
                    itemdatadict['是否影响用户体验'] = '是'
                    if k[0] == '社媒入口（DMZ）->社媒入口':
                        itemdatadict['整改方案及责任方'] = """整改方案：建议细化总分公司各入口地址详单、完善天旦镜像流量、总分公司排查入口调用存在的问题；

                责任方：系统运营保障处、应用维护处、各分公司"""
                    if k[0] == '掌上服务后台（APP）->ecif':
                        itemdatadict['整改方案及责任方'] = """整改方案：建议结合日志分析调用响应情况、完善天旦监控镜像流量；

                责任方：系统运营保障处、应用维护处"""
                    if k[0] == '掌上服务后台（APP）->客户俱乐部':
                        itemdatadict['整改方案及责任方'] = """整改方案：建议结合日志分析调用响应情况、完善天旦监控镜像流量；

                责任方：系统运营保障处、应用维护处。"""
            else:
                itemdatadict[monitorconfig.searchtypedict[k[3]]] = 'NA'
    # for k,v in tmpdict.items():
    #     print(k,'->',v)
    # Flatten the grouped dict into rows, in tabletitles column order; missing
    # columns become empty strings.
    for k,v in tmpdict.items():
        tmplist=[k[0],k[1]]
        for item in monitorconfig.tabletitles[2:]:
            if item in v:
                tmplist.append(v[item])
            else:
                tmplist.append('')
        datalist.append(tmplist)
    for item in datalist:
        logger.debug(item)
    return datalist

def clean():
    """Shut down the module-level spider (if any) and drop the reference so a
    later call can recreate it."""
    global spider
    if spider is not None:
        spider.close()
        spider = None


if __name__=='__main__':
    # Manual smoke test: generate the report table with all defaults.
    tabledatalist=generate_talbedata()
