#!/usr/bin/env python
# -*- coding: utf-8 -*-

from flask import  Flask,jsonify,request
from flask_restful import  Api,Resource
from flask import Flask
import  requests
import  matplotlib.pyplot as plt
from threading import  Thread
import  numpy as np
import datetime


app=Flask(__name__)
api=Api(app=app)

class OlapThread(Thread):
   '''Thread subclass that captures the return value of its target callable.'''

   def __init__(self,func,args=()):
      '''
      :param func: callable executed in this thread
      :param args: positional arguments passed to *func*
      '''
      super(OlapThread,self).__init__()
      self.func=func
      self.args=args
      # Bug fix: pre-set the result slot so getResult() is safe to call even
      # if run() was never executed (previously an AttributeError was raised
      # and swallowed, returning the exception message instead of a result).
      self.result=None

   def run(self) -> None:
      # Store the target's return value so callers can fetch it after join().
      self.result=self.func(*self.args)

   def getResult(self):
      '''Return the target's result (None if the thread has not run).'''
      try:
         return self.result
      except BaseException as e:
         # Defensive fallback kept from the original implementation.
         return e.args[0]


def targetURL(code,seconds,text,requestUrl,method=None,data=None,headers=None):
   '''
   Fire one request at the target server and collect its metrics.
   :param code: placeholder for the HTTP status code (overwritten)
   :param seconds: placeholder for the server response time (overwritten)
   :param text: placeholder for the response body (overwritten)
   :param requestUrl: URL to request
   :param method: 'GET' or 'POST'; any other value returns None
   :param data: JSON body sent with POST requests
   :param headers: headers sent with POST requests
   :return: tuple (status_code, elapsed_seconds, response_text), or None for
            unsupported methods (same implicit behavior as before)
   '''
   if method=='GET':
      r=requests.get(url=requestUrl)
   elif method=='POST':
      r=requests.post(url=requestUrl,json=data,headers=headers)
   else:
      # Unsupported method: explicit None return, matching the original
      # implicit fall-through so caller behavior is unchanged.
      return None
   # Single shared reporting/return path replaces the duplicated branches.
   print('输出信息昨状态码:{0},响应结果:{1}'.format(r.status_code,r.text))
   return r.status_code,r.elapsed.total_seconds(),r.text


def calculationTime(startTime,endTime):
   '''
   Return the duration between two datetimes, in seconds.

   Bug fix: the original used the timedelta ``.seconds`` attribute, which is
   only the seconds *component* (0-86399) and silently drops whole days —
   under-reporting any measurement spanning more than a day.
   ``total_seconds()`` returns the full duration.
   :param startTime: datetime at the start of the measurement
   :param endTime: datetime at the end of the measurement
   :return: elapsed time in seconds (float)
   '''
   return (endTime-startTime).total_seconds()

def getResult(seconds):
   '''
   Summarize response-time statistics for a list of measurements.
   :param seconds: non-empty sequence of per-request response times
   :return: dict with max, min, median and the 99/95/90th percentile lines
   '''
   # max()/min() replace the original double sorted() calls: O(n) instead of
   # two O(n log n) sorts, with identical results.
   data={
      'Max':max(seconds),
      'Min':min(seconds),
      'Median':np.median(seconds),
      '99%Line':np.percentile(seconds,99),
      '95%Line':np.percentile(seconds,95),
      '90%Line':np.percentile(seconds,90)
   }
   return data

# def show(i,j):
#  '''
#  :param i: 请求总数
#  :param j: 请求响应时间列表
#  :return:
#  '''
#  fig,ax=plt.subplots()
#  ax.plot(list_count,seconds)
#  ax.set(xlabel='number of times', ylabel='Request time-consuming',
#         title='olap continuous request response time (seconds)')
#  ax.grid()
#  fig.savefig('target.png')
#  plt.show()

def highConcurrent(count,requestUrl,method,data,headers):
   '''
   Send *count* concurrent requests at the target server and aggregate metrics.
   :param count: number of concurrent requests (one thread each)
   :param requestUrl: URL to request
   :param method: HTTP method ('GET' or 'POST')
   :param data: JSON body for POST requests
   :param headers: request headers for POST requests
   :return: tuple (timeConsuming, throughput, rate, timeData, errorRate,
            total request count, failure count)
   '''
   startTime=datetime.datetime.now()
   tasks=list()
   results=list()
   # Results of requests whose status code was not 200.
   fails=[]
   codes=list()
   seconds=list()
   texts=[]

   # Fan out: one thread per request.
   for i in range(count):
      t=OlapThread(targetURL,args=(i,i,i,requestUrl,method,data,headers))
      tasks.append(t)
      t.start()
      print('测试中:{0}'.format(i))

   for t in tasks:
      t.join()
      # Cache the result instead of calling getResult() three times per thread.
      result=t.getResult()
      if result[0]!=200:
         fails.append(result)
      results.append(result)

   for item in fails:
      print('请求失败的信息:\n',item[2])
   endTime=datetime.datetime.now()
   for item in results:
      codes.append(item[0])
      seconds.append(item[1])
      texts.append(item[2])
   # x-axis values for the trend chart: one point per completed request.
   list_count=list(range(len(codes)))

   # Render the per-request response-time trend chart.
   # NOTE(review): plt.show() blocks until the window is closed when a GUI
   # backend is active — questionable inside a web service; kept as-is to
   # preserve existing behavior.
   fig,ax=plt.subplots()
   ax.plot(list_count,seconds)
   ax.set(xlabel='number of times', ylabel='Request time-consuming',
          title=' request response time (seconds)')
   ax.grid()
   fig.savefig('rs.png')
   plt.show()

   # Average response time; 'total' avoids shadowing the builtin sum().
   total=0
   for s in seconds:
      total+=s
   rate=total/len(list_count)

   totalTime=calculationTime(startTime=startTime,endTime=endTime)
   if totalTime<1:
      totalTime=1
   # Throughput (requests per second). Bug fix: a default of 0 prevents a
   # NameError in the string formatting below if the division raised (the
   # original only printed the exception and left 'throughput' unbound).
   throughput=0
   try:
      throughput=int(len(list_count)/totalTime)
   except Exception as e:
      print(e.args[0])
   throughput=str(throughput)+'/S'

   # Percentage of non-200 responses.
   errorRate=0.00 if len(fails)==0 else len(fails)/len(tasks)*100
   # Removed a duplicate getResult(seconds) call whose result was discarded.
   timeData=getResult(seconds=seconds)
   timeConsuming=(endTime-startTime)
   return timeConsuming,throughput,rate,timeData,errorRate,len(list_count),len(fails)

class Interface(Resource):
   '''REST resource exposing the load-test runner at /v1/interface.'''

   def get(self):
      # Simple liveness/echo endpoint.
      return {'status':0,'msg':'ok','datas':[]}

   def post(self):
      '''
      Run a load test described by the JSON request body and return the
      aggregated metrics.
      Expected JSON keys: count, method, requestUrl, data, headers.
      '''
      if not request.json:
         return jsonify({'status':1001,'msg':'请求参数不是JSON的数据，请检查，谢谢！'})
      else:
         try:
            data={
               'count':request.json.get('count'),
               'method':request.json.get('method'),
               'requestUrl':request.json.get('requestUrl'),
               'data':request.json.get('data'),
               'headers':request.json.get('headers')
            }
            # 'total' replaces a local named 'sum' that shadowed the builtin.
            timeConsuming,throughput,rate,timeData,errorRate,total,fails=highConcurrent(
               count=data['count'],
               requestUrl=data['requestUrl'],
               method=data['method'],
               data=data['data'],
               headers=data['headers']
            )
            print('执行总耗时:',timeConsuming)
            # Bug fix: the original passed 200 as a second positional argument
            # to jsonify(), which serialized the body as a JSON array
            # [payload, 200] instead of setting the HTTP status. Returning a
            # (response, status) tuple lets Flask apply 200 as the status code.
            return jsonify({'status':0,'msg': '请求成功','datas':[{
               '吞吐量':throughput,
               '平均响应时间':rate,
               '响应时间信息':timeData,
               '错误率':errorRate,
               '请求总数':total,
               '失败数':fails
            }]}), 200
         except Exception as e:
            # NOTE(review): returning the raw exception arg leaks internals to
            # the client; kept to preserve the existing API behavior.
            return e.args[0]

api.add_resource(Interface,'/v1/interface')

if __name__ == '__main__':
   app.run(debug=True,port=5001,host='0.0.0.0')