import re
import requests
import csv
import pandas as pd
import numpy as np
import time
import json
from datetime import datetime
from datetime import timedelta
import requests
import time
import functools
import threading
import pymysql
from apscheduler.schedulers.background import BackgroundScheduler

# Timing decorator for the main crawl loop.
def runtime(func):
    """Decorator that reports how long each call of *func* takes.

    Args:
        func: any callable; its return value is passed through unchanged.

    Returns:
        A wrapped callable with the same metadata as *func*.
    """
    # NOTE: the original printed "this is run time" here, at decoration time —
    # an import-time side effect left over from debugging; removed.

    @functools.wraps(func)  # keep the wrapped function's name/docstring
    def inner(*args, **kwargs):
        # perf_counter is monotonic and is the recommended clock for
        # measuring elapsed intervals (time.time can jump on clock changes).
        start = time.perf_counter()
        result = func(*args, **kwargs)  # preserve the original return value
        end = time.perf_counter()
        print(f"函数执行花了{end - start}s")
        return result

    return inner


def get_content(url, i):
    """Fetch *url* and return the raw response body.

    Args:
        url: address to fetch.
        i: stagger index; the call sleeps ``5 - i`` seconds (floored at 0)
           so concurrent workers spread their requests out over time.

    Returns:
        The response body as bytes.
    """
    # timeout so a hung server cannot stall the calling thread forever
    text = requests.get(url, timeout=10).content
    # original slept 5 - i unconditionally; a negative value raises ValueError
    time.sleep(max(0, 5 - i))
    print("get content", i)
    return text  # BUG FIX: the original fetched the body but never returned it


def pachong_jt(key, city, road_name, T0_time):
    """Query the Baidu Maps road-traffic API for one road and persist the
    result into the MySQL table ``crawler_copy1``.

    Args:
        key: Baidu Maps API key (the ``ak`` request parameter).
        city: numeric district/town code; stored in the ``town_no`` column.
        road_name: name of the road to query.
        T0_time: observation timestamp (``YYYYMMDDHHMM``-style integer),
            stored in ``observe_time``.
    """
    url = 'https://api.map.baidu.com/traffic/v1/road?'
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                      " Chrome/93.0.4577.63 Safari/537.36"
    }
    param = {
        'ak': key,
        'road_name': road_name,
        'city': city
    }
    # timeout so one hung API call cannot block its worker thread forever
    resp = requests.get(url=url, params=param, headers=headers, timeout=10)
    response = json.loads(resp.text)

    # NOTE(review): database credentials are hard-coded in source; they should
    # be moved to environment variables or a config file and rotated.
    host = 'rm-bp11161odccsh8gopfo.mysql.rds.aliyuncs.com'
    user = 'projectEF'
    passwd = 'renke2022'
    port = 3306
    db = 'crawler_traffic'
    biaoming = 'crawler_copy1'  # target table name
    conn = pymysql.connect(host=host,
                           user=user,
                           passwd=passwd,
                           database=db,
                           port=port,
                           charset='utf8')
    try:
        # BUG FIX: the original overwrote the caller's T0_time with a
        # hard-coded constant (202301261635), so every row got the same
        # observation time regardless of what the caller passed.
        observe_time = T0_time
        status = response['status']
        with conn.cursor() as cursor:
            # Parameterized queries instead of f-string interpolation: the
            # API-returned description/evaluation strings may contain quotes,
            # which would break (or inject into) a string-built statement.
            if status == 0:
                sql = (f'INSERT INTO {biaoming} '
                       '(observe_time,status,description,evaluation,road_traffic,'
                       'evaluation_status,road_name,town_no) '
                       'VALUES (%s,%s,%s,%s,%s,%s,%s,%s);')
                cursor.execute(sql, (int(observe_time),
                                     int(status),
                                     str(response['description']),
                                     str(response['evaluation']),
                                     str(response['road_traffic']),
                                     int(response['evaluation']['status']),
                                     road_name,
                                     int(city)))
            else:
                # Non-zero status: the API returned no traffic payload, so
                # only record the attempt and its status code.
                sql = (f'INSERT INTO {biaoming} '
                       '(observe_time,status,road_name,town_no) '
                       'VALUES (%s,%s,%s,%s);')
                cursor.execute(sql, (int(observe_time),
                                     int(status),
                                     road_name,
                                     int(city)))
            conn.commit()
    finally:
        conn.close()  # BUG FIX: the original leaked the connection


@runtime
def xunhuan(road_info_list, key, T0_time):
    """Crawl every (road_name, city) pair concurrently, one thread per road.

    Args:
        road_info_list: iterable of ``(road_name, city)`` pairs.
        key: Baidu Maps API key, forwarded to ``pachong_jt``.
        T0_time: observation timestamp, forwarded to ``pachong_jt``.
    """
    threads = []
    for it_name, it_city in road_info_list:
        t = threading.Thread(target=pachong_jt, args=(key, it_city, it_name, T0_time))
        t.start()
        threads.append(t)
    # BUG FIX: the original never joined the workers, so @runtime (and the
    # caller's rows-per-second print) measured only thread start-up time,
    # not the actual crawling and DB writes.
    for t in threads:
        t.join()


# ---- script entry: crawl every road once, then report throughput ----


# NOTE(review): API key is hard-coded; move it to config or an env var.
key = 'Ht37FLMMh0lPediymZhsWSekYHRwut0D'
road_info = pd.read_excel('./data/input/road_info.xlsx')
# Rows of (road_name, city) fed to the threaded crawler.
road_info_list = road_info[['road_name', 'city']].values
T0_time = 202301261635  # observation timestamp, YYYYMMDDHHMM
t1 = time.time()
xunhuan(road_info_list, key, T0_time)
t2 = time.time()
# rows processed, elapsed seconds, rows per second
print(road_info.shape[0], t2 - t1, road_info.shape[0] / (t2 - t1))

# Periodic re-crawl.
# NOTE(review): the original scheduled `tick` with args (list_district,
# parameter_info, cpmeter, ncFilePath, test_mode, dst_info) — none of those
# names are defined anywhere in this file, so scheduler.add_job raised
# NameError at startup. The evident intent is to re-run the crawl loop, so
# schedule xunhuan with the variables this script actually defines — confirm
# against the original `tick` implementation if it exists elsewhere.
scheduler = BackgroundScheduler(timezone="Asia/Shanghai")
scheduler.add_job(xunhuan,
                  'cron',
                  second='*/10',
                  # hour='*/1',
                  args=(road_info_list, key, T0_time),
                  start_date="2022-08-15 10:36:00", )
scheduler.start()
# BackgroundScheduler runs its jobs in a daemon thread; without a keep-alive
# loop the main thread exits immediately after start() and no job ever fires.
try:
    while True:
        time.sleep(60)
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()
# main()
# key = 'Ht37FLMMh0lPediymZhsWSekYHRwut0D'
# city = '330112'
# road_name = '竹林街'
# a, b = pachong_jt(key, city, road_name)

# ak = 'Ht37FLMMh0lPediymZhsWSekYHRwut0D'
#
# list1 = ['保锦路', '临水路', '临青线', '苕溪北路', '东湖路', '竹林街', '马溪路', '环北路', '平山路', '大学路', '万马路', '西墅街',
#          '临天桥', '长桥路', '长西线', '望湖路', '新民街', '江桥路', '城中街', '钱王街', '天目路', '临天路', '苕溪南路', '锦江路',
#          '临东路', '衣锦街', '城中东街', '杨临线', '杭昱线', '青临线', '横潭路', '天屹路', '勤学路', '畔湖路']
# output = pd.DataFrame()
# for n, it in enumerate(list1):
#     print(n)
#     url = 'https://api.map.baidu.com/traffic/v1/road?'
#     road_name = it
#     city = '330112'# 临安区代码
#     headers = {
#     "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
#                   " Chrome/93.0.4577.63 Safari/537.36"
#     }
#
#     param = {
#         'ak': ak,
#         'road_name': road_name,
#         'city': city
#     }
#     resp = requests.get(url=url, params=param, headers=headers)
#     page_content = resp.text
#     a = json.loads(page_content)
#     df = pd.DataFrame([a])
#     if n == 0:
#         output = df
#     else:
#         output = output.append(df, ignore_index=True)
#
# output.to_csv('3.csv', index=False, encoding='utf_8_sig')
