# -*- coding:utf-8 -*-
import time

import requests
import json
import re
import csv
import os
import pandas as pd

requests.packages.urllib3.disable_warnings()  # 忽略HTTPS安全警告
import warnings
warnings.filterwarnings('ignore')

"""
四川葛南仪器
http://cloud.mcu3.com/login.html
"""


class Test_Get():
    """Scraper for the Sichuan Genan instrument cloud platform
    (http://cloud.mcu3.com/login.html).

    Workflow: log in, download measurement records per station into
    采集数据.csv, then combine each station with its "-T" temperature
    series and export an Excel workbook (整理后数据.xlsx).
    """

    def __init__(self):
        # Minimal headers the endpoint is known to accept; the full
        # browser header set from the original capture was dropped.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
        }
        # One session so the login cookie is reused across requests.
        self.session = requests.Session()

    def get_contents(self, station, startTime, endTime):
        """Log in and fetch records for one station between two dates.

        :param station: station identifier (query parameter ``a2``)
        :param startTime: start date string, e.g. "2022-01-01"
        :param endTime: end date string, e.g. "2022-01-01"
        :return: None; rows are appended to 采集数据.csv
        """
        postData = {
            'loginUsername': 'gxdxmcu32',
            'loginPassword': 'gxdxmcu32',
        }
        url = "http://cloud.mcu3.com/security-web/login.do"
        # NOTE(review): the endpoint appears to expect a JSON body even
        # without an explicit Content-Type header — confirm against server.
        html = self.session.post(url, headers=self.headers, data=json.dumps(postData))
        data_login_json = json.loads(html.text)
        if data_login_json['code'] != 999:
            # Code 999 is the success code for this API; report failure
            # explicitly instead of returning silently.
            print("===========登录失败===========")
            return
        print("===========登录成功===========")
        strCookies = "sessionid=" + requests.utils.dict_from_cookiejar(html.cookies)['sessionid']
        self.headers['Cookie'] = strCookies
        print("===========获取数据===========")
        searchUrl = f"http://cloud.mcu3.com/second.htm?a1=&a2={station}&a3=&a4=&a5=&a6=&starttime={startTime} 00:00:00&endtime={endTime} 23:59:59&a7=100000"
        html_data = self.session.get(searchUrl, headers=self.headers)
        # The payload is embedded in the page as "data: [...]//,"; guard
        # against a page-format change instead of raising IndexError.
        matches = re.findall(r"data: (.*?)//,", html_data.text, re.S)
        if not matches:
            print("===========未找到数据===========")
            return
        data_json = json.loads(matches[0])
        dict_list = []
        for d in data_json:
            # Map the server's terse field codes to readable CSV columns.
            dict_list.append({
                '序号': d['id'],
                '测点设计名称': d['C'],
                '采集时间': d['H'],
                '采集值': d['J'],
                '物理量': d['K'],
                '是否越限': d['M'],
            })
        self.dict_to_csv("采集数据", dict_list)
        time.sleep(1)  # be polite to the server between stations

    def dict_to_csv(self, filename: str, item: list):
        """Append dict rows to ./<filename>.csv, writing the header once.

        :param filename: output file stem (no extension)
        :param item: list of dicts keyed by the header column names
        """
        savefileName = "./" + filename + ".csv"
        headList = ['序号','测点设计名称','采集时间','采集值','物理量','是否越限']

        # Create the header row only on first use.
        if not os.path.exists(savefileName):
            with open(savefileName, "a", encoding="utf-8-sig", newline="") as csvfile:
                csv.writer(csvfile).writerow(headList)
        with open(savefileName, "a", encoding="utf-8-sig", newline="") as csvfile:
            writer = csv.writer(csvfile)
            for obj in item:
                # Emit values in the header's column order (not dict
                # insertion order) so rows always align with the header.
                saveList = [obj[k] for k in headList]
                writer.writerow(saveList)
                print(str(saveList))

    def cleanData(self):
        """Join each station's readings with its "-T" temperature series,
        evaluate the correction formula, and export one Excel sheet per
        station to 整理后数据.xlsx.
        """
        df = pd.read_csv("采集数据.csv")
        dict_data = dict()

        # Iterate distinct station names; skip the "-T" temperature rows,
        # which are merged into their base station instead.
        for s in df['测点设计名称'].unique():
            if s.find("-T") >= 0:
                continue
            station = s
            station_t = s.replace('\t', '') + "-T"
            df_s = df[df['测点设计名称'] == station]
            df_t = df[df['测点设计名称'].str.contains(station_t)]
            # rename() without inplace: mutating a slice of df would
            # trigger pandas' SettingWithCopy behavior.
            df_t_h = df_t[['序号', '采集值']].rename(columns={'采集值': '采集值_T'})
            # Default merge joins on the shared 序号 column.
            data = pd.merge(df_s, df_t_h)
            data.eval('公式 = -物理量+(采集值_T-20)*10', inplace=True)
            data = data[['序号', '测点设计名称', '采集时间', '采集值', '采集值_T', '物理量', '是否越限', '公式']]
            dict_data[f'{station}'] = data.copy(deep=True)

        with pd.ExcelWriter('./整理后数据.xlsx') as writer:
            for key, value in dict_data.items():
                value.to_excel(writer, sheet_name=key)

    def get_type(self):
        """Read station parameters from 站点参数.csv.

        :return: list of CSV rows (each row a list of strings)
        """
        with open("站点参数.csv", "r", encoding="utf-8-sig", newline="") as f:
            return list(csv.reader(f))


if __name__ == '__main__':
    test_get = Test_Get()
    flag = input("===========1.提取数据  2.整理数据:")
    match flag:
        case "1":
            startTime = input("===========输入起始时间：如【2022-01-01】:")
            endTime = input("===========输入终止时间：如【2022-01-01】:")
            if startTime != "" and endTime != "":
                print("===========提取测点信息===========")
                list_station = test_get.get_type()
                for s in list_station:
                    test_get.get_contents(s[0], startTime, endTime)
                input("===========数据【提取】完成，按任意键退出===========")
            else:
                print("===========输入时间===========")
        case "2":
            test_get.cleanData()
            input("===========数据【整理】完成，按任意键退出===========")
