import time
import csv
import os
import json
from seleniumwire import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.edge.service import Service
from selenium.webdriver.common.by import By
import requests
from bs4 import BeautifulSoup
from lxml import html
import pandas as pd


class get_Diagnosis_Info:
    """Scraper for the Linde/KION diagnosis portal.

    Performs a one-time Selenium (headless Edge) login to harvest session
    cookies, then drives plain ``requests`` calls to query device data,
    upload firmware files, create OTA write-packets and download packet
    files.
    """

    def __init__(self, url, username, password, output_file):
        self.url = url                  # login entry URL opened by get_login()
        self.username = username        # portal account name
        self.password = password        # portal account password
        self.output_file = output_file  # default path for persisted results
        self.cookies = None             # session cookies cached by get_login()

    def _resolve_cookies(self, cookies):
        """Return *cookies* if given, else the ones cached by get_login()."""
        return cookies if cookies is not None else getattr(self, 'cookies', None)

    def get_login(self):
        """Log in through a headless Edge browser and return session cookies.

        Returns:
            dict: cookie name -> value, ready for ``requests``' ``cookies=``.
        """
        # Create the Edge browser instance in headless mode (no window).
        options = webdriver.EdgeOptions()
        options.add_argument('--headless')

        self.driver = webdriver.Edge(options=options)
        try:
            self.driver.get(self.url)

            # Fill in the credentials and submit the login form.
            self.driver.find_element(By.ID, 'loginname').send_keys(self.username)
            self.driver.find_element(By.ID, 'password').send_keys(self.password)
            self.driver.find_element(By.ID, 'to-recover').click()

            # Fixed wait for the post-login page (keeps the original timing);
            # an explicit WebDriverWait would be more robust.
            time.sleep(5)

            raw_cookies = self.driver.get_cookies()
        finally:
            # Always release the browser, even if the login flow raises
            # (the original leaked the driver on any exception).
            self.driver.quit()

        # Cache the cookies so later calls can omit the cookies argument.
        self.cookies = {cookie['name']: cookie['value'] for cookie in raw_cookies}
        return self.cookies

    def get_data(self, cookies, vin, imei):
        """Query device diagnosis data for one (vin, imei) pair.

        Args:
            cookies: session cookie dict from get_login().
            vin: vehicle identification number to look up.
            imei: device IMEI to look up.

        Returns:
            str: raw response body text (JSON as returned by the portal).
        """
        url = 'https://diagnosis.kion-cn.com/lindediagnosis/kccuDevice/findjsonstr'
        # Request headers copied from a real browser session.
        headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "Connection": "keep-alive",
            "Content-Type": "application/json",
            "Host": "diagnosis.kion-cn.com",
            "Origin": "https://diagnosis.kion-cn.com",
            "Referer": "https://diagnosis.kion-cn.com/lindediagnosis/kccuDevice/list",
            "Sec-CH-UA": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": '"Windows"',
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
            "X-Requested-With": "XMLHttpRequest"
        }
        data = {"vin": vin, "imei": imei}
        # Send the POST request as JSON (matches the Content-Type above).
        response = requests.post(url, headers=headers, cookies=cookies, json=data)
        print(response.text)
        # NOTE(review): the original had unreachable code after this return
        # that would have dumped the *request payload* (not the response)
        # to self.output_file; that dead code has been removed.
        return response.text

    def upload_data(self, file_path, cookies):
        """Upload one file to the portal's app file-upload endpoint.

        Args:
            file_path: local path of the file to upload.
            cookies: session cookie dict from get_login().
        """
        url = "https://diagnosis.kion-cn.com/lindediagnosis/fileUpload/upload.do?uploadType=app"
        # Read the whole file into memory; files here are firmware-sized,
        # assumed small enough for that -- TODO confirm.
        with open(file_path, 'rb') as f:
            file_content = f.read()

        files = {'file': (file_path, file_content, 'multipart/form-data')}
        # Request headers copied from a real browser session.
        headers = {
                'Accept': 'application/json',
                'Accept-Encoding': 'gzip, deflate, br, zstd',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Host': 'diagnosis.kion-cn.com',
                'Origin': 'https://diagnosis.kion-cn.com',
                'Referer': 'https://diagnosis.kion-cn.com/lindediagnosis/writePacket/toadd.do',
                'Sec-CH-UA': '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
                'Sec-CH-UA-Mobile': '?0',
                'Sec-CH-UA-Platform': '"Windows"',
                'Sec-Fetch-Dest': 'empty',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Site': 'same-origin',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0',
                'X-Requested-With': 'XMLHttpRequest'
            }
        response = requests.post(url, headers=headers, files=files, cookies=cookies)
        print(response.text)

    def add_new_packet(self, cookies):
        """Create a new OTA write-packet entry on the portal.

        Args:
            cookies: session cookie dict from get_login().
        """
        url = "https://diagnosis.kion-cn.com/lindediagnosis/writePacket/add"
        # FIX: the original header set mistakenly included HTTP *response*
        # headers (Date, Server, Transfer-Encoding, CSP, Access-Control-*,
        # X-Frame-Options, ...) and declared "Content-Type:
        # application/json" while actually sending a form-encoded body via
        # data=. Only genuine request headers are kept; requests sets the
        # correct form Content-Type itself.
        headers = {
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br, zstd',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'Host': 'diagnosis.kion-cn.com',
        'Origin': 'https://diagnosis.kion-cn.com',
        'Referer': 'https://diagnosis.kion-cn.com/lindediagnosis/writePacket/toadd.do',
        'Sec-CH-UA': '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
        'Sec-CH-UA-Mobile': '?0',
        'Sec-CH-UA-Platform': '"Windows"',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0',
        'X-Requested-With': 'XMLHttpRequest'
        }
        payload = {
                    'id': '',  # empty string creates a new packet; set an id to update
                    'remark_rich_text': 'noneedreplacexml',
                    'file_type': '1',
                    'file_name': 'Demo1111 with 无感升级5666',
                    'ft': '1',
                    'pk_file': '2943,2942,2945,2944',  # ids of previously uploaded files
                    'remark_text': '无感升级试验ddddddv'
        }

        # Sent form-encoded, matching the original data= call.
        result = requests.post(url, headers=headers, cookies=cookies, data=payload)
        print(result.text)

    def get_data_info(self, url, cookies=None):
        """Scrape the OTA packet metadata from its edit page.

        Args:
            url: full toupdate.do URL including the packet id.
            cookies: optional session cookies; defaults to the ones cached
                by get_login() (the original relied on a module-level
                ``cookies`` global, which raised NameError when this class
                was used outside that script).

        Returns:
            dict: OTA name/type/remark fields plus any ``key: value`` pairs
            parsed out of the rich-text remark.
        """
        cookies = self._resolve_cookies(cookies)
        response = requests.get(url, cookies=cookies)
        response.raise_for_status()
        # Parse the HTML page.
        soup = BeautifulSoup(response.text, 'html.parser')

        text_dict = {}
        file_name = soup.find("input", {"id": "file_name"})["value"]
        # The last <option> is the selected one on this page.
        file_type = soup.find("select", {"id": "file_type"}).find_all("option")[-1]["value"]
        remark_text = soup.find("input", {"id": "remark_text"})["value"]
        rich_text_content = soup.find("div", {"id": "remark_rich_text_div"}).get_text(separator='\n')
        text_dict['OTA名称'] = file_name
        text_dict['OTA类型'] = file_type
        text_dict['备注'] = remark_text
        text_dict['备注富文本'] = rich_text_content

        # The rich text holds extra "key: value" lines; normalise the
        # full-width colon before splitting.
        for line in rich_text_content.split('\n'):
            line = line.replace('：', ':')
            if ':' in line:
                key, value = line.split(':', 1)
                text_dict[key] = value.strip()
        return text_dict

    def get_table_data(self, url, cookies=None):
        """Scrape the packet's file table (name, size, links) from its page.

        Args:
            url: full todownload.do URL including the packet id.
            cookies: optional session cookies; defaults to the cached ones.

        Returns:
            list[dict]: one dict per file row with file_name, file_size,
            download_link and copy_link keys.
        """
        cookies = self._resolve_cookies(cookies)
        response = requests.get(url, cookies=cookies)
        response.raise_for_status()  # fail fast on HTTP errors

        soup = BeautifulSoup(response.text, 'html.parser')
        table = soup.find('table', {'id': 'simple-table'})

        data = []
        for row in table.find_all('tr'):
            columns = row.find_all('td')
            if not columns:
                continue  # skip the header row
            # The links live in onclick="...('URL')" handlers; the URL is
            # the first single-quoted token.
            download_link = columns[2].find('a', title='下载')['onclick'].split("'")[1]
            copy_link = columns[2].find('a', title='复制链接')['onclick'].split("'")[1]
            data.append({
                'file_name': columns[0].text.strip(),
                'file_size': columns[1].text.strip(),
                'download_link': download_link,
                'copy_link': copy_link
            })
        return data

    def download_file(self, url, file_path, cookies=None):
        """Stream *url* to *file_path* in 8 KiB chunks.

        Args:
            url: direct download URL.
            file_path: local destination path.
            cookies: optional session cookies; defaults to the cached ones.
        """
        cookies = self._resolve_cookies(cookies)
        response = requests.get(url, stream=True, cookies=cookies)
        response.raise_for_status()  # fail fast on HTTP errors

        with open(file_path, 'wb') as file:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)

    def ensure_directory_exists(self, directory):
        """Create *directory* (and parents) if it does not already exist."""
        if not os.path.exists(directory):
            # exist_ok guards the check-then-create race when several
            # runs share the same download tree.
            os.makedirs(directory, exist_ok=True)
            print(f"Directory '{directory}' was created.")
        else:
            print(f"Directory '{directory}' already exists.")

    def save_data(self, data, filename):
        """Append one dict as a CSV row, writing a header on first use.

        Args:
            data: flat dict; keys become the header, values the row.
            filename: CSV path, created if missing.
        """
        file_exists = os.path.isfile(filename)

        with open(filename, 'a', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            if not file_exists:
                writer.writerow(list(data.keys()))  # header only once
            writer.writerow(list(data.values()))

    def run(self, vin='', imei=''):
        """Convenience flow: log in, then fetch device data.

        FIX: the original called the nonexistent ``self.login()`` and
        passed no vin/imei to get_data(), so it always raised.
        """
        cookies = self.get_login()
        return self.get_data(cookies, vin, imei)

if __name__ == '__main__':
    # NOTE(review): credentials are hard-coded; move them to environment
    # variables or a config file before sharing this script.
    url = "https://diagnosis.kion-cn.com/lindediagnosis/logout"
    username = "qizichao"
    password = "Linde123"
    output_file = "datafile.txt"
    diagnosis = get_Diagnosis_Info(url=url, username=username,
                                   password=password, output_file=output_file)
    # Kept at module scope deliberately: the scraping methods historically
    # resolved `cookies` as a global name.
    cookies = diagnosis.get_login()

    # OTA packet pages; the packet id is appended to each base URL.
    url1 = 'https://diagnosis.kion-cn.com/lindediagnosis/writePacket/todownload.do?id='
    url2 = 'https://diagnosis.kion-cn.com/lindediagnosis/writePacket/toupdate.do?id='

    # Packet ids to download. Check the valid id range on the web page
    # first, then list the ids here.
    # FIX: this list and the loop below were dedented out of the
    # __main__ guard, so importing the module crashed with NameError;
    # everything now runs only when executed directly.
    packet_ids = [
        545, 543, 537, 536, 535, 534, 532, 531, 529, 528,
        526, 523, 522, 521, 520, 517, 514, 512, 511, 508,
        504, 502, 490, 488, 487, 484, 482, 480, 479, 478,
        477, 474, 471, 469, 467, 464, 462, 461, 458, 457,
        456, 455, 454, 453, 452, 441, 440, 439, 436, 435,
        434, 433, 431, 424, 421, 382,
    ]

    for number in packet_ids:
        try:
            # One download folder per packet id.
            directory = r'C:\Users\A0080437\Downloads\OTA File List\{id}'.format(id=number)
            diagnosis.ensure_directory_exists(directory)

            # Packet metadata (name, type, remarks) -> dataInfo.csv
            output_data = {'id': number}
            output_data.update(diagnosis.get_data_info(url2 + str(number)))
            diagnosis.save_data(output_data, 'dataInfo.csv')

            # File list (name, size, links) -> fileList.csv, then download
            # every listed file into the packet's folder.
            for item in diagnosis.get_table_data(url1 + str(number)):
                file_list_output_data = {'id': number}
                file_list_output_data.update(item)
                diagnosis.save_data(file_list_output_data, 'fileList.csv')
                filepath = os.path.join(directory, item['file_name'])
                diagnosis.download_file(item['copy_link'], filepath)
        except Exception as e:
            # Best effort: report the failure for this id and keep going.
            print(e)
            continue


# https://diagnosis.kion-cn.com/lindediagnosis/writePacket/list.do?currentPage=1&showCount=200
# 获取所有已经上传的固件信息数据
# 获取数据直接渲染到一个Table中，获取这部分数据只需要提取Table中的数据即可
