"""
 * Copyright (c) KylinSoft Co., Ltd. 2024. All rights reserved.
 * PilotGo-plugin licensed under the Mulan Permissive Software License, Version 2.
 * See LICENSE file for more details.
 * Author: wangqingzheng <wangqingzheng@kylinos.cn>
 * Date: Mon Feb 26 11:15:07 2024 +0800
"""
import logging
import numpy as np
from rest_framework import status, viewsets
# Create your views here.
from appStore.project.models import Project
from appStore.stream.models import Stream
from appStore.stream.serializers import StreamSerializer
from appStore.utils.common import json_response, get_error_message, get_analyze_data

log = logging.getLogger('kytuninglog')

class StreamViewSet(viewsets.ModelViewSet):
    """Stream benchmark data management.

    Builds a column-oriented comparison table in ``list()``: each Stream test
    run becomes one ``columnN`` entry in every row of ``datas`` (14 rows:
    title, project name, execute command, modified parameters, then five
    single-thread and five multi-thread metrics), followed by an average
    column and — for comparison data sets — a relative-difference column
    against the recorded base (average) column.
    """
    queryset = Stream.objects.all().order_by('id')
    serializer_class = StreamSerializer

    # Metric fields in table-row order: rows 4..13 of ``datas`` map to these,
    # and ``put()`` updates exactly this set of fields.
    _METRIC_FIELDS = (
        'single_array_size', 'single_copy', 'single_scale', 'single_add',
        'single_triad', 'multi_array_size', 'multi_copy', 'multi_scale',
        'multi_add', 'multi_triad',
    )

    @staticmethod
    def _average(values):
        """Return the mean of ``values`` rounded to 2 decimals, or None.

        Guards the empty list explicitly: ``np.mean([])`` emits a
        RuntimeWarning and returns nan, which the original code then had to
        filter out with ``np.isnan``.
        """
        if not values:
            return None
        return float(np.round(np.mean(values), 2))

    def get_data(self, serializer, datas, title_index, column_index, base_column_index):
        """Append the columns for one Stream queryset to ``datas``.

        :param serializer: queryset (or iterable) of Stream records sharing
            one ``env_id`` — base data on the first call, comparison data on
            subsequent calls.
        :param datas: list of 14 row dicts keyed by ``columnN``; mutated in
            place and also returned.
        :param title_index: running counter used for the ``Stream#N`` titles.
        :param column_index: next free ``columnN`` slot.
        :param base_column_index: '' on the first (base) call; afterwards the
            column index of the base average, used for the 对比值 column.
        :returns: ``(datas, title_index, column_index, base_column_index)``.
        """
        # Materialize once: the original called ``serializer.first()`` ~13
        # times, issuing one DB query per attribute read.
        records = list(serializer)
        if len(records) <= 1:
            col = 'column' + str(column_index)
            # Full data for the base/comparison set (0 or 1 record).
            datas[0][col] = 'Stream#' + str(title_index)
            if not records:
                for i in range(1, 14):
                    datas[i][col] = None
            else:
                record = records[0]
                # NOTE(review): assumes a Project row exists for env_id —
                # ``.first()`` returning None would raise AttributeError here,
                # same as the original code.
                datas[1][col] = Project.objects.filter(env_id=record.env_id).first().project_name
                datas[2][col] = record.execute_cmd
                datas[3][col] = record.modify_parameters
                for row, field in enumerate(self._METRIC_FIELDS, start=4):
                    datas[row][col] = getattr(record, field)
            column_index += 1
            title_index += 1
            title = '平均值(基准数据)' if not base_column_index else '平均值'
            # Average column: with <= 1 record the "average" is just a copy
            # of the previous column's values.
            col = 'column' + str(column_index)
            prev = 'column' + str(column_index - 1)
            datas[0][col] = title
            datas[1][col] = datas[1][prev]
            datas[2][col] = ''
            # BUG FIX: row 3 was never filled in this branch, unlike the
            # multi-record branch below, leaving the key missing entirely.
            datas[3][col] = ''
            for i in range(4, 14):
                datas[i][col] = datas[i][prev]
            column_index += 1
        else:
            # Per-metric averages over the non-None values of all records.
            averages = {
                field: self._average(
                    [getattr(r, field) for r in records if getattr(r, field) is not None]
                )
                for field in self._METRIC_FIELDS
            }

            # Full data: one column per record.
            for record in records:
                col = 'column' + str(column_index)
                datas[0][col] = 'Stream#' + str(title_index)
                datas[1][col] = Project.objects.filter(env_id=record.env_id).first().project_name
                datas[2][col] = record.execute_cmd
                datas[3][col] = record.modify_parameters
                for row, field in enumerate(self._METRIC_FIELDS, start=4):
                    datas[row][col] = getattr(record, field)
                column_index += 1
                title_index += 1
            title = '平均值(基准数据)' if not base_column_index else '平均值'
            # Average column for the set.
            col = 'column' + str(column_index)
            datas[0][col] = title
            datas[1][col] = datas[1]['column' + str(column_index - 1)]
            datas[2][col] = ''
            datas[3][col] = ''
            for row, field in enumerate(self._METRIC_FIELDS, start=4):
                datas[row][col] = averages[field]
            column_index += 1

        if not base_column_index:
            # First call: remember the base average column for later diffs.
            base_column_index = column_index - 1
        else:
            # Comparison call: percentage difference vs. the base average.
            col = 'column' + str(column_index)
            base_col = 'column' + str(base_column_index)
            prev = 'column' + str(column_index - 1)
            datas[0][col] = '对比值'
            for i in range(1, 4):
                datas[i][col] = ''
            for i in range(4, 14):
                current = datas[i][prev]
                base = datas[i][base_col]
                if current is not None and base is not None:
                    datas[i][col] = "%.2f%%" % ((current - base) / base * 100)
                else:
                    datas[i][col] = None
            column_index += 1
        return datas, title_index, column_index, base_column_index

    def list(self, request, *args, **kwargs):
        """Return the base/comparison table plus analysis for one env_id.

        Query params: ``env_id`` (base data set) and ``comparsionIds``
        (comma-separated env_ids to compare against the base).
        """
        env_id = request.GET.get('env_id')
        # BUG FIX: a missing ``comparsionIds`` param used to raise
        # AttributeError (None.split); treat it as "no comparisons".
        comparsion_ids = (request.GET.get('comparsionIds') or '').split(',')
        base_queryset = Stream.objects.filter(env_id=env_id).all()
        if not base_queryset:
            return json_response({}, status.HTTP_200_OK, '列表')
        datas = [{'column1': 'Stream', 'column2': ''}, {'column1': '项目名称', 'column2': ''},
                 {'column1': '执行命令', 'column2': ''}, {'column1': '修改参数', 'column2': ''},
                 {'column1': '单线程', 'column2': 'Array size'}, {'column1': '单线程', 'column2': 'Copy'},
                 {'column1': '单线程', 'column2': 'Scale'}, {'column1': '单线程', 'column2': 'Add'},
                 {'column1': '单线程', 'column2': 'Triad'}, {'column1': '多线程', 'column2': 'Array size'},
                 {'column1': '多线程', 'column2': 'Copy'}, {'column1': '多线程', 'column2': 'Scale'},
                 {'column1': '多线程', 'column2': 'Add'}, {'column1': '多线程', 'column2': 'Triad'}, ]
        title_index = 1
        column_index = 3
        base_column_index = ''
        datas, title_index, column_index, base_column_index = self.get_data(
            base_queryset, datas, title_index, column_index, base_column_index)
        if comparsion_ids != ['']:
            # Append one block of columns per comparison env_id.
            for comparative_id in comparsion_ids:
                comparsion_queryset = Stream.objects.filter(env_id=comparative_id).all()
                datas, title_index, column_index, base_column_index = self.get_data(
                    comparsion_queryset, datas, title_index, column_index, base_column_index)

        analyze_data = get_analyze_data(datas, 'stream')
        all_datas = {'datas': datas, 'analyze_data': analyze_data}
        return json_response(all_datas, status.HTTP_200_OK, '列表')

    def get_modify_stream(self, request, *args, **kwargs):
        """Return the serialized Stream records for one ``env_id``."""
        env_id = request.GET.get('env_id')
        queryset = Stream.objects.filter(env_id=env_id).all()
        if not queryset:
            return json_response({}, status.HTTP_200_OK, '列表')
        serializer = self.get_serializer(queryset, many=True)
        return json_response(serializer.data, status.HTTP_200_OK, '列表')

    def create(self, request, *args, **kwargs):
        """Persist every ``stream*`` entry found in ``request.data_stream``.

        Each entry carries execute_cmd/modify_parameters plus '单线程' and
        '多线程' metric dicts. Returns a 400 response with the collected
        serializer errors when any entry fails validation, else None.
        """
        serializer_stream_errors = []
        error_message = []
        for key, stream_json in request.__dict__['data_stream'].items():
            if not key.lower().startswith('stream'):
                continue
            data_stream = {
                'env_id': request.__dict__['data_stream']['env_id'],
                'execute_cmd': stream_json.get('execute_cmd'),
                'modify_parameters': stream_json.get('modify_parameters'),
                'single_array_size': stream_json['单线程']['Array size'],
                'single_copy': stream_json['单线程']['Copy'],
                'single_scale': stream_json['单线程']['Scale'],
                'single_add': stream_json['单线程']['Add'],
                'single_triad': stream_json['单线程']['Triad'],
                'multi_array_size': stream_json['多线程']['Array size'],
                'multi_copy': stream_json['多线程']['Copy'],
                'multi_scale': stream_json['多线程']['Scale'],
                'multi_add': stream_json['多线程']['Add'],
                'multi_triad': stream_json['多线程']['Triad'],
            }
            # Normalize empty strings to None so numeric fields validate.
            data_stream = {key_: (None if isinstance(value, str) and value == '' else value)
                           for key_, value in data_stream.items()}
            serializer_stream = StreamSerializer(data=data_stream)
            if serializer_stream.is_valid():
                self.perform_create(serializer_stream)
            else:
                # BUG FIX: message typo 'stram' -> 'stream'; lazy logger args
                # instead of eager %-formatting.
                log.info('stream数据存储错误 ：%s，', serializer_stream.errors)
                log.info('stream存储数据为 ：%s，', data_stream)
                serializer_stream_errors.append(serializer_stream.errors)
                error_message.append(get_error_message(serializer_stream))

        if serializer_stream_errors:
            print(serializer_stream_errors, "stream")
            return json_response(serializer_stream_errors, status.HTTP_400_BAD_REQUEST, error_message)
        return None

    def put(self, request, *args, **kwargs):
        """Update the metric fields of one Stream record.

        Only a superuser or the owning project's user may modify the record.
        """
        stream_id = request.data.get('id')  # avoid shadowing builtin ``id``
        # Single query replaces the original filter-truthiness check plus a
        # second ``get()`` whose not-found branch was unreachable.
        stream_data = Stream.objects.filter(id=stream_id).first() if stream_id else None
        if stream_data is None:
            return json_response({}, status.HTTP_205_RESET_CONTENT, '请传递正确的测试id')
        # NOTE(review): assumes a Project row exists for env_id — ``.first()``
        # returning None would raise AttributeError, same as the original.
        user_name = Project.objects.filter(env_id=stream_data.env_id).first().user_name
        if not (request.user.is_superuser or request.user.chinese_name == user_name):
            return json_response({}, status.HTTP_205_RESET_CONTENT, '此用户不允许修改该数据')
        for field in self._METRIC_FIELDS:
            setattr(stream_data, field, request.data.get(field))
        stream_data.save()
        return json_response({}, status.HTTP_200_OK, '修改成功')


