# from rest_framework import serializers
# from knowledge_extract.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
#
# from django.contrib.auth.models import User
#
#
# class UserSerializer(serializers.ModelSerializer):
#     snippets = serializers.PrimaryKeyRelatedField(many=True, queryset=Snippet.objects.all())
#
#     class Meta:
#         model = User
#         fields = ('id', 'username', 'snippets')
#
#
# class SnippetSerializer(serializers.ModelSerializer):
#     owner = serializers.ReadOnlyField(source='owner.username')
#
#     class Meta:
#         model = Snippet
#         fields = ('id', 'owner', 'title', 'code', 'linenos', 'language', 'style')
import json
from django.contrib.auth.models import User
from django.core.files.base import ContentFile
from rest_framework import serializers

from knowledge_extract.models import Snippet, Dataset, ModelRepo, Experiment


class AnnotationDatasetSerializer(serializers.Serializer):
    """Turn a raw annotation payload into a persisted :class:`Dataset`.

    The incoming payload carries metadata (name/task/area/descriptions), the
    annotated samples (``annotation_data``) and the label set (``labels``).
    On create/update the samples are cleaned, split into train/dev/test JSON
    files, and a processing-interface module is rendered from the labels.
    """

    # Fraction of the annotated samples placed in the training split;
    # the remainder becomes the dev split.
    TRAIN_RATIO = 0.8

    name = serializers.CharField(max_length=100,
                            help_text="数据集名称,如重复则覆盖")
    task_choices = (
        ("命名实体识别", "命名实体识别"),
        ("关系抽取", "关系抽取"),
        ("实体关系联合抽取", "实体关系联合抽取")
    )
    task = serializers.ChoiceField(choices=task_choices, default="命名实体识别",
                                   help_text="数据集支持的任务,默认为命名实体识别")
    area = serializers.CharField(max_length=100, default="医疗",
                            help_text="数据集所属领域,默认为医疗")

    short_description = serializers.CharField(help_text="数据集的简单描述")
    long_description = serializers.CharField(help_text="数据集的详细描述,为markdown格式的纯文本")
    annotation_data = serializers.JSONField()
    labels = serializers.JSONField()

    def split_data(self, data, train_ratio=None):
        """Split samples into ``(train_data, dev_data, test_data)``.

        Samples with ``annotated == 0`` form the test set; annotated samples
        are split train/dev by ``train_ratio`` (defaults to ``TRAIN_RATIO``).
        """
        if train_ratio is None:
            train_ratio = self.TRAIN_RATIO
        test_data = [e for e in data if e['annotated'] == 0]
        rem_data = [e for e in data if e['annotated'] == 1]
        train_size = int(len(rem_data) * train_ratio)
        train_data = rem_data[:train_size]
        # Slice from train_size rather than with a negative index: the
        # original `rem_data[-dev_size:]` returns the WHOLE list when
        # dev_size == 0 (e.g. train_ratio == 1.0), duplicating every
        # training sample into dev.
        dev_data = rem_data[train_size:]
        return train_data, dev_data, test_data

    def generate_interface(self, label):
        """Render the dataset's processing code from the on-disk template.

        The template is a %-format string with a single placeholder that
        receives ``repr``-like text of the label structure.
        """
        # Explicit encoding so reads do not depend on the host locale.
        with open('knowledge_extract/interface_template', 'r', encoding='utf-8') as f:
            template = f.read()
        return template % str(label)

    def _build_artifacts(self, data, labels):
        """Clean and index the samples, split them, and render the code.

        Returns ``(train_data, dev_data, test_data, process_code)``.
        """
        # Drop samples with no text; annotated samples must also carry
        # at least one entity.
        data = [e for e in data
                if e['words'] and ((e['annotated'] == 1 and e['entities'])
                                   or (e['annotated'] == 0))]
        # Give every surviving sample a stable 'index' field.
        data = [{**e, 'index': i} for i, e in enumerate(data)]
        train_data, dev_data, test_data = self.split_data(data)
        process_code = self.generate_interface(labels)
        return train_data, dev_data, test_data, process_code

    def _save_artifacts(self, target, train_data, dev_data, test_data, process_code):
        """Write the three split files and the interface module onto ``target``."""
        target.train_data.save('train.json', ContentFile(json.dumps(train_data).encode('utf-8')))
        target.dev_data.save('dev.json', ContentFile(json.dumps(dev_data).encode('utf-8')))
        target.test_data.save('test.json', ContentFile(json.dumps(test_data).encode('utf-8')))
        target.process_code.save('interface.py', ContentFile(process_code.encode('utf-8')))

    def create(self, validated_data):
        """Create a new Dataset from the validated payload and return it."""
        data = validated_data.pop('annotation_data', None)
        labels = validated_data.pop('labels', None)
        train_data, dev_data, test_data, process_code = self._build_artifacts(data, labels)
        dataset = Dataset(
            name=validated_data.get('name'),
            task=validated_data.get('task'),
            area=validated_data.get('area'),
            short_description=validated_data.get('short_description'),
            long_description=validated_data.get('long_description'),
            # NOTE(review): ownership is hard-coded to the 'admin' account;
            # the original assumed that user always exists — confirm.
            owner=User.objects.get(username='admin')
        )
        dataset.save()
        self._save_artifacts(dataset, train_data, dev_data, test_data, process_code)
        dataset.save()  # save again so the freshly-set file fields persist
        return dataset

    def update(self, instance, validated_data):
        """Update an existing Dataset.

        Split files and the interface module are regenerated only when BOTH
        ``annotation_data`` and ``labels`` are supplied (and non-empty).
        """
        instance.name = validated_data.get('name', instance.name)
        instance.task = validated_data.get('task', instance.task)
        instance.area = validated_data.get('area', instance.area)
        instance.short_description = validated_data.get('short_description',
                                                        instance.short_description)
        instance.long_description = validated_data.get('long_description',
                                                       instance.long_description)

        data = validated_data.get('annotation_data', None)
        labels = validated_data.get('labels', None)

        if data and labels:
            train_data, dev_data, test_data, process_code = self._build_artifacts(data, labels)
            self._save_artifacts(instance, train_data, dev_data, test_data, process_code)

        instance.save()
        return instance



class DatasetSerializer(serializers.ModelSerializer):
    """Detail serializer for Dataset.

    ``owner`` is exposed read-only as the owning user's username; internal
    bookkeeping and filesystem-path fields are excluded from the public API.
    """
    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        model = Dataset
        exclude = ['data_source', 'experiment_times', 'train_data_path',
                   'dev_data_path', 'test_data_path', 'process_code_path',
                   'download_url']


class DatasetListSerializer(serializers.ModelSerializer):
    """Compact Dataset representation for list endpoints."""

    # Read-only username of the owning user.
    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        model = Dataset
        fields = (
            'id',
            'name',
            'short_description',
            'created',
            'owner',
            'experiment_times',
            'task',
            'area',
        )


class ModelRepoSerializer(serializers.ModelSerializer):
    """Serializes every field of ModelRepo, with a read-only owner username."""

    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        model = ModelRepo
        fields = '__all__'


class ExperimentSerializer(serializers.ModelSerializer):
    """Serializes every field of Experiment, with a read-only owner username."""

    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        model = Experiment
        fields = '__all__'

class PredictionSerializer(serializers.Serializer):
    """Input payload for a prediction request: structured samples plus
    the raw source text."""

    predict_data = serializers.JSONField()
    raw_text = serializers.CharField()

class SamplingSerializer(serializers.Serializer):
    """Input payload for a sampling request: target dataset, task name
    and the number of samples to draw."""

    sampling_dataset = serializers.CharField()
    sampling_task = serializers.CharField()
    sampling_num = serializers.IntegerField()

class ExperimentListSerializer(serializers.ModelSerializer):
    """Compact Experiment representation for list endpoints."""

    # Read-only username of the owning user.
    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        model = Experiment
        fields = (
            'id',
            'dataset',
            'model',
            'task',
            'owner',
            'created',
            'run_status',
            'model_version',
            'description',
            'dataset_name',
            'model_name',
            'metric',
        )


class SnippetSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of Snippet.

    Exposes the owner's username read-only and a ``highlight`` link to the
    snippet's HTML-highlighted view.
    """

    owner = serializers.ReadOnlyField(source='owner.username')
    highlight = serializers.HyperlinkedIdentityField(
        view_name='snippet-highlight',
        format='html',
    )

    class Meta:
        model = Snippet
        fields = (
            'url',
            'id',
            'highlight',
            'owner',
            'title',
            'code',
            'linenos',
            'language',
            'style',
        )


class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of a User and links to their datasets."""

    # Read-only links to each Dataset owned by the user.
    datasets = serializers.HyperlinkedRelatedField(
        many=True,
        view_name='dataset-detail',
        read_only=True,
    )

    class Meta:
        model = User
        fields = ('url', 'id', 'username', 'datasets')
