import csv
import io
import json
import logging
from collections import Counter
from itertools import chain
import itertools as it
from pydoc import Doc
import re
import os
from typing import Tuple, List, Dict, Any
import emoji
import requests
from pathlib import Path

from app.settings import I3CITY_IP, I3CITY_PORT
from django.db.utils import IntegrityError
from django.http import HttpResponseRedirect, JsonResponse, HttpResponse
from django.shortcuts import get_object_or_404, get_list_or_404
from django.urls import reverse
from django.forms import model_to_dict
from django.utils.encoding import escape_uri_path
from django.views.decorators.csrf import csrf_exempt
from django.middleware.csrf import get_token
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, generics, filters, mixins
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.parsers import MultiPartParser
from rest_framework import status
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema


from .models import Project, Label, Document, Setting, RecommendationHistory, SequenceAnnotation, RelationLabel, \
    AttributeLabel, AttributeAnnotation, RelationAnnotation, RecommendationRule, PreSettings, TrainedModel
from .permissions import IsAdminUserAndWriteOnly, IsProjectUser, IsOwnAnnotation
from .serializers import ProjectSerializer, LabelSerializer, DocumentSerializer, SettingSerializer, \
    RecommendationHistorySerializer, RelationLabelSerializer, AttributeLabelSerializer, AttributeAnnotationSerializer,\
    RecommendationRuleSerializer, PreSettingsSerializer, TrainedModelSerializer
import logging
logger = logging.getLogger(__name__)
# import spacy
# from spacy.tokens import Doc
import time
# from alpaca_serving.client import *
from .client import *

# TODO: remove this (unused placeholder for the alpaca serving client)
alpaca_client = None
BASE_DIR=Path(__file__).resolve().parent

from server.service.client import ModelServerClient


class ProjectViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for projects, restricted to the requesting user's projects."""

    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
    pagination_class = None
    permission_classes = (IsAuthenticated, IsAdminUserAndWriteOnly)

    def get_queryset(self):
        # Only expose projects the current user is a member of.
        return self.request.user.projects

    @action(methods=['get'], detail=True)
    def progress(self, request, pk=None):
        """Return annotation progress statistics for one project."""
        return Response(self.get_object().get_progress())

    @action(detail=True, methods=['get'], url_path='traniedmodels')
    def get_experiments(self, request, *args, **kwargs):
        """List every trained model (experiment) recorded for this project."""
        project = self.get_object()
        trained = TrainedModel.objects.filter(project_id=project.id)
        return Response(TrainedModelSerializer(trained, many=True).data)


class LabelList(generics.ListCreateAPIView):
    """List and create labels scoped to a single project."""

    queryset = Label.objects.all()
    serializer_class = LabelSerializer
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser, IsAdminUserAndWriteOnly)

    def get_queryset(self):
        # drf-yasg schema generation runs without URL kwargs available.
        if getattr(self, 'swagger_fake_view', False):
            return Label.objects.none()
        return self.queryset.filter(project=self.kwargs['project_id'])

    def perform_create(self, serializer):
        # Attach the new label to the project from the URL.
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        serializer.save(project=project)


class RelationLabelList(generics.ListCreateAPIView):
    """List and create relation labels scoped to a single project."""

    queryset = RelationLabel.objects.all()
    serializer_class = RelationLabelSerializer
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser, IsAdminUserAndWriteOnly)

    def get_queryset(self):
        # Schema generation has no URL kwargs; return an empty queryset.
        if getattr(self, 'swagger_fake_view', False):
            return RelationLabel.objects.none()
        return self.queryset.filter(project=self.kwargs['project_id'])

    def perform_create(self, serializer):
        # Bind the relation label to the project taken from the URL.
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        serializer.save(project=project)


class AttributeLabelList(generics.ListCreateAPIView):
    """List and create attribute labels for a project.

    Supports an optional ``label_ids`` query parameter (comma-separated
    integers) narrowing the result to attributes attached to those labels.
    """

    queryset = AttributeLabel.objects.all()
    serializer_class = AttributeLabelSerializer
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser, IsAdminUserAndWriteOnly)

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return AttributeLabel.objects.none()
        queryset = self.queryset.filter(project=self.kwargs['project_id'])
        label_ids = self.request.query_params.get('label_ids')
        if not label_ids:
            return queryset
        label_ids = list(map(int, label_ids.split(",")))
        # BUG FIX: chain onto the project-scoped queryset instead of the
        # class-level queryset, so attribute labels belonging to other
        # projects can no longer leak into the response.
        return queryset.filter(label__in=label_ids)

    def perform_create(self, serializer):
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        serializer.save(project=project)

    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter('label_ids', openapi.IN_QUERY,
                              type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), required=False)
        ]
    )
    def get(self, request, *args, **kwargs):
        # BUG FIX: forward the actual request object; the original passed
        # ``self`` in the request position (it only worked because DRF's
        # list() reads ``self.request`` internally).
        return self.list(request, *args, **kwargs)


class ProjectStatsAPI(APIView):
    """Aggregate annotation counts per label, per user and per relation."""

    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser, IsAdminUserAndWriteOnly)

    def get(self, request, *args, **kwargs):
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        label_names = [label.text for label in project.labels.all()]
        user_names = [user.username for user in project.users.all()]
        relation_names = [rl.relation_name for rl in project.relation_labels.all()]
        documents = [doc for doc in project.documents.all()]

        # Count occurrences across all documents' annotations.
        label_counts = Counter(
            a.label.text for doc in documents for a in doc.get_annotations())
        user_counts = Counter(
            a.user.username for doc in documents for a in doc.get_annotations())
        relation_counts = Counter(
            a.relation_label.relation_name
            for doc in documents for a in doc.get_relation_annotations())

        return Response({
            'label': {'labels': label_names,
                      'data': [label_counts[name] for name in label_names]},
            'user': {'users': user_names,
                     'data': [user_counts[name] for name in user_names]},
            'relation': {'relations': relation_names,
                         'data': [relation_counts[name] for name in relation_names]},
        })


class LabelDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single label within a project."""

    queryset = Label.objects.all()
    serializer_class = LabelSerializer
    permission_classes = (IsAuthenticated, IsProjectUser, IsAdminUser)

    def get_queryset(self):
        # Schema generation has no URL kwargs available.
        if getattr(self, 'swagger_fake_view', False):
            return Label.objects.none()
        return self.queryset.filter(project=self.kwargs['project_id'])

    def get_object(self):
        candidates = self.filter_queryset(self.get_queryset())
        label = get_object_or_404(candidates, pk=self.kwargs['label_id'])
        self.check_object_permissions(self.request, label)
        return label


class RelationLabelDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single relation label within a project."""

    queryset = RelationLabel.objects.all()
    serializer_class = RelationLabelSerializer
    permission_classes = (IsAuthenticated, IsProjectUser, IsAdminUser)

    def get_queryset(self):
        # Schema generation has no URL kwargs available.
        if getattr(self, 'swagger_fake_view', False):
            return RelationLabel.objects.none()
        return self.queryset.filter(project=self.kwargs['project_id'])

    def get_object(self):
        candidates = self.filter_queryset(self.get_queryset())
        relation = get_object_or_404(candidates, pk=self.kwargs['relation_id'])
        self.check_object_permissions(self.request, relation)
        return relation


class AttributeLabelDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single attribute label within a project."""

    queryset = AttributeLabel.objects.all()
    serializer_class = AttributeLabelSerializer
    permission_classes = (IsAuthenticated, IsProjectUser, IsAdminUser)

    def get_queryset(self):
        # Schema generation has no URL kwargs available.
        if getattr(self, 'swagger_fake_view', False):
            return AttributeLabel.objects.none()
        return self.queryset.filter(project=self.kwargs['project_id'])

    def get_object(self):
        candidates = self.filter_queryset(self.get_queryset())
        attribute = get_object_or_404(candidates, pk=self.kwargs['attribute_id'])
        self.check_object_permissions(self.request, attribute)
        return attribute

class DocumentList(generics.ListCreateAPIView):
    """List and create documents for a project.

    Optional query parameters:
      * ``active_indices`` — comma-separated indices returned by active
        learning; restricts the result to those documents.
      * ``is_checked`` — when combined with ``active_indices``, filters on
        the project's checked/unchecked documents.
    """

    queryset = Document.objects.all()

    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    search_fields = ('text',)
    permission_classes = (IsAuthenticated, IsProjectUser, IsAdminUserAndWriteOnly)

    def get_serializer_class(self):
        if getattr(self, 'swagger_fake_view', False):
            from .serializers import SequenceDocumentSerializer
            return SequenceDocumentSerializer
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        # The serializer depends on the project's annotation type.
        self.serializer_class = project.get_document_serializer()
        return self.serializer_class

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return Document.objects.none()
        queryset = self.queryset.filter(project=self.kwargs['project_id'])
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        active_indices = self.request.query_params.get('active_indices')
        if not active_indices:
            return queryset
        indices = list(map(int, active_indices.split(",")))
        queryset = project.get_index_documents(indices)
        if not self.request.query_params.get('is_checked'):
            return queryset
        is_null = self.request.query_params.get('is_checked') == 'true'
        return project.get_documents(is_null).distinct()

    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter('active_indices', openapi.IN_QUERY, description="indices returned by active learning",
                              type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), required=False)
        ]
    )
    def get(self, request, *args, **kwargs):
        # BUG FIX: forward the real request; the original passed ``self``
        # in the request position.
        return self.list(request, *args, **kwargs)

    def perform_create(self, serializer):
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        try:
            serializer.save(project=project)
        except IntegrityError:
            # Duplicate documents are tolerated (best-effort import kept
            # from the original), but the failure is logged instead of
            # printed to stdout.
            logger.warning("IntegrityError creating document for project %s",
                           project.pk)


class DocumentDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete one document of a project."""

    queryset = Document.objects.all()
    serializer_class = DocumentSerializer
    permission_classes = (IsAuthenticated, IsProjectUser, IsAdminUser)

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return Document.objects.none()
        return self.queryset.filter(project=self.kwargs['project_id'])

    def get_object(self):
        candidates = self.filter_queryset(self.get_queryset())
        document = get_object_or_404(candidates, pk=self.kwargs['doc_id'])
        self.check_object_permissions(self.request, document)
        return document

    def patch(self, request, *args, **kwargs):
        # Expose PATCH as a partial update.
        return self.partial_update(request, *args, **kwargs)


class AnnotationList(generics.ListCreateAPIView):
    """List and create the current user's annotations on one document."""

    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser)

    def get_serializer_class(self):
        if getattr(self, 'swagger_fake_view', False):
            from .serializers import SequenceAnnotationSerializer
            return SequenceAnnotationSerializer
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        # Serializer depends on the project's annotation type.
        self.serializer_class = project.get_annotation_serializer()
        return self.serializer_class

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return SequenceAnnotation.objects.none()
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        document = project.documents.get(id=self.kwargs['doc_id'])
        # Only the requesting user's own annotations are listed.
        self.queryset = document.get_annotations().filter(user=self.request.user)
        return self.queryset

    def perform_create(self, serializer):
        doc = get_object_or_404(Document, pk=self.kwargs['doc_id'])
        serializer.save(document=doc, user=self.request.user)

    def delete(self, request, *args, **kwargs):
        """Remove every annotation on the document."""
        doc = get_object_or_404(Document, pk=self.kwargs['doc_id'])
        doc.delete_annotations()
        return Response(status=status.HTTP_204_NO_CONTENT)

    def create(self, request, *args, **kwargs):
        try:
            return super().create(request, *args, **kwargs)
        except IntegrityError:
            # Duplicate annotation: answer 400 as before, but log the event
            # instead of printing to stdout.
            logger.warning("IntegrityError creating annotation on doc %s",
                           self.kwargs.get('doc_id'))
            content = {'error': 'IntegrityError'}
            return Response(content, status=status.HTTP_400_BAD_REQUEST)


class AnnotationDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single sequence annotation."""

    permission_classes = (IsAuthenticated, IsProjectUser, IsOwnAnnotation)

    def get_serializer_class(self):
        if getattr(self, 'swagger_fake_view', False):
            from .serializers import SequenceAnnotationSerializer
            return SequenceAnnotationSerializer
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        self.serializer_class = project.get_annotation_serializer()
        return self.serializer_class

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return SequenceAnnotation.objects.none()
        document = get_object_or_404(Document, pk=self.kwargs['doc_id'])
        self.queryset = document.get_annotations()
        return self.queryset

    def get_object(self):
        queryset = self.filter_queryset(self.get_queryset())
        obj = get_object_or_404(queryset, pk=self.kwargs['annotation_id'])
        self.check_object_permissions(self.request, obj)
        return obj

    def delete(self, request, *args, **kwargs):
        """Delete the annotation plus any recommendation-history entry for
        the same word/label pair.

        Always answers 204 (the original's best-effort contract is kept);
        failures are logged instead of printed.
        """
        try:
            # ROBUSTNESS FIX: .first() instead of [0], which raised
            # IndexError for a missing annotation (swallowed by the broad
            # except and merely printed).
            annotation = SequenceAnnotation.objects.filter(
                pk=kwargs["annotation_id"]).first()
            if annotation is not None:
                word = annotation.document.text[
                    annotation.start_offset:annotation.end_offset]
                if annotation.label:
                    history = RecommendationHistory.objects.filter(
                        word=word, label=annotation.label).first()
                    if history is not None:
                        history.delete()
                annotation.delete()
        except Exception:
            logger.exception("error deleting annotation %s",
                             kwargs.get("annotation_id"))
        return Response(status=status.HTTP_204_NO_CONTENT)


class RelationAnnotationList(generics.ListCreateAPIView):
    """List and create relation annotations on one document."""

    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser)

    def get_serializer_class(self):
        if getattr(self, 'swagger_fake_view', False):
            from .serializers import RelationAnnotationSerializer
            return RelationAnnotationSerializer
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        self.serializer_class = project.get_relation_annotation_serializer()
        return self.serializer_class

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return RelationAnnotation.objects.none()
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        document = project.documents.get(id=self.kwargs['doc_id'])
        self.queryset = document.get_relation_annotations()
        return self.queryset

    def perform_create(self, serializer):
        doc = get_object_or_404(Document, pk=self.kwargs['doc_id'])
        serializer.save(document=doc)

    def delete(self, request, *args, **kwargs):
        """Remove every relation annotation on the document."""
        doc = get_object_or_404(Document, pk=self.kwargs['doc_id'])
        doc.delete_relation_annotations()
        return Response(status=status.HTTP_204_NO_CONTENT)

    def create(self, request, *args, **kwargs):
        try:
            return super().create(request, *args, **kwargs)
        except IntegrityError:
            # Duplicate relation annotation: answer 400 as before, but log
            # the event instead of printing to stdout.
            logger.warning("IntegrityError creating relation annotation on doc %s",
                           self.kwargs.get('doc_id'))
            content = {'error': 'IntegrityError'}
            return Response(content, status=status.HTTP_400_BAD_REQUEST)


class RelationAnnotationDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single relation annotation."""

    permission_classes = (IsAuthenticated, IsProjectUser)

    def get_serializer_class(self):
        if getattr(self, 'swagger_fake_view', False):
            from .serializers import RelationAnnotationSerializer
            return RelationAnnotationSerializer
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        self.serializer_class = project.get_relation_annotation_serializer()
        return self.serializer_class

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return RelationAnnotation.objects.none()
        document = get_object_or_404(Document, pk=self.kwargs['doc_id'])
        self.queryset = document.get_relation_annotations()
        return self.queryset

    def get_object(self):
        queryset = self.filter_queryset(self.get_queryset())
        obj = get_object_or_404(queryset, pk=self.kwargs['relation_annotation_id'])
        self.check_object_permissions(self.request, obj)
        return obj

    def delete(self, request, *args, **kwargs):
        """Best-effort delete; always answers 204, logging any failure."""
        try:
            # ROBUSTNESS FIX: .first() instead of [0], which raised
            # IndexError for a missing annotation.
            annotation = RelationAnnotation.objects.filter(
                pk=kwargs["relation_annotation_id"]).first()
            if annotation is not None:
                annotation.delete()
        except Exception:
            logger.exception("error deleting relation annotation %s",
                             kwargs.get("relation_annotation_id"))
        return Response(status=status.HTTP_204_NO_CONTENT)


class AttributeAnnotationList(generics.ListCreateAPIView):
    """List and create attribute annotations on one document."""

    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser)

    def get_serializer_class(self):
        if getattr(self, 'swagger_fake_view', False):
            from .serializers import AttributeAnnotationSerializer
            return AttributeAnnotationSerializer
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        self.serializer_class = project.get_attribute_annotation_serializer()
        return self.serializer_class

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return AttributeAnnotation.objects.none()
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        document = project.documents.get(id=self.kwargs['doc_id'])
        self.queryset = document.get_attribute_annotations()
        return self.queryset

    def perform_create(self, serializer):
        doc = get_object_or_404(Document, pk=self.kwargs['doc_id'])
        serializer.save(document=doc)

    def delete(self, request, *args, **kwargs):
        """Remove every attribute annotation on the document."""
        doc = get_object_or_404(Document, pk=self.kwargs['doc_id'])
        doc.delete_attribute_annotations()
        return Response(status=status.HTTP_204_NO_CONTENT)

    def create(self, request, *args, **kwargs):
        try:
            return super().create(request, *args, **kwargs)
        except IntegrityError:
            # Duplicate attribute annotation: answer 400 as before, but log
            # the event instead of printing to stdout.
            logger.warning("IntegrityError creating attribute annotation on doc %s",
                           self.kwargs.get('doc_id'))
            content = {'error': 'IntegrityError'}
            return Response(content, status=status.HTTP_400_BAD_REQUEST)


class AttributeAnnotationDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single attribute annotation."""

    permission_classes = (IsAuthenticated, IsProjectUser)

    def get_serializer_class(self):
        if getattr(self, 'swagger_fake_view', False):
            from .serializers import AttributeAnnotationSerializer
            return AttributeAnnotationSerializer
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        self.serializer_class = project.get_attribute_annotation_serializer()
        return self.serializer_class

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return AttributeAnnotation.objects.none()
        document = get_object_or_404(Document, pk=self.kwargs['doc_id'])
        self.queryset = document.get_attribute_annotations()
        return self.queryset

    def get_object(self):
        queryset = self.filter_queryset(self.get_queryset())
        obj = get_object_or_404(queryset, pk=self.kwargs['attribute_annotation_id'])
        self.check_object_permissions(self.request, obj)
        return obj

    def delete(self, request, *args, **kwargs):
        """Best-effort delete; always answers 204, logging any failure."""
        try:
            # ROBUSTNESS FIX: .first() instead of [0], which raised
            # IndexError for a missing annotation.
            annotation = AttributeAnnotation.objects.filter(
                pk=kwargs["attribute_annotation_id"]).first()
            if annotation is not None:
                annotation.delete()
        except Exception:
            logger.exception("error deleting attribute annotation %s",
                             kwargs.get("attribute_annotation_id"))
        return Response(status=status.HTTP_204_NO_CONTENT)


class PreSettingsList(generics.GenericAPIView, mixins.CreateModelMixin, mixins.UpdateModelMixin):
    """Read and upsert the per-user pre-settings of a project."""

    queryset = PreSettings.objects.all()
    serializer_class = PreSettingsSerializer
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser)

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return PreSettings.objects.none()
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        self.queryset = self.queryset.filter(project=project, user=self.request.user)
        return self.queryset

    def get_object(self):
        # There is at most one PreSettings row per (project, user).
        settings_obj = get_object_or_404(self.filter_queryset(self.get_queryset()))
        self.check_object_permissions(self.request, settings_obj)
        return settings_obj

    def perform_create(self, serializer):
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        serializer.save(project=project, user=self.request.user)

    def get(self, request, *args, **kwargs):
        return Response(self.serializer_class(self.get_object()).data)

    def put(self, request, *args, **kwargs):
        # Upsert: create the row with the posted data or update it in place.
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        _, created = PreSettings.objects.get_or_create(project=project, user=self.request.user,
                                                       defaults=request.data)
        if created:
            return Response(created)
        return self.update(request, *args, **kwargs)


class SettingList(generics.GenericAPIView, mixins.CreateModelMixin, mixins.UpdateModelMixin):
    """Read and upsert the per-user settings of a project."""

    queryset = Setting.objects.all()
    serializer_class = SettingSerializer
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser)

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return Setting.objects.none()
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        self.queryset = self.queryset.filter(project=project, user=self.request.user)
        return self.queryset

    def get_object(self):
        # There is at most one Setting row per (project, user).
        setting = get_object_or_404(self.filter_queryset(self.get_queryset()))
        self.check_object_permissions(self.request, setting)
        return setting

    def perform_create(self, serializer):
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        serializer.save(project=project, user=self.request.user)

    def get(self, request, *args, **kwargs):
        return Response(self.serializer_class(self.get_object()).data)

    def put(self, request, *args, **kwargs):
        # Upsert: create the row with the posted data or update it in place.
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        _, created = Setting.objects.get_or_create(project=project, user=self.request.user,
                                                   defaults=self.request.data)
        if created:
            return Response(created)
        return self.update(request, *args, **kwargs)


class RecommendationHistoryList(generics.ListCreateAPIView):
    """List and create recommendation-history entries for the current user."""

    queryset = RecommendationHistory.objects.all()
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser)
    serializer_class = RecommendationHistorySerializer

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return RecommendationHistory.objects.none()
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        self.queryset = self.queryset.filter(project=project, user=self.request.user)
        return self.queryset

    def perform_create(self, serializer):
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        try:
            serializer.save(project=project, user=self.request.user)
        except IntegrityError:
            # Duplicate (word, label) pairs are deliberately ignored; log the
            # event instead of printing to stdout.
            logger.info("Recommendation history entry already exists "
                        "(project %s)", project.pk)


class RecommendationHistoryDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete one recommendation-history entry."""

    queryset = RecommendationHistory.objects.all()
    permission_classes = (IsAuthenticated, IsProjectUser)
    serializer_class = RecommendationHistorySerializer

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return RecommendationHistory.objects.none()
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        self.queryset = self.queryset.filter(project=project, user=self.request.user)
        return self.queryset

    def get_object(self):
        candidates = self.filter_queryset(self.get_queryset())
        entry = get_object_or_404(candidates, pk=self.kwargs['history_id'])
        self.check_object_permissions(self.request, entry)
        return entry


class RecommendationRuleList(generics.ListCreateAPIView):
    """List and create recommendation rules for the current user."""

    queryset = RecommendationRule.objects.all()
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser)
    serializer_class = RecommendationRuleSerializer

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return RecommendationRule.objects.none()
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        self.queryset = self.queryset.filter(project=project, user=self.request.user)
        return self.queryset

    def perform_create(self, serializer):
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        try:
            serializer.save(project=project, user=self.request.user)
        except IntegrityError:
            # Duplicate rules are deliberately ignored; log the event
            # instead of printing to stdout.
            logger.info("Recommendation rule already exists (project %s)",
                        project.pk)


class RecommendationRuleDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete one recommendation rule."""

    queryset = RecommendationRule.objects.all()
    permission_classes = (IsAuthenticated, IsProjectUser)
    serializer_class = RecommendationRuleSerializer

    def get_queryset(self):
        if getattr(self, 'swagger_fake_view', False):
            return RecommendationRule.objects.none()
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        self.queryset = self.queryset.filter(project=project, user=self.request.user)
        return self.queryset

    def get_object(self):
        candidates = self.filter_queryset(self.get_queryset())
        rule = get_object_or_404(candidates, pk=self.kwargs['rule_id'])
        self.check_object_permissions(self.request, rule)
        return rule


class RecommendationHistoryView(APIView):
    """Plain-JSON endpoints over a project's recommendation history."""

    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser)

    def get(self, request, project_id=0):
        """Return every history entry of the project as a flat JSON list."""
        entries = RecommendationHistory.objects.filter(project_id=project_id)
        # IDIOM: list comprehension instead of a manual append loop, and no
        # redundant list() around an already-built list.
        data = [
            {
                'project_id': entry.project_id,
                'id': entry.id,
                'label': entry.label.text,
                'word': entry.word,
                'user_id': entry.user_id,
            }
            for entry in entries
        ]
        return JsonResponse({'code': 200, 'message': 'success', 'data': data}, status=200)

    def delete(self, request, project_id=0, dict_id=0):
        """Delete one history entry; 404 when it does not exist."""
        # PERF FIX: build the queryset once and probe it with exists()
        # instead of materializing it via len() and then re-running the
        # identical filter for the delete.
        entries = RecommendationHistory.objects.filter(id=dict_id, project_id=project_id)
        if not entries.exists():
            return JsonResponse({'detail': 'Not found'}, status=404)
        entries.delete()
        return JsonResponse({'message': 'delete dict success'}, status=200)


class DownloadView(APIView):
    """Export a project's documents as CSV, as a merged JSON dictionary, or
    push the merged data to the external i3city knowledge-graph service.
    """

    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser)

    def get(self, request, project_id=0, user_id=0, type='csv'):
        """Dispatch on ``type`` ('csv', 'dict' or 'kg'); 404 otherwise."""
        project = get_object_or_404(Project, pk=project_id)
        docs = Document.objects.all().filter(project_id=project_id)
        # Build a filesystem-friendly filename from the project name.
        filename = '_'.join(project.name.lower().split())
        if type == 'csv':
            return self.download_csv(filename, docs, user_id)
        elif type == 'dict':
            return self.download_dict(filename, docs, user_id)
        elif type=='kg':
            # NOTE(review): the KG host is hard-coded into the session here —
            # presumably a fixed deployment endpoint; confirm.
            request.session["url"] = 'first.askgraph.top'
            return self.download_kg(filename, docs, user_id,request.session['url'])
        else:
            return JsonResponse({'detail': 'Not found!'}, status=404)

    def download_csv(self,filename, docs, user_id):
        """Stream every document's annotations as rows of a CSV attachment."""
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(escape_uri_path(filename))
        writer = csv.writer(response)
        for d in docs:
            writer.writerows(d.to_csv(user_id=user_id))
        return response

    def download_dict(self,filename, docs, user_id):
        """Merge all documents' entity/relation exports into one JSON file.

        Attribute value sets for the same (type, entity, attribute) key are
        unioned across documents; relation tuple sets are unioned per key.
        """
        response = HttpResponse(content_type='text/json')
        response['Content-Disposition'] = 'attachment; filename="{}.json"'.format(escape_uri_path(filename))
        from collections import defaultdict
        result = defaultdict()
        entities = defaultdict(dict)
        relations = defaultdict(set)
        texts=set()
        for d in docs:
            entity_result, relation_result,text = d.export_to_json()
            texts.add(text)
            # Merge this document's entities into the accumulated mapping,
            # unioning attribute value sets on key collisions.
            for type in entity_result.keys():
                for ent in entity_result[type].keys():
                    if ent in entities[type].keys():
                        for attri in entity_result[type][ent].keys():
                            if attri in entities[type][ent].keys():
                                entities[type][ent][attri] = entities[type][ent][attri].union(
                                    entity_result[type][ent][attri])
                            else:
                                entities[type][ent][attri] = entity_result[type][ent][attri]
                    else:
                        entities[type][ent] = entity_result[type][ent]
            for key in relation_result.keys():
                relations[key] = relations[key].union(relation_result[key])
        import datetime
        # The export id is just a human-readable timestamp.
        result["id"] = [str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))]
        result["relation"] = relations
        result["entity"] = entities
        result["text"]=texts
        # Sets are not JSON-serializable; the default hook turns them into lists.
        dump = json.dumps(result, ensure_ascii=False,indent=4, default=lambda x: list(x) if isinstance(x, set) else x)
        response.write(dump + "\n")
        return response

    def download_kg(self,filename, docs, user_id,url):
        """Merge the documents like download_dict, POST the result to the
        i3city knowledge-graph service at ``url``, and return both the
        service reply and the merged data as the downloaded JSON.
        """
        response = HttpResponse(content_type='text/json')
        response['Content-Disposition'] = 'attachment; filename="{}.json"'.format(escape_uri_path(filename))
        from collections import defaultdict
        result = defaultdict()
        entities = defaultdict(dict)
        relations = defaultdict(set)
        for d in docs:
            entity_result, relation_result = d.export_to_knowledge_graph()
            # Same merge strategy as download_dict: union attribute value
            # sets and relation sets across documents.
            for type in entity_result.keys():
                for ent in entity_result[type].keys():
                    if ent in entities[type].keys():
                        for attri in entity_result[type][ent].keys():
                            if attri in entities[type][ent].keys():
                                entities[type][ent][attri] = entities[type][ent][attri].union(entity_result[type][ent][attri])
                            else:
                                entities[type][ent][attri] = entity_result[type][ent][attri]
                    else:
                        entities[type][ent] = entity_result[type][ent]
            for key in relation_result.keys():
                relations[key] = relations[key].union(relation_result[key])
        import datetime, requests
        result["id"] = ['-'+str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))]
        result["relation"] = relations
        result["entity"] = entities
        print(result)
        # Sets are not JSON-serializable; the default hook turns them into lists.
        dump = json.dumps(result, ensure_ascii=False, default=lambda x: list(x) if isinstance(x, set) else x)
        headers = {'Content-Type': 'application/json;charset=UTF-8'}
        rsp = requests.post(url="http://{}:{}/v1/mongo/texttooldata".format(I3CITY_IP, I3CITY_PORT),
                            data=dump.encode("utf-8"), headers=headers)
        print(rsp,rsp.text)
        #data = rsp.json()
        print("POST code" + str(rsp.status_code))

        def convert_sets_to_lists(obj):
            # Recursively replace every set in a nested dict/list structure
            # with a plain list so the structure can be json.dumps'd.
            if isinstance(obj, set):
                return list(obj)
            elif isinstance(obj, dict):
                return {k: convert_sets_to_lists(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [convert_sets_to_lists(v) for v in obj]
            else:
                return obj

        # Convert sets to lists before serializing the combined payload.
        result_converted = convert_sets_to_lists(result)
        data_ = {"data": rsp.text, "url": url, "result":result_converted}
        response.write(json.dumps(data_, ensure_ascii=False) + '\n')

        return response


class UploadView(APIView):
    """File-upload endpoint, dispatching on the ``type`` URL argument.

    Supported types:
      * ``csv``     - import documents from .csv/.txt/.json/.docx with cleaning
      * ``dict``    - import a word -> label dictionary (JSON)
      * ``owl``     - import labels/relations/attributes from an OWL2 file
      * ``rule``    - import regex recommendation rules (JSON)
      * ``preview`` - preview the first rows of a file without importing
    """
    parser_classes = [MultiPartParser]
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser)

    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(name='file',
                              in_=openapi.IN_FORM,
                              type=openapi.TYPE_FILE,
                              required=True,
                              description='上传文件')
        ]
    )
    def post(self, request, project_id=0, user_id=0, type='csv'):
        """Dispatch the upload to the handler matching ``type``."""
        # NOTE: ``type`` shadows the builtin, but it is part of the URL-conf
        # keyword interface, so it is kept as-is.
        if type == 'csv':
            return self.docs_csv_upload(request, project_id, user_id)
        elif type == 'dict':
            return self.docs_json_upload(request, project_id, user_id)
        elif type == 'owl':
            return self.docs_owl_upload(request, project_id, user_id)
        elif type == 'rule':
            return self.docs_rule_upload(request, project_id, user_id)
        elif type == 'preview':  # preview annotation data before importing
            return self.docs_preview_upload(request, project_id, user_id)
        else:
            return JsonResponse({'detail': 'Not found!'}, status=404)

    def addLabelByName(self, labelName, project_id):
        """Create or update a Label named *labelName* with a random color."""
        import random
        data = {'shortcut': None, 'text_color': '#ffffff'}
        data['text'] = str(labelName)
        # Random six-digit hex background color ('0' deliberately excluded,
        # matching the original palette).
        colorArr = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
        data['background_color'] = "#" + "".join(random.choice(colorArr) for _ in range(6))
        Label.objects.update_or_create(data, text=data['text'], project_id=project_id)

    def parse_owl(self, data):
        """Parse an OWL2 functional-syntax file.

        :param data: list of stripped, comment-free lines from the file
        :return: (entity_list, relation_list, property_list) where
                 relation_list items are (head, relation, tail) and
                 property_list items are (attribute name, concept)
        """
        import pyparsing as pp
        ppu = pp.pyparsing_unicode
        entity = pp.Literal("Declaration(Class(:") + pp.Word(ppu.alphas) + pp.Literal("))")
        data_property = pp.Literal("DataPropertyDomain(:") + pp.Word(ppu.alphas) + pp.Literal(":") + pp.Word(
            ppu.alphas) + pp.Literal(")")
        object_property_domain = pp.Literal("ObjectPropertyDomain(:") + pp.Word(ppu.alphas) + pp.Literal(":") + pp.Word(
            ppu.alphas) + pp.Literal(")")
        object_property_range = pp.Literal("ObjectPropertyRange(:") + pp.Word(ppu.alphas) + pp.Literal(":") + pp.Word(
            ppu.alphas) + pp.Literal(")")
        entity_list = []
        relation_list = []
        property_list = []
        data_len = len(data)
        index = 0
        while index < data_len:
            line = data[index]
            if line.startswith("Declaration(Class(:"):
                entity_list.append(entity.parseString(line)[1])
            elif line.startswith("ObjectPropertyDomain(:"):
                # Domain and range are expected on consecutive lines and must
                # name the same relation.
                line2 = data[index + 1]
                assert line2.startswith("ObjectPropertyRange(:")
                line_parse = object_property_domain.parseString(line)
                line2_parse = object_property_range.parseString(line2)
                r1, head = line_parse[1], line_parse[3]
                r2, tail = line2_parse[1], line2_parse[3]
                assert r1 == r2
                relation_list.append((head, r1, tail))
                index += 1  # skip the range line we just consumed
            elif line.startswith("DataPropertyDomain(:"):
                line_parse = data_property.parseString(line)
                property_list.append((line_parse[1], line_parse[3]))
                assert line_parse[3] in entity_list
            index += 1
        return entity_list, relation_list, property_list

    def clean_text(self, text, options):
        """Apply the cleaning steps enabled in *options* to *text*."""
        # Strip 【...】 title brackets and their content
        if options.get('remove_title_brackets'):
            text = re.sub('【.*?】', '', text)

        # Strip [...] emoticon brackets and their content
        if options.get('remove_emotion_brackets'):
            text = re.sub(r'\[.*?\]', '', text)

        # Strip #...# hashtags and their content
        if options.get('remove_hashtags'):
            text = re.sub('#.*?#', '', text)

        # Strip URLs
        if options.get('remove_urls'):
            text = re.sub(r'http[s]?://\S+', '', text)

        # Strip punctuation while keeping explicitly preserved characters;
        # \u4e00-\u9fa5 keeps CJK characters.
        if options.get('remove_punctuation'):
            preserved_punctuation = options.get('preserved_punctuation', '')
            text = re.sub(f"[^\\w\\s{preserved_punctuation}\u4e00-\u9fa5]", '', text)

        # Strip HTML tags
        if options.get('remove_html_tags'):
            text = re.sub('<.*?>', '', text)

        # Strip emojis: demojize first, then drop the :name: placeholders
        if options.get('remove_emojis'):
            text = emoji.demojize(text)
            text = re.sub(r':[a-z_]+:', '', text)

        # Strip newlines and tabs
        if options.get('remove_newlines_and_tabs'):
            text = text.replace('\n', '').replace('\t', '')

        # Collapse repeated spaces
        if options.get('remove_extra_spaces'):
            text = re.sub(' +', ' ', text)
        return text

    def docs_csv_upload(self, request, project_id=0, user_id=0):
        """Import documents from a .csv/.txt/.json/.docx upload, cleaning each text."""
        from docx import Document as DocxDocument  # python-docx
        csv_file = request.FILES["file"]
        cleaning_options = json.loads(request.POST["options"])
        if csv_file.name.endswith(".csv"):
            p = get_object_or_404(Project, pk=project_id)
            data_set = csv_file.read().decode("UTF-8")
            io_string = io.StringIO(data_set)
            # Only the first column of each row becomes the document text.
            for column in csv.reader(io_string, delimiter=',', quotechar="|"):
                if not column or not column[0].strip():
                    continue
                text = self.clean_text(column[0], cleaning_options)
                Document.objects.create(text=text, project_id=p.id)
            return JsonResponse({'detail': 'success!'}, status=200)
        elif csv_file.name.endswith(".txt"):
            p = get_object_or_404(Project, pk=project_id)
            data_set = csv_file.read().decode("UTF-8")
            io_string = io.StringIO(data_set)
            # One document per non-empty line.
            for line in io_string:
                text = line.rstrip('\n').strip()
                if not text:
                    continue
                text = self.clean_text(text, cleaning_options)
                Document.objects.create(text=text, project_id=p.id)
            return JsonResponse({'detail': 'success!'}, status=200)
        elif csv_file.name.endswith(".json"):
            p = get_object_or_404(Project, pk=project_id)
            data_set = csv_file.read().decode("UTF-8")
            # The file holds independent JSON objects separated by newlines;
            # join them into one valid JSON array before parsing.
            data = json.loads('[' + data_set.replace('}\n{', '},\n{') + ']')
            for item in data:
                text = item.get('text', '').strip()
                if not text:
                    continue
                text = self.clean_text(text, cleaning_options)
                Document.objects.create(text=text, project_id=p.id)
            return JsonResponse({'detail': 'success!'}, status=200)
        elif csv_file.name.endswith(".docx"):
            p = get_object_or_404(Project, pk=project_id)
            # One document per non-empty paragraph.
            docx_file = DocxDocument(csv_file)
            for paragraph in docx_file.paragraphs:
                text = paragraph.text.strip()
                if not text:
                    continue
                text = self.clean_text(text, cleaning_options)
                Document.objects.create(text=text, project_id=p.id)
            return JsonResponse({'detail': 'success!'}, status=200)
        else:
            return HttpResponse("This is not a csv or txt file")

    def docs_preview_upload(self, request, project_id=0, user_id=0):
        """Return up to the first 10 non-empty rows of the uploaded file.

        Nothing is written to the database.
        """
        csv_file = request.FILES["file"]
        if csv_file.name.endswith(".csv"):
            data_set = csv_file.read().decode("UTF-8")
            csv_reader = csv.reader(io.StringIO(data_set), delimiter=',', quotechar="|")
            rows = []
            # BUG FIX: iterate with a bounded for-loop instead of bare
            # next(); files with fewer than 10 valid rows previously raised
            # an unhandled StopIteration (HTTP 500).
            for row in csv_reader:
                if row and row[0].strip():
                    rows.append(row)
                if len(rows) >= 10:
                    break
            return JsonResponse({'rows': rows}, status=200)
        elif csv_file.name.endswith(".txt"):
            data_set = csv_file.read().decode("UTF-8")
            lines = []
            # Same bounded iteration fix as the .csv branch above.
            for raw_line in io.StringIO(data_set):
                line = raw_line.rstrip('\n').strip()
                if line:
                    lines.append(line)
                if len(lines) >= 10:
                    break
            return JsonResponse({'rows': lines}, status=200)
        elif csv_file.name.endswith(".json"):
            data_set = csv_file.read().decode("UTF-8")
            # Join independent JSON objects into one valid JSON array.
            data = json.loads('[' + data_set.replace('}\n{', '},\n{') + ']')
            rows = [item.get('text') for item in data if item.get('text', '').strip()]
            return JsonResponse({'rows': rows[:10]}, status=200)
        else:
            return HttpResponse("This is not a csv, txt or json file")

    # TODO: OWL attribute parsing still has some known issues.
    def docs_owl_upload(self, request, project_id=0, user_id=0):
        """Import labels, relation labels and attribute labels from an OWL2 file."""
        file = request.FILES['file']
        # Keep non-empty lines that are not comments ('#').
        data = list(filter(lambda x: len(x) > 0 and '#' not in x,
                           map(lambda x: x.decode("UTF-8").strip(), file.readlines())))
        label_list, relation_list, property_list = self.parse_owl(data)
        print(label_list, relation_list, property_list)
        for label in label_list:
            self.addLabelByName(label, project_id)
        for relation in relation_list:
            head_label = Label.objects.all().filter(project_id=project_id, text=str(relation[0]))[0]
            tail_label = Label.objects.all().filter(project_id=project_id, text=str(relation[2]))[0]
            RelationLabel.objects.update_or_create({'relation_name': str(relation[1])},
                                                   relation_name=str(relation[1]),
                                                   head_label_id=head_label.id,
                                                   tail_label_id=tail_label.id,
                                                   project_id=project_id)
        for prop in property_list:
            label = Label.objects.all().filter(project_id=project_id, text=str(prop[1]))[0]
            AttributeLabel.objects.update_or_create({'attribute_name': str(prop[0])},
                                                    attribute_name=str(prop[0]),
                                                    label_id=label.id,
                                                    project_id=project_id)
        return JsonResponse({'detail': 'success!'}, status=200)

    def docs_json_upload(self, request, project_id=0, user_id=0):
        """Import a word -> label-name dictionary into RecommendationHistory."""
        file = request.FILES['file']
        contents = json.load(file)
        # Ensure every referenced label exists, then map label text -> id.
        for k in contents:
            self.addLabelByName(contents[k], project_id)
        text_id = {l.text: l.id for l in Label.objects.all().filter(project_id=project_id)}
        for k in contents:
            data = {'word': k, 'label_id': text_id[contents[k]]}
            RecommendationHistory.objects.update_or_create(data, word=data['word'],
                                                           project_id=project_id,
                                                           user_id=user_id)
        return JsonResponse({'detail': 'success!'}, status=200)

    def docs_rule_upload(self, request, project_id=0, user_id=0):
        """Import a regex-rule -> label-name dictionary into RecommendationRule."""
        file = request.FILES['file']
        contents = json.load(file)
        text_id = {l.text: l.id for l in Label.objects.all().filter(project_id=project_id)}
        for k in contents:
            # A rule may carry no label (label-less highlight rule).
            data = {'rule': k,
                    'label_id': text_id[contents[k]] if contents[k] else None}
            RecommendationRule.objects.update_or_create(data, rule=data['rule'],
                                                        project_id=project_id,
                                                        user_id=user_id)
        return JsonResponse({'detail': 'success!'}, status=200)


def index_word2char(entities, doc_id):
    """Convert model entity spans into recommendation dicts for *doc_id*.

    :param entities: iterable of dicts with 'type', 'beginOffset', 'endOffset'
    :param doc_id: document id attached to every recommendation
    :return: list of {'document', 'label', 'start_offset', 'end_offset'} dicts
    """
    # Comprehension replaces the original manual while-loop over indices.
    return [
        {'document': doc_id, 'label': ent['type'],
         'start_offset': ent['beginOffset'], 'end_offset': ent['endOffset']}
        for ent in entities
    ]


# Obtain the prediction result for a single document according to the
# per-user project settings (calls the legacy model library).
def get_recommendation(doc_id,project,user,opt_o,opt_h,opt_r):
    """Merge entity recommendations for one document from three sources.

    :param doc_id: id of the document inside *project*
    :param opt_o: use the online-learning (alpaca) model predictions
    :param opt_h: use the user's RecommendationHistory word dictionary
    :param opt_r: use the user's RecommendationRule regex rules
    :return: list of recommendation dicts carrying document id, label and
             character offsets
    """
    document = project.documents.get(id=doc_id)
    final_list = []
    o_list = []
    h_list = []
    r_list = []

    # Online model predictions (word-level spans converted to char offsets).
    if opt_o and alpaca_client is not None:
        response = alpaca_recommend(document.text)
        o_entities = response['entities']
        o_words = response['words']
        print(o_entities,o_words)
        o_list = index_word2char(o_entities,doc_id)

    # The user's saved word -> label dictionary entries for this project.
    if opt_h:
        history_queryset = RecommendationHistory.objects.all()
        serializer_class = RecommendationHistorySerializer
        history_queryset = history_queryset.filter(project=project, user=user)
        if len(history_queryset) > 0:
            history_obj = get_list_or_404(history_queryset)
            h_list = serializer_class(history_obj, many=True).data

    # The user's saved regex rules for this project.
    if opt_r:
        rule_queryset= RecommendationRule.objects.all()
        serializer_class=RecommendationRuleSerializer
        rule_queryset=rule_queryset.filter(project=project,user=user)
        if len(rule_queryset)>0:
            rule_obj=get_list_or_404(rule_queryset)
            r_list=serializer_class(rule_obj,many=True).data

    tmp_h_list = []
    tmp_o_list = []
    tmp_r_list = []
    tmp_list = []
    if opt_o and alpaca_client is not None:
        for o in o_list:
            tmp_o_list.append(o)

    # Expand each rule into concrete (start, end) matches in the text.
    if opt_r:
        for r in r_list:
            if r['label']:
                label_queryset = Label.objects.all()
                label_queryset = label_queryset.filter(project=project)
                serializer_class = LabelSerializer
                label_obj = get_object_or_404(label_queryset, pk=r['label'])
                label_data = serializer_class(label_obj).data
                r_label=label_data['text']
            else:
                # Rules without a label produce label-less recommendations.
                r_label=None
            pattern=re.compile(r['rule'])
            results=pattern.findall(document.text)
            #print(results)
            if results:
                for result in results:
                    end_offset=0
                    # Locate every case-insensitive occurrence of the match.
                    while 1:
                        start_offset = document.text.lower().find(result.lower(), end_offset)
                        if start_offset == -1:
                            break
                        end_offset = start_offset + len(result)
                        r_dict = {'document': doc_id, 'label': r_label,
                                  'start_offset': start_offset, 'end_offset': end_offset, "word": result.lower()}
                        tmp_r_list.append(r_dict)


    # Expand each dictionary word into concrete (start, end) matches.
    if opt_h:
        for h in h_list:
            if h['word'].lower() in document.text.lower():
                label_queryset = Label.objects.all()
                serializer_class = LabelSerializer
                label_queryset = label_queryset.filter(project=project)
                label_obj = get_object_or_404(label_queryset, pk=h['label'])
                label_data = serializer_class(label_obj).data
                end_offset = 0
                while 1:
                    start_offset = document.text.lower().find(h['word'].lower(), end_offset)
                    if start_offset == -1:
                        break
                    end_offset = start_offset + len(h['word'])
                    h_dict = {'document': doc_id, 'label': label_data['text'],
                              'start_offset': start_offset, 'end_offset': end_offset, "word": h['word'].lower()}
                    tmp_h_list.append(h_dict)

    # Priority merge: history beats online — drop online spans overlapping a
    # history span. Iterates a copy (tmp_o_list[:]) so removal is safe.
    if len(tmp_h_list) > 0 and len(tmp_o_list) > 0:
        for tmp_h in tmp_h_list:
            for tmp_o in tmp_o_list[:]:
                o_range = range(tmp_o['start_offset'], tmp_o['end_offset'])
                h_range = range(tmp_h['start_offset'], tmp_h['end_offset'])
                h_range_s = set(h_range)
                if len(h_range_s.intersection(o_range)) > 0:
                    tmp_o_list.remove(tmp_o)
        tmp_list.extend(tmp_h_list)
        tmp_list.extend(tmp_o_list)

    if len(tmp_h_list) > 0 and len(tmp_o_list) == 0:
        tmp_list = tmp_h_list
    if len(tmp_h_list) == 0 and len(tmp_o_list) > 0:
        tmp_list = tmp_o_list

    # Rule matches have the lowest priority: drop those overlapping anything
    # already kept. Iterates a copy (tmp_r_list[:]) so removal is safe.
    if len(tmp_list) > 0 and len(tmp_r_list) > 0:
        for tmp in tmp_list:
            for tmp_r in tmp_r_list[:]:
                r_range = range(tmp_r['start_offset'], tmp_r['end_offset'])
                tmp_range = range(tmp['start_offset'], tmp['end_offset'])
                tmp_range_s = set(tmp_range)
                if len(tmp_range_s.intersection(r_range)) > 0:
                    tmp_r_list.remove(tmp_r)
        tmp_list.extend(tmp_r_list)

    # NOTE(review): when tmp_h_list is empty but tmp_o_list is not, tmp_list
    # already holds online (and filtered rule) results, yet this resets it to
    # only the rule matches, discarding them. Suspected bug — the condition
    # probably should be ``len(tmp_list) == 0``; confirm intent before fixing.
    if len(tmp_h_list) == 0 and len(tmp_r_list) > 0:
        tmp_list = tmp_r_list

    final_list.extend(tmp_list)
    return final_list


# Batch prediction via the legacy model library.
class RecommendationsView(APIView):
    """Return merged entity recommendations for a batch of documents.

    The enabled sources (online model / history dictionary / regex rules)
    come from the requesting user's per-project Setting.
    """
    pagination_class = None
    # BUG FIX: this attribute was misspelled ``premission_classes``, so DRF
    # silently applied no per-view permissions at all.
    permission_classes = (IsAuthenticated, IsProjectUser)
    queryset = Document.objects.all()

    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter('doc_ids', openapi.IN_QUERY,
                              type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), required=True)
        ]
    )
    def get(self, request, *args, **kwargs):
        """Return ``{"recommendations": [{doc_id: [...]}, ...]}``."""
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        doc_ids = list(map(int, request.query_params.get('doc_ids').split(",")))
        user = self.request.user
        # Exactly one Setting row is expected per (project, user).
        setting_obj = get_object_or_404(Setting.objects.filter(project=project, user=user))
        setting_data = SettingSerializer(setting_obj).data
        opt_o = setting_data['onlinelearning']
        opt_h = setting_data['history']
        opt_r = setting_data['rule']
        recommendations = [
            {doc_id: get_recommendation(doc_id, project, user, opt_o, opt_h, opt_r)}
            for doc_id in doc_ids
        ]
        return Response({"recommendations": recommendations})


def construct_predict_data(text, annotation_list):
    """Build relation-classification inputs by marking entity pairs in *text*.

    For each pair of annotation tuples ``((s1, e1, obj), (s2, e2, obj))`` the
    text is split into characters and the two spans are wrapped in
    ``<e1>...</e1>`` / ``<e2>...</e2>`` markers.

    Assumes the first span precedes the second (e1 <= s2), as produced by
    ``it.combinations`` over offset-ordered annotations — TODO confirm.

    :return: list of character lists, one per annotation pair
    """
    chars = list(str(text).strip())
    res = []
    for pair in annotation_list:
        s1, e1 = pair[0][0], pair[0][1]
        s2, e2 = pair[1][0], pair[1][1]
        marked = (chars[:s1] + ["<e1>"] + chars[s1:e1] + ["</e1>"]
                  + chars[e1:s2] + ["<e2>"] + chars[s2:e2] + ["</e2>"]
                  + chars[e2:])
        # Leftover debug print removed.
        res.append(marked)
    return res


def relation_predict(alpaca_client, data, cnt=0):
    """Ask the model client for relation predictions, retrying on 'error'.

    Retries with a 2-second pause, giving up once the retry counter exceeds 3
    (same budget as the original recursive version, but without sleeping on
    the final give-up path).

    :param cnt: starting retry counter (kept for interface compatibility)
    :return: the client response dict, or ``{'index': [], 'lables': []}``
             (note: 'lables' spelling is part of the server protocol)
    """
    if alpaca_client is None or len(data) == 0:
        print("alpaca_client is none or data = 0")
        return {'index': [], 'lables': []}
    while True:
        response = alpaca_client.relation_predict(data)
        if str(response) != 'error':
            return response
        print('error')
        if cnt > 3:
            print("alpaca recommend has 3 times failed")
            return {'index': [], 'lables': []}
        time.sleep(2)
        cnt += 1


def get_relation_recommendation(doc_id,project,auto=False):
    """Predict and store relation annotations for one document.

    Builds every pair of the document's entity annotations, asks the model
    server to classify each pair, and saves one RelationAnnotation per
    prediction.

    :param auto: when True, saved annotations are marked as confirmed
                 (``is_recommend=False``) instead of mere recommendations
    :return: True on success; an ``HttpResponse(200)`` when there are no
             entity pairs. NOTE(review): the HttpResponse return is a
             surprising type for a helper — callers only truth-test the
             result, so it currently behaves like success; consider
             returning True there instead.
    """
    document = project.documents.get(id=doc_id)
    annotations = document.get_annotations()
    relation_lables = project.relation_labels.all()
    relation_lable_map = {label.relation_name: label for label in relation_lables}

    # Collect (start_offset, end_offset, annotation object) triples.
    annotation_list = []
    for i in annotations:
        tmp = i
        i = model_to_dict(i)
        ann = (i['start_offset'], i['end_offset'], tmp)
        annotation_list.append(ann)
    res = list(it.combinations(annotation_list, 2))
    if len(res) == 0:
        return HttpResponse(200)
    data = construct_predict_data(document.text, res)
    rsp = relation_predict(get_alpaca_client(), data)
    # 'lables' is the (misspelled) key used by the model-server protocol.
    predict_lables = rsp["lables"]
    print(rsp)
    for i, index in enumerate(rsp['index']):
        lable1 = res[index][0][2]
        lable2 = res[index][1][2]
        # Each prediction is "relation_name$direction"; direction 0 means
        # from the first entity to the second, anything else the reverse.
        predict = predict_lables[i].split("$")
        relation_label = relation_lable_map[predict[0]]
        dicection = int(predict[1])
        is_recommend=not auto
        if dicection == 0:
            obj = RelationAnnotation(document=document, relation_label=relation_label,
                                     from_label=lable1, to_label=lable2, is_recommend=is_recommend)
        else:
            obj = RelationAnnotation(document=document, relation_label=relation_label,
                                     from_label=lable2, to_label=lable1, is_recommend=is_recommend)
        obj.save()

    return True


# Batch relation prediction via the legacy model library.
class RelationRecommendationsView(APIView):
    """Run relation prediction for every document id listed in ``doc_ids``."""
    queryset = Document.objects.all()
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser)

    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter('doc_ids', openapi.IN_QUERY,
                              type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), required=True)
        ]
    )
    def get(self, request, *args, **kwargs):
        """Trigger prediction per document; failures are only logged."""
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        raw_ids = request.query_params.get('doc_ids')
        for doc_id in (int(part) for part in raw_ids.split(",")):
            if not get_relation_recommendation(doc_id, project):
                print(doc_id, "error")
        return HttpResponse(200)


# Relation prediction for a single document via the legacy model library.
class RelationRecommendationList(APIView):
    """Run relation prediction for the single document given in the URL."""
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser)

    def get(self, request, *args, **kwargs):
        """Trigger prediction for ``doc_id``; 200 on success, 500 on failure."""
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        if get_relation_recommendation(self.kwargs['doc_id'], project):
            return HttpResponse(200)
        # BUG FIX: previously fell through and returned None, which makes
        # Django raise "The view didn't return an HttpResponse object".
        return HttpResponse(status=500)


# Merge recommendation results from three sources.
def make_recommendation(history_rec, online_rec, rule_rec):
    """Merge entity recommendations with priority history > online > rule.

    A lower-priority span is dropped when its character range overlaps any
    already-kept span. Inputs are not mutated.

    BUG FIX: the original removed items from the list it was iterating,
    silently skipping the element after each removal.
    """
    def _overlaps(a, b):
        # Non-empty intersection of [start, end) ranges.
        return max(a['start_offset'], b['start_offset']) < min(a['end_offset'], b['end_offset'])

    def remove_intersection(keep, candidates):
        # Keep only candidates that overlap nothing in *keep*.
        return [c for c in candidates if not any(_overlaps(k, c) for k in keep)]

    recommendation = []
    if len(history_rec) > 0 and len(online_rec) > 0:
        online_rec = remove_intersection(history_rec, online_rec)
    recommendation.extend(history_rec)
    recommendation.extend(online_rec)

    if len(recommendation) > 0 and len(rule_rec) > 0:
        rule_rec = remove_intersection(recommendation, rule_rec)
    recommendation.extend(rule_rec)

    return recommendation


# Entity recommendations derived from user-defined regex rules.
def get_rule_recoomendation(indices, project, user):
    """Return per-document entity recommendations produced by regex rules.

    (Function name misspelling is kept — it is the public name callers use.)

    :param indices: document ids, one result list is returned per id
    :return: list of recommendation-dict lists, aligned with *indices*
    """
    rule_queryset = RecommendationRule.objects.filter(project=project, user=user)
    rule_recommendations = [[] for _ in indices]
    if len(rule_queryset) == 0:
        return rule_recommendations
    rule_list = RecommendationRuleSerializer(get_list_or_404(rule_queryset), many=True).data

    # PERF: fetch each document once instead of once per rule (was an
    # N-rules x M-documents query pattern).
    documents = {index: project.documents.get(id=index) for index in indices}

    for r in rule_list:
        if r['label']:
            label_obj = get_object_or_404(Label.objects.filter(project=project), pk=r['label'])
            r_label = LabelSerializer(label_obj).data['text']
        else:
            # Rules without a label produce label-less recommendations.
            r_label = None
        pattern = re.compile(r['rule'])
        for i, index in enumerate(indices):
            document = documents[index]
            lowered = document.text.lower()
            for result in pattern.findall(document.text):
                end_offset = 0
                # Record every case-insensitive occurrence of the match.
                while True:
                    start_offset = lowered.find(result.lower(), end_offset)
                    if start_offset == -1:
                        break
                    end_offset = start_offset + len(result)
                    rule_recommendations[i].append({
                        'label': r_label,
                        'start_offset': start_offset,
                        'end_offset': end_offset,
                        'word': result.lower(),
                    })
    print(rule_recommendations)
    return rule_recommendations


# Entity recommendations derived from the user's history dictionary.
def get_history_recommendation(indices, project, user):
    """Return per-document entity recommendations from the word dictionary.

    :param indices: document ids, one result list is returned per id
    :return: list of recommendation-dict lists, aligned with *indices*
    """
    history_queryset = RecommendationHistory.objects.filter(project=project, user=user)
    history_recommendations = [[] for _ in indices]
    if len(history_queryset) == 0:
        return history_recommendations
    h_list = RecommendationHistorySerializer(get_list_or_404(history_queryset), many=True).data

    # PERF: fetch each document once instead of once per dictionary word.
    documents = {index: project.documents.get(id=index) for index in indices}
    # Cache label text per label id; the lookup stays lazy (only performed
    # when a word actually occurs), preserving the original 404 behavior.
    label_text_cache = {}

    for h in h_list:
        word = h['word'].lower()
        for i, index in enumerate(indices):
            document = documents[index]
            lowered = document.text.lower()
            if word not in lowered:
                continue
            if h['label'] not in label_text_cache:
                label_obj = get_object_or_404(Label.objects.filter(project=project), pk=h['label'])
                label_text_cache[h['label']] = LabelSerializer(label_obj).data['text']
            label_text = label_text_cache[h['label']]
            end_offset = 0
            # Record every case-insensitive occurrence of the word.
            while True:
                start_offset = lowered.find(word, end_offset)
                if start_offset == -1:
                    break
                end_offset = start_offset + len(h['word'])
                history_recommendations[i].append({
                    'label': label_text,
                    'start_offset': start_offset,
                    'end_offset': end_offset,
                    'word': word,
                })
    print(history_recommendations)
    return history_recommendations


class PredictionView(APIView):
    """Batch entity/relation prediction for a list of documents.

    GET merges model-server predictions (when ``model``/``task`` are given)
    with history-dictionary and regex-rule recommendations, according to the
    requesting user's per-project Setting.
    """
    queryset = Document.objects.all()
    permission_classes = (IsAuthenticated, IsProjectUser, IsAdminUserAndWriteOnly)

    def get_server_recommendation(self, indices, project, model, task, dataset=None):
        """Ask the model server to predict entities/relations for *indices*.

        :return: (entity_recommendations, relation_recommendations), one list
                 per document, aligned with *indices*
        """
        # NOTE(review): the model-server address is hard-coded here; consider
        # moving it to settings alongside I3CITY_IP/I3CITY_PORT.
        client = ModelServerClient(ip='192.168.249.10', port=8000)
        print('get recom', flush=True)
        # The server expects whitespace-separated characters.
        text = []
        for index in indices:
            document = project.documents.get(id=index)
            text.append(' '.join(list(document.text)))
        print(text)

        assert task in ['ner', 're', 'ner_re']
        if dataset:
            response = client.predict(text=text, task=task, model=model, dataset=dataset,
                                      version=0, experiment_id=0)
        else:
            label_list = project.get_label_list(task)
            print(label_list)
            trained_model = TrainedModel.objects.all().filter(project=project, model=model, task=task)[0]
            model_config = json.loads(trained_model.model_config)
            response = client.predict(text=text, task=task, model=model, project_id=project.id,
                                      model_config=model_config, label_list=label_list)
        print('response:', response, flush=True)
        result = json.loads(response.message)
        entity_recommendations = [[] for _ in indices]
        relation_recommendations = [[] for _ in indices]
        if 'entities' in result[0].keys():
            for i, item in enumerate(result):
                for entity in item['entities']:
                    # Server spans are inclusive; convert to exclusive ends.
                    entity_recommendations[i].append({'label': entity[0],
                                                      'start_offset': entity[1],
                                                      'end_offset': entity[2] + 1})
        # TODO: relation / joint-model predictions still need testing; the
        # returned format may be adjusted together with the frontend.
        if 'relations' in result[0].keys():
            for i, item in enumerate(result):
                for relation in item['relations']:
                    relation_recommendations[i].append(relation)
        print(entity_recommendations)
        return entity_recommendations, relation_recommendations

    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter('model', openapi.IN_QUERY,
                              type=openapi.TYPE_STRING, required=False),
            openapi.Parameter('dataset', openapi.IN_QUERY,
                              type=openapi.TYPE_STRING, required=False),
            openapi.Parameter('task', openapi.IN_QUERY,
                              type=openapi.TYPE_STRING, required=False),
            openapi.Parameter('doc_ids', openapi.IN_QUERY,
                              type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_INTEGER), required=True)
        ]
    )
    def get(self, request, *args, **kwargs):
        """Return merged recommendations and relations keyed by document id."""
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        doc_ids = self.request.query_params.get('doc_ids')
        indices = list(map(int, doc_ids.split(",")))
        user = self.request.user
        # Exactly one Setting row is expected per (project, user).
        setting_obj = get_object_or_404(Setting.objects.filter(project=project, user=user))
        setting_data = SettingSerializer(setting_obj).data
        print('get acc', flush=True)
        opt_h = setting_data['history']
        opt_r = setting_data['rule']

        # BUG FIX: these two were previously left undefined when no
        # model/task was supplied (only a misnamed ``predict_recommendations``
        # was initialized), causing a NameError in the merge loop below.
        entity_recommendations = [[] for _ in indices]
        relation_recommendations = [[] for _ in indices]
        history_recommendations = [[] for _ in indices]
        rule_recommendations = [[] for _ in indices]

        model = self.request.query_params.get('model')
        dataset = self.request.query_params.get('dataset')
        task = self.request.query_params.get('task')

        if model and task:
            entity_recommendations, relation_recommendations = self.get_server_recommendation(
                indices, project, model, task, dataset)
        if opt_h:
            history_recommendations = get_history_recommendation(indices, project, user)
        if opt_r:
            rule_recommendations = get_rule_recoomendation(indices, project, user)

        recommendations = {}
        relations = {}
        for i, doc_id in enumerate(indices):
            recommendations[doc_id] = make_recommendation(history_recommendations[i],
                                                          entity_recommendations[i],
                                                          rule_recommendations[i])
            relations[doc_id] = relation_recommendations[i]

        return Response({
            'indices': indices,
            'recommendations': recommendations,
            'relations': relations
        })

# 该接口返回：当前待标注样本的id和对其的推荐结果
# 连接模型时：首轮标注随机采样，返回其他推荐结果；第二标注起，返回模型推荐结果+其他推荐结果
# 未连接模型时：顺序返回id，返回其他推荐结果
class AnnotationDataView(APIView):
    """Return the ids of the next batch of documents to annotate together
    with recommendation results for them.

    With a connected model (setting ``onlinelearning``), the model server
    actively samples the batch and contributes predictions; without one,
    documents are returned in order with only history/rule recommendations.
    """
    queryset = Document.objects.all()
    permission_classes = (IsAuthenticated, IsProjectUser, IsAdminUserAndWriteOnly)

    def get_server_recommendation(self, train_data, test_data, label_list, setting_data):
        """Train on ``train_data`` and let the server pick ``acquire`` samples
        from ``test_data`` (active learning).

        Returns ``(active_indices, predict_recommendations)`` where
        ``active_indices`` are positions into ``test_data`` and each element of
        ``predict_recommendations`` is a list of
        ``{'label', 'start_offset', 'end_offset'}`` dicts.
        """
        # TODO: the model-server address is hard-coded; move it to settings.
        client = ModelServerClient(ip='192.168.249.10', port=8000)

        other_config = {"hidden_dropout_prob": 0.5, 'warmup_proportion': 0.1, 'weight_decay': 0.01,
                        'no_decay': ["bias", "LayerNorm.weight"], 'seed': 42, 'max_grad_norm': 1.0, 'logging_steps': 20}
        model_config = {"batch_size": 2, "num_train_epochs": 2,
                        "other_config": other_config}
        optimizer_config = {"params": {"lr": 3e-5, "eps": 1e-8}}
        response = client.texttool(task='ner', model='bert_span', version=1, project_id=1,
                                   train_data=train_data,
                                   test_data=test_data, label_list=label_list,
                                   model_config=model_config,
                                   optimizer_config=optimizer_config, acquire=setting_data['acquire'])
        result = json.loads(response.message)
        active_indices = result['active_indices']
        predict_recommendations = [[] for _ in active_indices]
        for i, item in enumerate(result['predict_result']):
            for entity in item:
                # entity is (label, start, inclusive_end); convert to an
                # exclusive end offset.
                predict_recommendations[i].append({'label': entity[0], 'start_offset': entity[1],
                                                   'end_offset': entity[2] + 1})
        logger.debug('predict_recommendations: %s', predict_recommendations)
        return active_indices, predict_recommendations

    def get(self, request, *args, **kwargs):
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        task = self.kwargs['task']
        # NOTE(review): assert disappears under `python -O`; an explicit 400
        # response would be safer — confirm before changing the status code.
        assert task in ['ner', 're', 'ner_re']
        unannotated_docs = list(project.documents.filter(annotated=False))
        if not unannotated_docs:
            return Response(200)
        doc_indices = [doc.id for doc in unannotated_docs]

        user = self.request.user
        setting_obj = get_object_or_404(Setting.objects.filter(project=project, user=user))
        setting_data = SettingSerializer(setting_obj).data

        opt_o = setting_data['onlinelearning']
        opt_h = setting_data['history']
        opt_r = setting_data['rule']
        acquire = setting_data['acquire']

        predict_recommendations = None
        if opt_o:
            annotated_docs = list(project.documents.filter(annotated=True))
            train_data = [doc.make_train_data(task) for doc in annotated_docs]
            test_data = [{'text': t.text} for t in unannotated_docs]
            label_list = project.get_label_list(task)
            active_indices, predict_recommendations = self.get_server_recommendation(
                train_data, test_data, label_list, setting_data)
            chosen_indices = [doc_indices[i] for i in active_indices]
        else:
            chosen_indices = doc_indices[:acquire]

        # Bug fix: size everything by the number of documents actually chosen.
        # When fewer than `acquire` documents remain, the old code iterated
        # range(acquire) and crashed with IndexError on chosen_indices[i].
        batch_size = len(chosen_indices)
        if predict_recommendations is None:
            predict_recommendations = [[] for _ in range(batch_size)]
        history_recommendations = [[] for _ in range(batch_size)]
        rule_recommendations = [[] for _ in range(batch_size)]

        if opt_h:
            history_recommendations = get_history_recommendation(chosen_indices, project, user)
        if opt_r:
            rule_recommendations = get_rule_recoomendation(chosen_indices, project, user)

        recommendations = {}
        for i, doc_id in enumerate(chosen_indices):
            recommendations[doc_id] = make_recommendation(history_recommendations[i],
                                                          predict_recommendations[i],
                                                          rule_recommendations[i])

        result = {
            'indices': chosen_indices,
            'recommendations': recommendations
        }

        return Response(result)


class TrainedModelViewSet(viewsets.ModelViewSet):
    """
    list:
        Get information about all trained models.
    create:
        Create a model and start training it.
    retrieve:
        Get the details of a model.
    update:
        Update a model's information.
    partial_update:
        Partially update a model's information.
    destroy:
        Delete an experiment.
    """
    queryset = TrainedModel.objects.all()
    serializer_class = TrainedModelSerializer

    def list(self, request, *args, **kwargs):
        """
        List all experiments, optionally filtered by dataset, model name and
        task type, e.g. /trained_models/?dataset=...&model=...&task=...
        """
        dataset = self.request.query_params.get('dataset')
        task = self.request.query_params.get('task')
        model = self.request.query_params.get('model')
        trained_model_list = TrainedModel.objects.only('id', 'dataset', 'model', 'task', 'model_status')
        if dataset:
            trained_model_list = trained_model_list.filter(dataset__name__icontains=dataset)
        if task:
            trained_model_list = trained_model_list.filter(task=task)
        if model:
            trained_model_list = trained_model_list.filter(model__model_name__icontains=model)

        page = self.paginate_queryset(trained_model_list)
        serializer = TrainedModelSerializer(page, many=True)
        return self.get_paginated_response(serializer.data)

    def perform_create(self, serializer):
        """
        Create an experiment and start training the model; if the model
        already exists it is updated instead.
        """
        project = serializer.validated_data['project']
        dataset = serializer.validated_data['dataset']
        model = serializer.validated_data['model']
        task = serializer.validated_data['task']
        model_config = serializer.validated_data['model_config']
        logger.debug('perform_create project=%s dataset=%s', project, dataset)
        # NOTE(review): assert is stripped under `python -O`; raising a DRF
        # ValidationError (HTTP 400) would be the proper response — confirm.
        assert dataset or project
        if not dataset:
            result = self.create_experiment(model, project, task, model_config)
        else:
            result = self.create_experiment(model, project, task, model_config, dataset)
        if project:
            serializer.save(project=project, model_status='running')
        else:
            serializer.save(model_status='running')
        return Response(result)

    def create_experiment(self, model, project, task, input_config=None, dataset=None):
        """
        Launch training on the model server.

        When ``dataset`` is given, the model library's own dataset is used;
        otherwise the annotated documents of ``project`` are sent as training
        data.  ``input_config`` may be a JSON string, a mapping of overrides
        merged into the default ``model_config``, or empty/None.
        """
        # TODO: the model-server address is hard-coded; move it to settings.
        client = ModelServerClient(ip='192.168.249.10', port=8000)  # 162.105.88.139
        other_config = {"hidden_dropout_prob": 0.1, 'warmup_proportion': 0.1, 'weight_decay': 0.01,
                        'no_decay': ["bias", "LayerNorm.weight"], 'seed': 42, 'max_grad_norm': 1.0,
                        'logging_steps': 20, 'lstm_hidden_size': 128,
                        'lstm_num_layers': 1, 'lstm_dropout': 0.5}
        model_config = {"batch_size": 2, "num_train_epochs": 2,
                        "other_config": other_config}
        optimizer_config = {"params": {"lr": 3e-5, "eps": 1e-8}}
        # Bug fix: the old code called len() on a possibly-None input_config
        # (TypeError), iterated .items() on a non-empty string (AttributeError
        # path for ''), and json.loads()-ed a value that may already be a dict.
        if input_config:
            if isinstance(input_config, str):
                input_config = json.loads(input_config)
            model_config.update(input_config)
        logger.debug('model_config: %s', model_config)
        # When a dataset is specified, train on the model-library dataset;
        # otherwise transmit the project's annotated data as training data.
        # TODO: training of relation / joint entity-relation models is untested.
        if dataset:
            response = client.create_experiment(task=task, model=model, dataset=dataset,
                                                version=0, experiment_id=0,
                                                model_config=model_config,
                                                optimizer_config=optimizer_config)
        else:
            label_list = project.get_label_list(task)
            annotated_docs = list(project.documents.filter(annotated=True))
            train_data = [doc.make_train_data(task) for doc in annotated_docs]
            response = client.create_experiment(task=task, model=model, project_id=project.id,
                                                model_config=model_config,
                                                optimizer_config=optimizer_config,
                                                data=train_data, label_list=label_list)
        result = response.message
        logger.debug('create_experiment result: %s', result)
        return result

    # TODO: retrieve() should talk to the model library to read and refresh
    #  the model's metric and model_status.
    # TODO: perform_create trains the model once and stores its info; an
    #  update path is still needed: when task/model/project/dataset match an
    #  existing trained_model, continue training with the project's annotated
    #  data (build train_data as in create_experiment and call
    #  client.create_experiment).

class SubmitDocs(APIView):
    """Mark the batch of documents currently being annotated as submitted."""
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser,)

    @swagger_auto_schema(operation_summary='提交时将正在标注的该批数据的annotating置为False，submitted置为True')
    def get(self, request, *args, **kwargs):
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        # Save each document individually so per-instance save() logic runs.
        in_progress = list(project.documents.filter(annotating=True))
        for document in in_progress:
            document.annotating = False
            document.submitted = True
            document.save()
        return Response(200)


# Writes prediction results straight into the database tables (still uses
# the old model library; pending revision).
class AutoAnnotateView(APIView):
    """Persist model/history/rule recommendations as SequenceAnnotation rows
    for every unannotated document of the project, then mark the documents
    as annotated."""
    pagination_class = None  # bug fix: was misspelled `pagination_clss`
    permission_classes = (IsAuthenticated, IsProjectUser,)

    def get(self, request, *args, **kwargs):
        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        unannotated_docs = list(project.documents.filter(annotated=False))
        labels = project.labels.all()
        logger.debug('auto-annotate candidates: %s', unannotated_docs)
        if not unannotated_docs:
            return Response(200)
        user = self.request.user
        setting_obj = get_object_or_404(Setting.objects.filter(project=project, user=user))
        setting_data = SettingSerializer(setting_obj).data
        opt_o = setting_data['onlinelearning']
        opt_h = setting_data['history']
        opt_r = setting_data['rule']
        for doc in unannotated_docs:
            recommendation = get_recommendation(doc.id, project, user, opt_o, opt_h, opt_r)
            for rec in recommendation:
                label_name = rec['label']
                # NOTE(review): raises IndexError when the recommended label
                # text does not exist in this project — confirm intended.
                label = labels.filter(text=label_name)[0] if label_name else None
                SequenceAnnotation(user=user, document=doc, label=label,
                                   start_offset=rec['start_offset'],
                                   end_offset=rec['end_offset'],
                                   text=rec['word']).save()
            # Relation recommendations are persisted by the helper itself.
            _ = get_relation_recommendation(doc.id, project, auto=True)
        for doc in unannotated_docs:
            doc.annotated = True
            doc.save()
        return Response(200)


class BaichuanView(APIView):
    """Get NER recommendations for one document from a Baichuan LLM served
    behind an OpenAI-compatible chat-completion endpoint."""
    # Dropped the class-level `import requests`: it only created an unused
    # class attribute; the module-level import is what the code resolves.
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser,)

    def get(self, request, *args, **kwargs):
        Baichuan_PROMPT = (
            'Instruction:Please list all entity words in the text that fit the category. '
            'Output format is "type1: word1; type2: word2". '
            'Option: {option} '
            'Text: {text} Answer:'
        )

        def openai_chat_completion_response(final_prompt):
            """POST the prompt to the chat endpoint; return the first choice's content."""
            llmdata = {
                    "model": "string",
                    "messages": [
                        {
                        "role": "user",
                        "content": final_prompt,
                        }
                    ],
                    "temperature": 0.7,
                    "top_p": 0.95,
                    "n": 1,
                    "max_tokens": 1024,
                    "stream": False,
            }
            logger.debug('%s', llmdata)
            # TODO: endpoint is a temporary natapp tunnel; move to settings.
            # Bug fix: a request without a timeout can hang this worker
            # forever; 120s is generous for an LLM completion.
            response = requests.post("http://czmwyw.natappfree.cc/v1/chat/completions",
                                     json=llmdata, timeout=120)
            logger.debug('%s', response.text)
            data = json.loads(response.text)
            first_choice_content = data['choices'][0]['message']['content']
            logger.debug('raw choice content %s', first_choice_content)
            return first_choice_content

        def parse_entities(text: str) -> Dict[str, List[str]]:
            """Parse 'type1: word1; type2: word2' into {type: [words]}."""
            entities = {}
            # Split on semicolons first, then parse each key/value pair.
            parts = [part.strip() for part in text.split(';') if part.strip()]
            for part in parts:
                match = re.match(r'([^:]+):\s*(.+)', part)
                if match:
                    concept = match.group(1).strip()
                    entity = match.group(2).strip()
                    entities.setdefault(concept, []).append(entity)
            return entities

        def find_offset(text: str, entity: str) -> Tuple[int, int]:
            """Return (start, end) of the first literal occurrence of
            `entity` in `text`, or (-1, -1) if absent.

            Bug fix: the old version fed the entity to re.search as a regex
            PATTERN, so metacharacters ('+', '(', '?') crashed or mis-matched.
            """
            start = text.find(entity)
            if start == -1:
                return -1, -1
            return start, start + len(entity)

        def process_text(response_text: str, doc_id: Any, doc_text: str) -> Response:
            """Convert the LLM answer into the recommendation payload."""
            result = {"entity": parse_entities(response_text)}
            predict_recommendations = []
            logger.debug('返回的实体列表：%s', result)
            for entity_type, entities in result['entity'].items():
                for entity in entities:
                    start_offset, end_offset = find_offset(doc_text, entity)
                    if start_offset != -1 and end_offset != -1:
                        predict_recommendations.append({
                            'label': entity_type,
                            'start_offset': start_offset,
                            'end_offset': end_offset
                        })
            final_result = {
                'indices': [doc_id],
                'recommendations': {doc_id: predict_recommendations},
                # `result` only ever has an "entity" key here, so relations
                # are always empty; kept for payload-shape compatibility.
                'relations': {doc_id: result.get('relation', {})}
            }
            logger.debug('final result %s', final_result)
            return Response(final_result)

        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        entity_labels = [label.text for label in project.labels.all()]
        doc = get_object_or_404(Document, pk=self.kwargs['doc_id'])
        baichuan_final_prompt = Baichuan_PROMPT.format(option=entity_labels, text=doc.text)
        answer = openai_chat_completion_response(baichuan_final_prompt)
        return process_text(answer, doc.id, doc.text)


class LLMView(APIView):
    """Get joint entity/relation recommendations for one document from an
    OpenAI (Azure-style) chat model configured via the `openai_conf` file."""
    pagination_class = None
    permission_classes = (IsAuthenticated, IsProjectUser,)

    def get(self, request, *args, **kwargs):
        import openai
        # NOTE(review): BASE_DIR is not defined in this module's visible
        # imports — presumably provided elsewhere in the file; confirm.
        conf_path = str(os.path.join(BASE_DIR, "openai_conf"))
        # Bug fix: use a context manager so the config file is closed even if
        # reading raises.
        with open(conf_path) as conf_file:
            lines = conf_file.readlines()
        # Remember to enable these when deploying to the server.
        openai.api_type = lines[0].strip()
        openai.api_base = lines[1].strip()
        openai.api_version = lines[2].strip()
        openai.api_key = lines[3].strip()
        SYSTEM_PROMPT = 'You are an excellent professional information extraction system. ' \
                        'The task is to locate and extract named entities offrom text and categorized their relations"' \
                        'The output should be in the following format:' \
                        '{"entity":{"entity label1":[e1,e2,...],"entity label2":[e3,e4,...],...},' \
                        '"relation":{"relation label1":[[e1,e2],[e3,e4],...],"relation label2":[[e5,e6],...]}}' \
                        'where relation label1:(e1,e2) means that there is relation1 between head entity e1 and tail entity e2' \
                        'You should only output the result. No extra explanation needed.'
        USER_PROMPT_1 = "Are you clear about your role?"
        ASSISTANT_PROMPT_1 = "Sure, I'm ready to help you with your information extraction task. " \
                             "Please provide me with the necessary information to get started."
        GUIDELINES_PROMPT = (
            "Entity labels: {},\n"
            "Relation labels: {}.\n"
            "Input: {}.\n"
            "Output:\n"
        )

        def openai_chat_completion_response(final_prompt):
            """Send the few-shot conversation and return the model's answer."""
            response = openai.ChatCompletion.create(
                engine=lines[4].strip(),
                # model = "gpt-3.5-turbo", # for local deployment
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": USER_PROMPT_1},
                    {"role": "assistant", "content": ASSISTANT_PROMPT_1},
                    {"role": "user", "content": final_prompt}
                ],
                temperature=0.7,
                max_tokens=800,
                top_p=0.95,
                frequency_penalty=0,
                presence_penalty=0,
                stop=None
            )

            return response['choices'][0]['message']['content'].strip("\n")

        def find_offset(text, entity):
            """Return (start, end) of the first literal occurrence of
            `entity` in `text`, or (-1, -1) if absent.

            Bug fix: the old version fed the entity to re.search as a regex
            PATTERN, so metacharacters ('+', '(', '?') crashed or mis-matched.
            """
            start = text.find(entity)
            if start == -1:
                return -1, -1
            return start, start + len(entity)

        project = get_object_or_404(Project, pk=self.kwargs['project_id'])
        entity_labels = [label.text for label in project.labels.all()]
        relation_labels = [relation.relation_name for relation in project.relation_labels.all()]
        doc = get_object_or_404(Document, pk=self.kwargs['doc_id'])
        final_prompt = GUIDELINES_PROMPT.format(','.join(entity_labels), ','.join(relation_labels), doc.text)
        answer = openai_chat_completion_response(final_prompt)
        logger.debug('%s', answer)
        # NOTE(review): raises json.JSONDecodeError when the model replies
        # with non-JSON text — confirm whether a 4xx fallback is wanted.
        result = json.loads(answer)
        predict_recommendations = []
        for entity_type, entities in result['entity'].items():
            for entity in entities:
                # Only keep labels that actually exist in this project.
                if entity_type in entity_labels:
                    start_offset, end_offset = find_offset(doc.text, entity)
                    if start_offset != -1 and end_offset != -1:
                        predict_recommendations.append({'label': entity_type,
                                                        'start_offset': start_offset,
                                                        'end_offset': end_offset})
        final_result = {
            'indices': [doc.id],
            'recommendations': {doc.id: predict_recommendations},
            # Robustness fix: the model may omit the "relation" key entirely;
            # the old code raised KeyError in that case.
            'relations': {doc.id: result.get('relation', {})}
        }
        return Response(final_result)


class CsrfTokenView(APIView):
    """Return a CSRF token for the given project (no authentication required)."""
    pagination_class = None
    permission_classes = []

    def get(self, request, *args, **kwargs):
        # 404 if the project does not exist; the object itself is unused.
        get_object_or_404(Project, pk=self.kwargs['project_id'])
        token = get_token(request)
        # Bug fix: the previous Content-Type "application/json,charset=utf-8"
        # used a comma where HTTP requires a semicolon before parameters;
        # JsonResponse (already imported at the top of the file) emits a
        # well-formed header.
        return JsonResponse({'token': token})


class UserView(APIView):
    """Return the id of the currently authenticated user."""
    pagination_class = None
    permission_classes = [IsAuthenticated]

    def get(self, request, *args, **kwargs):
        # Bug fix: the previous Content-Type "application/json,charset=utf-8"
        # used a comma where HTTP requires a semicolon before parameters;
        # JsonResponse (already imported at the top of the file) emits a
        # well-formed header.
        return JsonResponse({'user_id': request.user.id})