from django.db.models import F, Q
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, ListAPIView
from rest_framework.views import APIView
from rest_framework.parsers import MultiPartParser, FormParser
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

from hte.utils.general import load_request_body, get_json_field_r
from .models.material import MaterialCategory, MaterialTag
from .models.data import DataMeta, DataContent, SearchRecord
from .models.template import Template
from .models.dataset import DataSet
from .models.serializer import MaterialCategorySerializer, TemplateSerializer, MaterialTagSerializer, \
    DataMetaSerializer, DataSetSerializer
from .models.pagination import MyPagination
from .utils.general import general_response_data, get_all_children, json_response
from rest_framework.response import Response
import json
from .utils.serializer import JSONSerializer
from bson.objectid import ObjectId
from hte.error.handle import abort_on_error
from hte.error.models import HTEError
from apps.mlplatform.views import FormatListCreateAPIView
from apps.search.core.es_handler import ElasticsearchManager
from apps.mlplatform.views import FormatListCreateAPIView, FormatRetrivevAPIView
from rest_framework import status
from rest_framework.decorators import api_view
from django.conf import settings

from .utils.general import get_dataset_index
from apps.mlplatform.views import FormatListCreateAPIView
from apps.search.core.es_handler import ElasticsearchManager
from apps.analysis.tasks import on_view
from apps.accounts.auth import login_required_api

from .utils.general import get_dataset_info
from django.http import FileResponse
from .models.file import ObjectContentType, ObjectFile, ObjectFileType
from .utils.file import ObjectFileMaker
import logging
from apps.accounts.auth import check_login
import jieba
import redis
import os

logger = logging.getLogger('django')


class CategoryList_by_page(FormatListCreateAPIView):
    """Paginated list/create endpoint for material categories."""
    serializer_class = MaterialCategorySerializer
    pagination_class = MyPagination
    queryset = MaterialCategory.objects.all()


class CategoryDetail(FormatRetrivevAPIView):
    """Detail endpoint for a single material category."""
    serializer_class = MaterialCategorySerializer
    queryset = MaterialCategory.objects.all()


# fsch time:2018-12-18
def material_category_tree(req):
    """Return the material category tree as nested JSON, shaped like:
    [
      {"id": 1,
       "name": "category 1",
       "children": []
      },
      {"id": 2,
       "name": "category2",
       "children": [{"_id": "5a275921d0fd9d2d08e1c24f",
                     "name": "material class1",
                     "children": []
                    }]
      },
      ......
    ]
    """

    def build_node(category):
        # Children are categories whose parent is this node; the filter
        # level__gt=1 keeps the level-0 global root out of the recursion.
        return {
            "id": category.id,
            "name": category.name,
            "children": [build_node(child) for child in
                         MaterialCategory.objects.filter(parent_id=category.id, level__gt=1)],
        }

    # Level 1 is the visible top of the tree; level 0 is the global root node.
    tree = [build_node(root) for root in MaterialCategory.objects.filter(level=1)]
    return json_response(tree)


class TemplateList_by_page(FormatListCreateAPIView):
    """Paginated list/create endpoint exposing only published templates."""
    queryset = Template.objects.all()
    serializer_class = TemplateSerializer
    pagination_class = MyPagination

    def get_queryset(self):
        # Only published templates are listed; the class-level `queryset`
        # above is retained for DRF introspection.
        return Template.objects.filter(published=True)


class TemplateList_by_user(FormatListCreateAPIView):
    """Paginated templates owned by the requesting user."""
    serializer_class = TemplateSerializer
    pagination_class = MyPagination

    def get_queryset(self):
        return Template.objects.filter(author=self.request.user.username)


class TemplateList_by_user_asc(FormatListCreateAPIView):
    """Requesting user's templates, oldest first."""
    serializer_class = TemplateSerializer
    pagination_class = MyPagination

    def get_queryset(self):
        author = self.request.user.username
        return Template.objects.filter(author=author).order_by('pub_date')


class TemplateList_by_user_des(FormatListCreateAPIView):
    """Requesting user's templates, newest first."""
    serializer_class = TemplateSerializer
    pagination_class = MyPagination

    def get_queryset(self):
        author = self.request.user.username
        return Template.objects.filter(author=author).order_by('-pub_date')


class TemplateList_all(FormatListCreateAPIView):
    """List all published templates, wrapping payloads in the project's
    standard response envelope."""
    queryset = Template.objects.all()
    serializer_class = TemplateSerializer

    def get_queryset(self):
        return Template.objects.filter(published=True)

    def list(self, request, *args, **kwargs):
        # Same flow as DRF's default list(), except the serialized payload
        # is passed through general_response_data() before returning.
        qs = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(qs)
        if page is None:
            serializer = self.get_serializer(qs, many=True)
            return Response(general_response_data(serializer.data))
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(general_response_data(serializer.data))


class TemplateDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single template."""
    queryset = Template.objects.all()
    serializer_class = TemplateSerializer

    def delete(self, request, *args, **kwargs):
        # Destroy, then answer 204 with the standard empty envelope.
        self.perform_destroy(self.get_object())
        return Response(general_response_data({}),
                        status=status.HTTP_204_NO_CONTENT)


class MaterialTagList(FormatListCreateAPIView):
    """Paginated list/create endpoint for material tags."""
    serializer_class = MaterialTagSerializer
    pagination_class = MyPagination
    queryset = MaterialTag.objects.all()


class MaterialTagDetail(FormatRetrivevAPIView):
    """Detail endpoint for a single material tag."""
    serializer_class = MaterialTagSerializer
    queryset = MaterialTag.objects.all()


class DataMetaList(FormatListCreateAPIView):
    """Paginated list/create endpoint over all DataMeta rows."""
    serializer_class = DataMetaSerializer
    pagination_class = MyPagination
    queryset = DataMeta.objects.all()


class DataMeta_by_user(FormatListCreateAPIView):
    """Paginated DataMeta rows owned by the requesting user."""
    serializer_class = DataMetaSerializer
    pagination_class = MyPagination

    def get_queryset(self):
        return DataMeta.objects.filter(author=self.request.user.username)


class DataMeta_by_user_asc(FormatListCreateAPIView):
    """Requesting user's DataMeta rows, oldest first."""
    serializer_class = DataMetaSerializer
    pagination_class = MyPagination

    def get_queryset(self):
        author = self.request.user.username
        return DataMeta.objects.filter(author=author).order_by('add_time')


class DataMeta_by_user_des(FormatListCreateAPIView):
    """Requesting user's DataMeta rows, newest first."""
    serializer_class = DataMetaSerializer
    pagination_class = MyPagination

    def get_queryset(self):
        author = self.request.user.username
        return DataMeta.objects.filter(author=author).order_by('-add_time')


class DataMetaDetail(FormatRetrivevAPIView):
    """Retrieve / update / delete a DataMeta row together with the
    DataContent record it references through ``dc_id``."""
    queryset = DataMeta.objects.all()
    serializer_class = DataMetaSerializer

    def retrieve(self, request, *args, **kwargs):
        """Return one DataMeta; view counting is delegated to on_view()."""
        instance = self.get_object()
        # instance.views += 1
        on_view(instance.id)  # analysis task records the view instead of inline counting
        # instance.save()
        serializer = self.get_serializer(instance)
        data = general_response_data(serializer.data)
        return Response(data)

    def update(self, request, *args, **kwargs):
        """Update the meta row and, when a 'content' payload is present,
        patch the matching DataContent record field by field.

        Expects request.data shaped as {'meta': {...}, 'content': {...}?};
        a missing 'meta' key raises KeyError.
        """
        instance = self.get_object()
        content = request.data.get('content', False)
        if content:
            # Apply each submitted content field onto the DataContent record.
            dc = DataContent.objects.get(id=instance.dc_id)
            for key in content.keys():
                setattr(dc, key, content.get(key))
            dc.save()
        partial = kwargs.pop('partial', False)
        serializer = self.get_serializer(instance, data=request.data['meta'], partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)

        if getattr(instance, '_prefetched_objects_cache', None):
            # If 'prefetch_related' has been applied to a queryset, we need to
            # forcibly invalidate the prefetch cache on the instance.
            instance._prefetched_objects_cache = {}

        return Response(serializer.data)

    def delete(self, request, *args, **kwargs):
        """Delete the DataContent record first, then the DataMeta row."""
        instance = self.get_object()
        DataContent.objects.get(id=instance.dc_id).delete()
        self.perform_destroy(instance)
        data = general_response_data({})
        return Response(data, status=status.HTTP_204_NO_CONTENT)


@login_required_api
@api_view(['POST'])
def DataMeta_add(request):
    """Create a DataMeta plus its DataContent record from a JSON body.

    Expected body: {"meta": {...}, "content": {...}} — `meta` supplies the
    DataMeta fields and `content` is deserialized section-by-section
    according to the template's serializers.  On any failure the partially
    created DataMeta is rolled back and a BAD_DATA error is raised.
    """
    if request.method == 'POST':
        dm = None
        try:
            data = json.loads(request.body)
            meta = data['meta']
            content = data['content']
            t = Template.objects.get(pk=meta.get('tid'))
            js = JSONSerializer(t)
            dc_id = ObjectId()
            # Record the current user as the author (fsch).
            dm = DataMeta.add(meta.get('title'), meta.get('category'), meta.get('tid'),
                              meta.get('source'), meta.get('keywords'), dc_id, meta.get('doi'),
                              meta.get('abstract'), meta.get('purpose'), request.user)
            true_content = dict(_id=dc_id)
            for name, serializer in js.serializers.items():
                # Each named template section has its own serializer.
                true_content[name] = serializer.deserialize(content.get(name), owner_id=dc_id,
                                                            meta_id=dm.id, tid=t.id,
                                                            author=request.user.username)
            true_content['_meta_id'] = dm.id
            true_content['_tid'] = dm.tid
            dc = DataContent.add(dm.id, t.id, true_content)
            dm.dc_id = str(dc.id)
            dm.save()
            # Keep Elasticsearch in sync with the newly added data.
            ElasticsearchManager.insert(dm.id)
            ElasticsearchManager.param_merge_insert(dm.id)
        except Exception as ex:
            # logger.info(ex) used to drop the traceback; keep it for debugging,
            # then roll back the partially created DataMeta.
            logger.exception(ex)
            if dm and dm.pk:
                dm.delete()
            abort_on_error(HTEError.BAD_DATA)
        else:
            Template.inc_ref(t)
            serializer = DataMetaSerializer(dm)
            return Response(serializer.data, status.HTTP_201_CREATED)
    else:
        abort_on_error(HTEError.METHOD_NOT_ALLOWED)


# Persist the latest search text submitted by each user.
def save_record(request):
    """Store one search record for the requesting user (PostgreSQL-backed)."""
    text = get_json_field_r(request, 'text', str, allow_none=True, default='')
    username = request.user.username
    SearchRecord.objects.create(record=text, author_id=username)
    return json_response()


class _RecommendPagination(MyPagination):
    """Pagination used only by Recommend.

    The previous code executed ``MyPagination.page_size = 6`` at class
    definition time, which silently changed the page size of *every* view
    using MyPagination; a subclass keeps the override local.
    """
    page_size = 6


# Recommendation: based on the viewed DataMeta and the user's search records.
class Recommend(FormatListCreateAPIView):
    serializer_class = DataMetaSerializer
    pagination_class = _RecommendPagination

    def get_queryset(self):
        """Return up to 4 DataMeta rows most similar to the one being viewed.

        Similarity is TF-IDF cosine similarity over jieba-segmented
        "title,abstract" text.  When the user has search records, the
        similarity to the concatenated last-10 searches is averaged in 50/50.
        """
        show_id = int(self.request.GET.get('id'))

        current = DataMeta.objects.filter(id=show_id).values('id', 'title', 'abstract')
        # Limit candidates to the 1000 most recent other records.
        others = (DataMeta.objects.filter(~Q(id=show_id))
                  .order_by('-add_time')[:1000]
                  .values('id', 'title', 'abstract'))

        ids, documents = [], []
        for row in list(current) + list(others):
            try:
                text = row['title'] + ',' + row['abstract']
            except TypeError:
                # title/abstract may be None: skip the row entirely.  The old
                # code appended the id *before* this check, letting the id
                # list drift out of alignment with the similarity matrix.
                continue
            ids.append(row['id'])
            documents.append(text)

        # Segment with jieba; a custom dictionary could be loaded via
        # jieba.load_userdict() if needed.
        corpus = [' '.join(jieba.cut(doc, cut_all=False)) for doc in documents]

        tfidf = TfidfVectorizer().fit_transform(corpus)
        sim = linear_kernel(tfidf, tfidf)[ids.index(show_id)]

        # Blend in the user's recent searches, if any (stored in PostgreSQL).
        username = self.request.user.username
        records = (SearchRecord.objects.filter(author_id=username)
                   .order_by('search_time')[:10].values('record'))
        search_text = ' '.join(r['record'] for r in records)
        if search_text.strip():
            # Append the search history as one extra document; its similarity
            # row (minus itself) aligns with the item documents.
            corpus.append(' '.join(jieba.cut(search_text, cut_all=False)))
            tfidf_all = TfidfVectorizer().fit_transform(corpus)
            sim_search = linear_kernel(tfidf_all, tfidf_all)[-1][:-1]
            sim = 0.5 * (sim + sim_search)

        # Highest-similarity ids, excluding the viewed item, capped at 4.
        # (The old code indexed similar_items[0..3] directly and crashed
        # whenever fewer than four candidates existed.)
        ranked = sim.argsort()[::-1]
        top_ids = [ids[i] for i in ranked if ids[i] != show_id][:4]
        return DataMeta.objects.filter(id__in=top_ids)


# fsch: automated intake for data samples coming from partner institutions.
@api_view(['POST'])
def DataMeta_add_auto(req):
    """Automated data submission carrying three fixed file attachments.

    Expects a multipart POST with files `file_bmp`, `file_txt`, `file_hys`
    plus JSON strings `meta` and `content`; the stored file URLs are wired
    into fixed slots of the content document before deserialization.
    """
    check_login()
    dm = None
    try:
        file_urls = list()
        file_bmp = req.FILES.get('file_bmp')
        file_txt = req.FILES.get('file_txt')
        file_hys = req.FILES.get('file_hys')

        # Store the three uploads and collect their URLs in a fixed order.
        for upload in (file_bmp, file_txt, file_hys):
            of = ObjectFile.add(ObjectFileType.DATA_CONTENT, ObjectContentType.DATA_FILE,
                                upload, name=upload.name, author=req.user.username)
            file_urls.append(of.get_file_url())

        dict_meta = json.loads(req.POST.get('meta'))
        dict_content = json.loads(req.POST.get('content'))

        # Fixed slots in the content schema for the three uploaded files.
        dict_content['样品描述']['微观组织特征']['组织照片'] = [file_urls[0]]
        dict_content['实验结果']['结果文件']['力、位移、时间：txt'] = [file_urls[1]]
        dict_content['实验结果']['结果文件']['面积函数 hys ara'] = [file_urls[2]]

        t = Template.objects.get(pk=dict_meta.get('tid'))
        js = JSONSerializer(t)
        dc_id = ObjectId()
        dm = DataMeta.add(dict_meta.get('title'), dict_meta.get('category'), dict_meta.get('tid'),
                          dict_meta.get('source'), dict_meta.get('keywords'), dc_id, dict_meta.get('doi'),
                          dict_meta.get('abstract'), dict_meta.get('purpose'), req.user)  # TODO: the user must not be empty
        true_content = dict(_id=dc_id)
        for name, serializer in js.serializers.items():
            true_content[name] = serializer.deserialize(dict_content.get(name), owner_id=dc_id,
                                                        meta_id=dm.id, tid=t.id,
                                                        author=req.user.username)
        true_content['_meta_id'] = dm.id
        true_content['_tid'] = dm.tid
        dc = DataContent.add(dm.id, t.id, true_content)
        dm.dc_id = str(dc.id)
        dm.save()
        # Keep Elasticsearch in sync with the newly added data.
        ElasticsearchManager.insert(dm.id)
        ElasticsearchManager.param_merge_insert(dm.id)

    except Exception as ex:
        # logger.info(ex) used to drop the traceback; keep it, then roll back.
        logger.exception(ex)
        if dm and dm.pk:
            dm.delete()
        abort_on_error(HTEError.BAD_DATA, str(ex))
    else:
        Template.inc_ref(t)
        # Typo fix: the response used to read "Saved Succeccfully!".
        return json_response("Saved Successfully!", status_code=201)


@api_view(['POST'])
def DataMeta_add_auto2(req):
    """Automated data submission without file attachments.

    Expects a POST with JSON strings `meta` and `content`; otherwise
    follows the same create/rollback flow as the manual DataMeta_add.
    """
    check_login()
    dm = None
    try:
        dict_meta = json.loads(req.POST.get('meta'))
        dict_content = json.loads(req.POST.get('content'))

        t = Template.objects.get(pk=dict_meta.get('tid'))
        js = JSONSerializer(t)
        dc_id = ObjectId()
        dm = DataMeta.add(dict_meta.get('title'), dict_meta.get('category'), dict_meta.get('tid'),
                          dict_meta.get('source'), dict_meta.get('keywords'), dc_id, dict_meta.get('doi'),
                          dict_meta.get('abstract'), dict_meta.get('purpose'), req.user)  # TODO: the user must not be empty
        true_content = dict(_id=dc_id)
        for name, serializer in js.serializers.items():
            true_content[name] = serializer.deserialize(dict_content.get(name), owner_id=dc_id,
                                                        meta_id=dm.id, tid=t.id,
                                                        author=req.user.username)

        # Same flow as the manual add endpoint from here on.
        true_content['_meta_id'] = dm.id
        true_content['_tid'] = dm.tid
        dc = DataContent.add(dm.id, t.id, true_content)
        dm.dc_id = str(dc.id)
        dm.save()
        # Keep Elasticsearch in sync with the newly added data.
        ElasticsearchManager.insert(dm.id)
        ElasticsearchManager.param_merge_insert(dm.id)
    except Exception as ex:
        # Previously the exception was swallowed without any logging.
        logger.exception(ex)
        if dm and dm.pk:
            dm.delete()
        abort_on_error(HTEError.BAD_DATA, str(ex))
    else:
        # Typo fix: the response used to read "Saved Succeccfully!".
        return json_response("Saved Successfully!", status_code=201)


@api_view(['POST', 'DELETE'])
def uploaded_data_content_file(req):
    """
    post: submit data files (their format must match the generated template)
        params: carried in the POST body
            file: the submitted data file(s)
        return: URLs of the successfully stored files
    delete:
        params: none
        return: none
    """
    check_login()  # only logged-in users may submit or delete files
    if req.method == 'POST':
        try:
            is_image = req.POST.get('type') == 'image'
            c_t = ObjectContentType.DATA_IMAGE if is_image else ObjectContentType.DATA_FILE
            if not req.FILES:
                abort_on_error(HTEError.BAD_DATA, 'File must be set.')

            urls = []
            for uploaded in req.FILES.getlist('files'):
                stored = ObjectFile.add(ObjectFileType.DATA_CONTENT, c_t, uploaded,
                                        name=uploaded.name, author=req.user.username)
                urls.append(stored.get_file_url())
            return json_response(urls)
        except ValueError as ex:
            abort_on_error(HTEError.BAD_PARAMETER, str(ex))
    elif req.method == 'DELETE':
        for url in load_request_body(req, list):
            try:
                # Only the requesting user's own files may be deleted.
                ObjectFile.objects.get(file=url, misc__author=req.user.username).delete()
            except ObjectFile.DoesNotExist:
                pass
        return json_response(status_code=204)


def DataMeta_download(request):
    """Return (creating on first request) the export file descriptor for a
    DataMeta identified by the `oid` and `type` GET parameters."""
    try:
        oid = request.GET.get('oid')
        c_t = ObjectContentType[request.GET.get('type', '').upper()]
        of = ObjectFile.objects.filter(misc__oid=oid, c_type=c_t).first()
        if of is None:
            # No cached export yet: build one from the template + record.
            dm = DataMeta.objects.get(pk=oid)
            template = Template.objects.get(pk=dm.tid)
            of = ObjectFileMaker(ObjectFileType.DATA, c_t).save(template, queryset=[dm], oid=oid)
        # on_download(oid)  # download counter update (currently disabled)
        return json_response(of.to_dict())
    except ObjectFile.DoesNotExist:
        abort_on_error(HTEError.NOT_FOUND)


class DataSetList(FormatListCreateAPIView):
    """List/create datasets; on create, attribute the author and record the
    parsed index/row/column metadata of the uploaded file."""
    queryset = DataSet.objects.all()
    serializer_class = DataSetSerializer
    pagination_class = MyPagination
    parser_classes = (MultiPartParser,)

    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        # Reload the stored row, attach ownership and file statistics.
        dataset = DataSet.objects.get(pk=serializer.data['id'])
        dataset.author = request.user
        dataset.indexs, dataset.rows, dataset.cols = get_dataset_info(dataset)
        dataset.save()
        enriched = DataSetSerializer(instance=dataset)
        return Response(general_response_data(enriched.data),
                        status=status.HTTP_201_CREATED, headers=headers)


class DataSet_by_user(FormatListCreateAPIView):
    """Paginated datasets owned by the requesting user."""
    serializer_class = DataSetSerializer
    pagination_class = MyPagination

    def get_queryset(self):
        return DataSet.objects.filter(author=self.request.user)


class DataSet_by_user_asc(FormatListCreateAPIView):
    """Requesting user's datasets, oldest upload first."""
    serializer_class = DataSetSerializer
    pagination_class = MyPagination

    def get_queryset(self):
        owner = self.request.user
        return DataSet.objects.filter(author=owner).order_by('upload_time')


class DataSet_by_user_des(FormatListCreateAPIView):
    """Requesting user's datasets, newest upload first."""
    serializer_class = DataSetSerializer
    pagination_class = MyPagination

    def get_queryset(self):
        owner = self.request.user
        return DataSet.objects.filter(author=owner).order_by('-upload_time')


class DataSetDetail(FormatRetrivevAPIView):
    """Retrieve one dataset, bumping its view counter on each read."""
    queryset = DataSet.objects.all()
    serializer_class = DataSetSerializer
    parser_classes = (MultiPartParser, FormParser)

    def retrieve(self, request, *args, **kwargs):
        instance = self.get_object()
        # Count this view before serializing.
        instance.views += 1
        instance.save()
        payload = general_response_data(self.get_serializer(instance).data)
        return Response(payload)


class DataSetAdd(APIView):
    """Bare multipart upload endpoint creating a DataSet from posted data."""
    parser_classes = (MultiPartParser, FormParser)

    def post(self, request, *args, **kwargs):
        file_serializer = DataSetSerializer(data=request.data)
        if not file_serializer.is_valid():
            return Response(file_serializer.errors, status=400)
        file_serializer.save()
        return Response(file_serializer.data, status=201)


@api_view(["GET", "POST", "DELETE"])
def dataset_download(request, pk):
    """Stream a dataset file as an attachment and count the download.

    Only GET is implemented; any other accepted method falls through to
    METHOD_NOT_ALLOWED.
    """
    if request.method == 'GET':
        try:
            dataset = DataSet.objects.get(pk=pk)
        except DataSet.DoesNotExist:
            abort_on_error(HTEError.NOT_FOUND)
        file_path = dataset.get_abs_file_path()
        file_name = os.path.basename(file_path)
        # FileResponse takes ownership of the file handle and closes it
        # once the response is consumed.
        response = FileResponse(open(file_path, 'rb'))
        response['Content-Disposition'] = "attachment; filename=%s" % file_name
        # Atomic DB-side increment: the previous read-modify-write
        # (`downloads += 1` then save()) lost counts under concurrent
        # downloads and rewrote every field of the row.
        DataSet.objects.filter(pk=pk).update(downloads=F('downloads') + 1)
        return response
    else:
        abort_on_error(HTEError.METHOD_NOT_ALLOWED)


def get_category_as_tree(request):
    """Return the full category tree, one subtree per level-0 root node."""
    if request.method == 'GET':
        roots = MaterialCategory.objects.filter(level=0)
        return json_response([get_all_children(root) for root in roots])
    else:
        abort_on_error(HTEError.METHOD_NOT_ALLOWED)


def template_file(req, tid):
    """
    get: return a download descriptor for a template export file
        params: carried in the GET query string
            type: str, required — the export format, one of XLSX/JSON/XML
        return: the file download descriptor
    """
    try:
        c_t = ObjectContentType[req.GET.get('type', '').upper()]
        of = ObjectFile.objects.filter(misc__tid=tid, c_type=c_t).first()
        if of is None:
            # First request for this template/type: generate and cache it.
            template = Template.objects.get(pk=tid)
            of = ObjectFileMaker(ObjectFileType.TEMPLATE, c_t).save(template)
        return json_response(of.to_dict())
    except KeyError:
        abort_on_error(HTEError.BAD_PARAMETER, 'Data type must be "XLSX", "XML" or "JSON"!')
    except Template.DoesNotExist:
        abort_on_error(HTEError.NOT_FOUND, 'Template (%s) not found' % tid)


'''
File-based data submission was failing with a "no uploaded_file" error, so
yp added the endpoint below as a tentative fix.
'''
from apps.storage.models.file import ObjectContentType
from json.decoder import JSONDecodeError
from apps.storage.utils.serializers.common import ParsingError
@login_required_api
@api_view(['POST'])
def uploaded_file(req):
    """
    get: list the current user's most recently uploaded data files
        params:
            page: int, page number to fetch, default 1
            per_page: int, items per page, default 10
        return: the requested list of file descriptors
    post: submit a data file (its format must match the generated template)
        params: carried in the POST body
            file: the submitted data file
        return: id of the successfully stored file
    delete:
        params: none
        return: none

    NOTE(review): @api_view(['POST']) makes DRF reject GET/DELETE with 405
    before this view runs, so those branches below are currently dead code —
    confirm whether the decorator should be ['GET', 'POST', 'DELETE'].
    """
    check_login()  # only logged-in users may list or submit files
    if req.method == 'GET':
        try:
            per_page = max(1, int(req.GET.get('per_page', 10)))
            page = max(1, int(req.GET.get('page', 1)))

            # NOTE(review): a positional argument to .filter() must be a Q
            # object; ObjectFileType.UPLOADED probably needs to be passed as a
            # keyword filter — verify against the ObjectFile model (this
            # branch is unreachable today, see the docstring note).
            qs = ObjectFile.objects.filter(ObjectFileType.UPLOADED, misc__author=req.user.username)
            total = qs.count()  # total number of matching files
            end = page * per_page
            start = end - per_page
            rs = [file.to_dict() for file in qs[start:end]]
            return json_response(rs, total=total)
        except ValueError:
            abort_on_error(HTEError.BAD_DATA)
    elif req.method == 'POST':
        try:
            if not req.FILES:
                abort_on_error(HTEError.BAD_DATA, 'File must be set.')
            file = req.FILES.get('file')
            ext = os.path.splitext(file.name)[1].upper()
            # Bug fix: this tuple used to read ('.XLSX', '.JSON', '.XLSX') —
            # '.XLSX' twice — so XML uploads were rejected as unsupported.
            if ext in ('.XLSX', '.JSON', '.XML'):
                c_t = ObjectContentType[ext[1:]]
            elif ext in ('.RAR', '.ZIP', '.7Z', '.GZ', '.XZ', '.BZ2', '.TAR'):
                c_t = ObjectContentType.ARCHIVE
            else:
                abort_on_error(HTEError.BAD_PARAMETER, 'Data type `%s` is not supported!' % ext)

            of_maker = ObjectFileMaker(ObjectFileType.UPLOADED, c_t)
            of = of_maker.save(file=file, author=req.user.username)
            return json_response(of.id)
        except JSONDecodeError as ex:
            abort_on_error(HTEError.BAD_JSON, str(ex))
        except ValueError as ex:
            abort_on_error(HTEError.BAD_DATA, str(ex))
        except ParsingError as ex:
            abort_on_error(HTEError.BAD_EXCEL, str(ex))
    elif req.method == 'DELETE':
        fids = load_request_body(req, list)
        for fid in fids:
            try:
                f = ObjectFile.objects.get(pk=fid, misc__author=req.user.username)  # only the owner's files
                f.delete()
            except ObjectFile.DoesNotExist:
                pass
        return json_response(status_code=204)
