from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
import jieba
import jieba.posseg as pseg
import nltk
from nltk.tokenize import word_tokenize
import ssl
import os

# Work around SSL certificate verification failures when NLTK downloads data
# (some deployment environments lack the CA bundle the downloader expects).
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Python builds without this private helper: keep default verification.
    pass
else:
    ssl._create_default_https_context = _create_unverified_https_context

# Point NLTK at a data directory bundled next to this app so resource
# lookups resolve without relying on a per-user download location.
nltk_data_path = os.path.join(os.path.dirname(__file__), '../nltk_data')
os.environ['NLTK_DATA'] = nltk_data_path
nltk.data.path.append(nltk_data_path)  # make nltk.data.find search it

# Ensure the required NLTK resources exist, downloading them into the local
# data directory on first run. FIX: nltk.download() was previously called
# without download_dir, so resources went to the default per-user location
# instead of the nltk_data_path configured above.
for _resource, _locator, _msg in (
    ('punkt', 'tokenizers/punkt', 'Downloading punkt tokenizer...'),
    ('averaged_perceptron_tagger', 'taggers/averaged_perceptron_tagger',
     'Downloading averaged_perceptron_tagger...'),
):
    try:
        nltk.data.find(_locator)
    except LookupError:
        print(_msg)
        nltk.download(_resource, download_dir=nltk_data_path)

@api_view(['POST'])
def lexical_analysis(request):
    """Tokenize and POS-tag the posted text.

    POST body:
        text (str): text to analyze; defaults to ''.
        language (str): 'zh' for Chinese (jieba), anything else for
            English (NLTK). Defaults to 'zh'.

    Returns:
        Response with 'tokens' (list[str]), 'pos_tags' (list of
        (word, tag) pairs) and 'status': 'success', or an
        {'error', 'status': 'error'} payload with HTTP 400 on failure.
    """
    text = request.data.get('text', '')
    language = request.data.get('language', 'zh')

    try:
        if language == 'zh':
            # Chinese: a single pseg.cut pass yields both segmentation and
            # POS tags. FIX: previously the text was segmented twice
            # (jieba.cut for tokens, pseg.cut for tags), which was redundant
            # and could yield tokens inconsistent with pos_tags; deriving
            # tokens from the same pass guarantees they agree.
            pos_tags = [(word, tag) for word, tag in pseg.cut(text)]
            tokens = [word for word, _ in pos_tags]
        else:
            # English: NLTK tokenization followed by perceptron POS tagging.
            tokens = word_tokenize(text)
            pos_tags = nltk.pos_tag(tokens)

        return Response({
            'tokens': tokens,
            'pos_tags': pos_tags,
            'status': 'success'
        })
    except Exception as e:
        # API boundary: surface analysis failures as a 400 with the error
        # message rather than letting them bubble up as a 500.
        return Response({
            'error': str(e),
            'status': 'error'
        }, status=400)
