
# import hanlp
# import matplotlib.pyplot as plt
# from wordcloud import WordCloud
# from django.shortcuts import render, redirect, get_object_or_404
# from .models import TextEntry, HelpDocument
# import os
# from django.contrib.auth import authenticate, login
# from django.contrib.auth.models import User
# from django.contrib import messages
# # 加载分词器和词性标注模型
# tokenizer = hanlp.load('CTB6_CONVSEG')  # 分词模型
# pos_tagger = hanlp.load('CTB9_CON_ELECTRA_SMALL')  # 词性标注模型
#
# def create_wordcloud(text, filename):
#     # 更新此路径为您的字体文件实际路径
#     font_path = os.path.join('analysis/static/fonts', 'SimHei.ttf')
#     wordcloud = WordCloud(font_path=font_path).generate(text)
#     plt.figure(figsize=(10, 5))
#     plt.imshow(wordcloud, interpolation='bilinear')
#     plt.axis('off')
#     plt.savefig(filename)
#     plt.close()
#
#
# def index(request):
#     if request.method == 'POST':
#         original_text = request.POST['text']
#
#         # 分词
#         segmented = tokenizer(original_text)
#
#         # 词性标注
#         pos_tags = pos_tagger(segmented)
#         # 格式化分词和词性结果
#         segmented_text = ' '.join(segmented)
#         pos_text = ' '.join([f"{word}/{tag}" for word, tag in zip(segmented, pos_tags)])
#
#         # 保存到数据库
#         text_entry = TextEntry(
#             original_text=original_text,
#             segmented_text=segmented_text,
#             part_of_speech=pos_text
#         )
#         text_entry.save()
#         # 生成词云
#         wordcloud_filename = f'analysis/static/img/wordcloud.png'
#         create_wordcloud(' '.join(segmented), wordcloud_filename)
#
#         return redirect('result', entry_id=text_entry.id)
#
#     return render(request, 'index.html')
#
# def result(request, entry_id):
#     text_entry = get_object_or_404(TextEntry, id=entry_id)
#
#     original_text = text_entry.original_text
#     segmented_text = text_entry.segmented_text
#     pos_text = text_entry.part_of_speech
#
#     # 将词性标注转换为包含样式的HTML
#     styled_pos_text = ""
#     for item in pos_text.split():
#         if '/' in item:  # 只有当项包含 '/' 时才进行拆分
#             word, tag = item.rsplit('/', 1)  # 分割词和词性
#             if tag.startswith('n'):  # 以 'n' 开头的词性标记为名词
#                 styled_pos_text += f'<span class="color-noun">{word}</span> '
#             elif tag.startswith('v'):  # 以 'v' 开头的词性标记为动词
#                 styled_pos_text += f'<span class="color-verb">{word}</span> '
#             elif tag.startswith('a'):  # 以 'a' 开头的词性标记为形容词
#                 styled_pos_text += f'<span class="color-adjective">{word}</span> '
#             elif tag.startswith('d'):  # 以 'd' 开头的词性标记为副词
#                 styled_pos_text += f'<span class="color-adverb">{word}</span> '
#             else:
#                 styled_pos_text += f'{word} '  # 其他词性不加样式
#         else:
#             styled_pos_text += f'{item} '  # 没有词性的项直接添加，不加样式
#
#     context = {
#         'original_text': original_text,
#         'segmented_text': segmented_text,
#         'part_of_speech': styled_pos_text,  # 更新为带样式的文本
#         'uploaded_file_url': text_entry.uploaded_file.url if text_entry.uploaded_file else None,
#     }
#
#     return render(request, 'result.html', context)
#
#
#
#
# # 用户登录
# def login_view(request):
#     if request.method == 'POST':
#         username = request.POST['username']
#         password = request.POST['password']
#         user = authenticate(request, username=username, password=password)
#         if user is not None:
#             login(request, user)
#             return redirect('home')  # 登录后重定向到主页
#         else:
#             messages.error(request, '用户名或密码错误')
#     return render(request, 'login.html')
#
# # 用户注册
# def register_view(request):
#     if request.method == 'POST':
#         username = request.POST['username']
#         email = request.POST['email']
#         password = request.POST['password']
#         try:
#             user = User.objects.create_user(username=username, email=email, password=password)
#             user.save()
#             messages.success(request, '注册成功')
#             return redirect('login')
#         except Exception as e:
#             messages.error(request, str(e))
#     return render(request, 'register.html')
#
# def help_document_view(request):
#     documents = HelpDocument.objects.all()
#     return render(request, 'help.html', {'documents': documents})


# def result(request, entry_id):
#     text_entry = TextEntry.objects.get(id=entry_id)
#
#     # 处理词性标注结果，确保只有有效的二元组
#     pos_tuples = []
#     for pos in text_entry.part_of_speech.split():
#         parts = pos.split('/')
#         if len(parts) == 2:  # 确保有两个部分
#             pos_tuples.append(parts)
#
#     return render(request, 'result.html', {
#         'text_entry': text_entry,
#         'pos_tuples': pos_tuples
#     })

import hanlp
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from django.shortcuts import render, redirect, get_object_or_404
from .models import TextEntry, HelpDocument, CustomUser
import os
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.contrib import messages
from collections import Counter

# Load the segmentation and POS-tagging models once at import time — hanlp.load
# is expensive, so these are shared module-level singletons reused by every request.
tokenizer = hanlp.load('CTB6_CONVSEG')  # word segmentation model
pos_tagger = hanlp.load('CTB9_CON_ELECTRA_SMALL')  # part-of-speech tagging model

def create_wordcloud(text, filename, font_path=None):
    """Render *text* as a word-cloud image and save it to *filename*.

    Args:
        text: Whitespace-separated (already segmented) words to visualize.
        filename: Output image path handed to matplotlib's savefig.
        font_path: Optional TTF font path. Defaults to the bundled SimHei
            font so CJK glyphs render instead of empty boxes.
    """
    if font_path is None:
        font_path = os.path.join('analysis/static/fonts', 'SimHei.ttf')
    wordcloud = WordCloud(font_path=font_path).generate(text)
    plt.figure(figsize=(10, 5))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')
    plt.savefig(filename)
    # Close the figure explicitly — pyplot keeps figures alive in global state,
    # which leaks memory in a long-running web process.
    plt.close()

def index(request):
    """Render the landing page (text analysis itself is handled by segment_text)."""
    return render(request, 'index.html')

from django.shortcuts import render, redirect
from django.contrib import messages
from django.db import connection
from django.contrib.sessions.models import Session

def login_view(request):
    """Authenticate against the CustomUser table and start a session.

    On success stores the username in the session and redirects to 'home';
    on failure re-renders the login page with a flash error message.
    """
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']

        try:
            # Query through the ORM (consistent with register_view) instead of
            # hand-written SQL — this avoids hardcoding the table name
            # "analysis_CustomUser", whose casing may not match Django's
            # default lowercase table naming on case-sensitive databases.
            stored_password = (
                CustomUser.objects
                .filter(username=username)
                .values_list('password', flat=True)
                .first()
            )

            if stored_password is not None:
                # SECURITY: passwords are stored and compared in plaintext
                # (register_view saves them unhashed). In production use
                # Django's auth framework or make_password/check_password.
                if stored_password == password:
                    request.session['username'] = username
                    messages.success(request, '登录成功')
                    return redirect('home')  # replace with the real home URL name
                else:
                    messages.error(request, '密码错误')
            else:
                messages.error(request, '用户不存在')
        except Exception as e:
            messages.error(request, f'登录失败: {str(e)}')

    return render(request, 'login.html')


def some_view(request):
    """Template for a session-protected view: bounce anonymous users to login.

    NOTE(review): the logged-in branch currently falls through and returns
    None, which Django rejects — a real view must return an HttpResponse here.
    """
    if 'username' not in request.session:
        return redirect('login')  # not logged in: redirect to the login page
    # user is logged in; continue with the actual view logic here


def logout_view(request):
    """End the current session and send the user back to the login page."""
    request.session.flush()  # clears all session data and rotates the session key
    messages.success(request, '已成功退出登录')
    return redirect('login')


# def login_view(request):
#     if request.method == 'POST':
#         username = request.POST['username']
#         password = request.POST['password']
#         user = authenticate(request, username=username, password=password)
#         if user is not None:
#             login(request, user)
#             return redirect('home')  # 登录后重定向到主页
#         else:
#             messages.error(request, '用户名或密码错误')
#     return render(request, 'login.html')


from django.shortcuts import render, redirect
from django.contrib import messages
from .models import CustomUser  # 导入自定义模型
def register_view(request):
    """Create a CustomUser row from the posted registration form.

    On success flashes '注册成功' and redirects to the login page; on any
    failure (e.g. a duplicate username) flashes the error and re-renders
    the registration form.
    """
    if request.method == 'POST':
        username = request.POST['username']
        email = request.POST['email']
        password = request.POST['password']
        try:
            # SECURITY: the password is stored in plaintext. In production,
            # hash it first (django.contrib.auth.hashers.make_password) or
            # use the built-in User.objects.create_user instead.
            # objects.create() already INSERTs the row, so the redundant
            # extra save() call from the original has been dropped.
            CustomUser.objects.create(username=username, email=email, password=password)
            messages.success(request, '注册成功')
            return redirect('login')
        except Exception as e:
            messages.error(request, f'注册失败: {str(e)}')
    return render(request, 'register.html')


# def register_view(request):
#     if request.method == 'POST':
#         username = request.POST['username']
#         email = request.POST['email']
#         password = request.POST['password']
#         try:
#             user = User.objects.create_user(username=username, email=email, password=password)
#             user.save()
#             messages.success(request, '注册成功')
#             return redirect('login')
#         except Exception as e:
#             messages.error(request, str(e))
#     return render(request, 'register.html')

def segment_text(request):
    """Segment + POS-tag posted text, persist a TextEntry, and build a word cloud.

    POST: reads 'text', runs the module-level hanlp tokenizer and tagger,
    stores the results in a TextEntry, regenerates the shared word-cloud
    image, then redirects to the result view for the new entry.
    GET: renders the input form.
    """
    if request.method == 'POST':
        original_text = request.POST['text']

        # Word segmentation, then POS tagging over the token list.
        segmented = tokenizer(original_text)
        pos_tags = pos_tagger(segmented)

        segmented_text = ' '.join(segmented)
        pos_text = ' '.join(f"{word}/{tag}" for word, tag in zip(segmented, pos_tags))

        # Persist so the result page can be reloaded later by id.
        text_entry = TextEntry(
            original_text=original_text,
            segmented_text=segmented_text,
            part_of_speech=pos_text,
        )
        text_entry.save()

        # NOTE(review): a single shared filename means concurrent requests
        # overwrite each other's image; a per-entry filename would avoid this.
        wordcloud_filename = 'analysis/static/img/wordcloud.png'
        create_wordcloud(segmented_text, wordcloud_filename)

        return redirect('result', entry_id=text_entry.id)

    return render(request, 'segment.html')


# Maps the first letter of a CTB POS tag to the CSS class used for highlighting.
_POS_CLASS_BY_PREFIX = {
    'n': 'color-noun',       # nouns
    'v': 'color-verb',       # verbs
    'a': 'color-adjective',  # adjectives
    'd': 'color-adverb',     # adverbs
}


def _style_pos_text(pos_text):
    """Convert 'word/tag word/tag ...' into HTML with per-POS span classes.

    Items without a '/' are passed through unstyled; tags whose first letter
    is not in _POS_CLASS_BY_PREFIX contribute the bare word. Every piece ends
    with a trailing space, matching the original concatenation behavior.
    """
    pieces = []
    for item in pos_text.split():
        if '/' in item:
            word, tag = item.rsplit('/', 1)  # split word from its POS tag
            css_class = _POS_CLASS_BY_PREFIX.get(tag[:1])
            if css_class:
                pieces.append(f'<span class="{css_class}">{word}</span> ')
            else:
                pieces.append(f'{word} ')
        else:
            pieces.append(f'{item} ')
    # join() instead of repeated += avoids quadratic string building.
    return ''.join(pieces)


def result(request, entry_id):
    """Render the result page for one saved analysis.

    Context: the original text, the space-joined segmentation, the POS text
    converted to styled HTML spans, and a word-frequency dict.
    """
    text_entry = get_object_or_404(TextEntry, id=entry_id)

    segmented_text = text_entry.segmented_text
    # Word frequencies over the whitespace-tokenized segmentation.
    frequency = Counter(segmented_text.split())

    context = {
        'original_text': text_entry.original_text,
        'segmented_text': segmented_text,
        # NOTE(review): words are interpolated into HTML unescaped; if the
        # template renders this with |safe, user input can inject markup —
        # consider django.utils.html.escape / format_html.
        'part_of_speech': _style_pos_text(text_entry.part_of_speech),
        'word_frequency': dict(frequency),
    }

    return render(request, 'result.html', context)


# text_analysis/views.py
from django.shortcuts import render, redirect
from .models import HelpDocument
from .forms import FeedbackForm


from django.contrib import messages

def help_document_view(request):
    """Show all help documents and handle feedback-form submissions.

    A valid POSTed feedback form is saved, a success flash message is added,
    and the user is redirected back to the help page; otherwise the page is
    rendered with the (possibly invalid, bound) form.
    """
    documents = HelpDocument.objects.all()

    form = FeedbackForm(request.POST) if request.method == 'POST' else FeedbackForm()
    if form.is_bound and form.is_valid():
        form.save()  # persist the feedback
        messages.success(request, '感谢您的反馈！我们会尽快处理。')
        return redirect('help_document')

    context = {'documents': documents, 'form': form}
    return render(request, 'help.html', context)


# def help_document_view(request):
#     documents = HelpDocument.objects.all()
#     return render(request, 'help.html', {'documents': documents})




# import hanlp
# import matplotlib.pyplot as plt
# from wordcloud import WordCloud
# from django.shortcuts import render, redirect
# from .models import TextEntry
# import os
#
# # 加载分词器和词性标注模型
# tokenizer = hanlp.load('CTB6_CONVSEG')  # 分词模型
# pos_tagger = hanlp.load('CTB9_CON_ELECTRA_SMALL')  # 词性标注模型
#
#
# def create_wordcloud(text, filename):
#     # 更新此路径为您的字体文件实际路径
#     font_path = os.path.join('analysis/static/fonts', 'SimHei.ttf')
#     wordcloud = WordCloud(font_path=font_path).generate(text)
#     plt.figure(figsize=(10, 5))
#     plt.imshow(wordcloud, interpolation='bilinear')
#     plt.axis('off')
#     plt.savefig(filename)
#     plt.close()
#
# def index(request):
#     if request.method == 'POST':
#         original_text = request.POST['text']
#
#         # 分词
#         segmented = tokenizer(original_text)
#
#         # 词性标注
#         pos_tags = pos_tagger(segmented)
#
#         # 格式化分词和词性结果
#         segmented_text = ' '.join(segmented)
#         pos_text = ' '.join([f"{word}/{tag}" for word, tag in zip(segmented, pos_tags)])
#
#         # 保存到数据库
#         text_entry = TextEntry(
#             original_text=original_text,
#             segmented_text=segmented_text,
#             part_of_speech=pos_text
#         )
#         text_entry.save()
#         # 生成词云
#         wordcloud_filename = f'analysis/static/img/wordcloud_{text_entry.id}.png'
#         create_wordcloud(' '.join(segmented), wordcloud_filename)
#
#         return redirect('result', entry_id=text_entry.id)
#
#     return render(request, 'index.html')
#
#
#
#
# def result(request, entry_id):
#     text_entry = TextEntry.objects.get(id=entry_id)
#
#     # 处理词性标注结果，确保只有有效的二元组
#     pos_tuples = []
#     for pos in text_entry.part_of_speech.split():
#         parts = pos.split('/')
#         if len(parts) == 2:  # 确保有两个部分
#             pos_tuples.append(parts)
#
#     return render(request, 'result.html', {
#         'text_entry': text_entry,
#         'pos_tuples': pos_tuples
#     })


# import hanlp
# import matplotlib.pyplot as plt
# from wordcloud import WordCloud
# from django.shortcuts import render, redirect
# from .models import TextEntry
# import os
#
# # 加载分词器和词性标注模型
# tokenizer = hanlp.load('CTB6_CONVSEG')  # 分词模型
# pos_tagger = hanlp.load('CTB9_CON_ELECTRA_SMALL')  # 词性标注模型
#
# def create_wordcloud(text, filename):
#     font_path = os.path.join('analysis/static/fonts', 'SimHei.ttf')  # 字体路径
#     wordcloud = WordCloud(font_path=font_path).generate(text)
#     plt.figure(figsize=(10, 5))
#     plt.imshow(wordcloud, interpolation='bilinear')
#     plt.axis('off')
#     plt.savefig(filename)
#     plt.close()
#
# def index(request):
#     if request.method == 'POST':
#         original_text = request.POST['text']
#
#         # 分词
#         segmented = tokenizer(original_text)
#
#         # 词性标注
#         pos_tags = pos_tagger(segmented)
#
#         # 格式化分词和词性结果
#         segmented_text = ' '.join(segmented)
#         pos_text = ' '.join([f"{word}/{tag}" for word, tag in zip(segmented, pos_tags)])
#
#         # 保存到数据库
#         text_entry = TextEntry(
#             original_text=original_text,
#             segmented_text=segmented_text,
#             part_of_speech=pos_text
#         )
#         text_entry.save()
#
#         # 生成词云
#         # wordcloud_filename = f'analysis/static/img/wordcloud_{text_entry.id}.png'
#         wordcloud_filename = f'analysis/static/img/wordcloud.png'
#         create_wordcloud(' '.join(segmented), wordcloud_filename)
#
#         return redirect('result', entry_id=text_entry.id)
#
#     return render(request, 'index.html')
#
# def result(request, entry_id):
#     text_entry = TextEntry.objects.get(id=entry_id)
#
#     # 处理词性标注结果，确保只有有效的二元组
#     pos_tuples = []
#     for pos in text_entry.part_of_speech.split():
#         parts = pos.split('/')
#         if len(parts) == 2:  # 确保有两个部分
#             pos_tuples.append(parts)
#
#     # 统计词性用于可视化
#     pos_count = {}
#     for _, tag in pos_tuples:
#         if tag in pos_count:
#             pos_count[tag] += 1
#         else:
#             pos_count[tag] = 1
#
#     return render(request, 'result.html', {
#         'text_entry': text_entry,
#         'pos_tuples': pos_tuples,
#         'pos_count': pos_count  # 传递词性计数以供可视化使用
#     })

# import hanlp
# from django.shortcuts import render, redirect
# from .models import TextEntry
#
# # 加载分词器和词性标注模型
# tokenizer = hanlp.load('CTB6_CONVSEG')  # 分词模型
# pos_tagger = hanlp.load('CTB9_CON_ELECTRA_SMALL')  # 词性标注模型
#
#
# def index(request):
#     if request.method == 'POST':
#         original_text = request.POST['text']
#
#         # 分词
#         segmented = tokenizer(original_text)
#
#         # 词性标注
#         pos_tags = pos_tagger(segmented)
#
#         # 格式化分词和词性结果
#         segmented_text = ' '.join(segmented)
#         pos_text = ' '.join([f"{word}/{tag}" for word, tag in zip(segmented, pos_tags)])
#
#         # 保存到数据库
#         text_entry = TextEntry(
#             original_text=original_text,
#             segmented_text=segmented_text,
#             part_of_speech=pos_text
#         )
#         text_entry.save()
#
#         return redirect('result', entry_id=text_entry.id)
#
#     return render(request, 'index.html')
#
#
# def result(request, entry_id):
#     text_entry = TextEntry.objects.get(id=entry_id)
#
#     # 处理词性标注结果
#     pos_tuples = []
#     for pos in text_entry.part_of_speech.split():
#         parts = pos.split('/')
#         if len(parts) == 2:  # 确保有两个部分
#             pos_tuples.append(parts)
#
#     return render(request, 'result.html', {
#         'text_entry': text_entry,
#         'pos_tuples': pos_tuples  # 传递分词和词性对列表
#     })



