from flask import Blueprint, render_template, request, jsonify
from flask_login import current_user
import base64
import urllib.parse
import html
import random
import string
import lorem
import re
from collections import Counter

# Blueprint for all text-manipulation tool routes; registered by the app factory.
text_tools_bp = Blueprint('text_tools', __name__)


@text_tools_bp.route('/')
def index():
    """Render the text-tools landing page with the catalogue of available tools."""
    # (name, slug, description, icon) for each tool card shown on the page.
    tool_specs = [
        ('文本转换器', 'text-converter',
         '大小写转换、首字母大写等基础文本转换功能', 'fa-font'),
        ('文本编码/解码', 'text-codec',
         'Base64、URL、HTML实体编码解码工具', 'fa-code'),
        ('文本比较', 'text-diff',
         '比较两段文本的差异并高亮显示', 'fa-exchange-alt'),
        ('随机文本生成', 'text-generator',
         '生成随机文本、段落、列表等', 'fa-paragraph'),
        ('文本统计分析', 'text-statistics',
         '统计文本字符数、词数、行数及词频分析', 'fa-calculator'),
    ]
    tools = [
        {'name': name, 'slug': slug, 'description': description, 'icon': icon}
        for name, slug, description, icon in tool_specs
    ]
    return render_template('tools/text/index.html', tools=tools)


@text_tools_bp.route('/text_converter_ajax', methods=['POST'])
def text_converter_ajax():
    """Handle AJAX text-conversion requests; returns JSON {error, result|message}."""
    # Guard: only XHR POSTs are served; everything else is rejected.
    if request.method != 'POST' or request.headers.get('X-Requested-With') != 'XMLHttpRequest':
        return jsonify({'error': 1, 'message': '无效的请求'})

    text = request.form.get('text', '')
    operation = request.form.get('operation', '')

    # Dispatch table mapping operation names to pure string transforms.
    transforms = {
        'uppercase': str.upper,
        'lowercase': str.lower,
        'capitalize': str.capitalize,
        'title': str.title,
        'remove_spaces': lambda s: s.replace(' ', ''),
        'remove_lines': lambda s: s.replace('\n', ' '),
        'add_line_numbers': lambda s: '\n'.join(
            f"{i+1}. {line}" for i, line in enumerate(s.split('\n'))
        ),
    }

    transform = transforms.get(operation)
    if transform is None:
        return jsonify({'error': 1, 'message': '不支持的操作类型'})

    try:
        return jsonify({
            'error': 0,
            'result': transform(text)
        })
    except Exception as e:
        return jsonify({'error': 1, 'message': f'处理出错: {str(e)}'})


@text_tools_bp.route('/text_converter', methods=['GET', 'POST'])
def text_converter():
    """Render the text-converter page; on POST apply the selected transform."""
    result = None
    original_text = ''
    operation = ''

    # Supported operations mapped to their string transforms. An unknown
    # operation leaves `result` as None (the page shows no output).
    transforms = {
        'uppercase': str.upper,
        'lowercase': str.lower,
        'capitalize': str.capitalize,
        'title': str.title,
        'remove_spaces': lambda s: s.replace(' ', ''),
        'remove_lines': lambda s: s.replace('\n', ' '),
        'add_line_numbers': lambda s: '\n'.join(
            f"{i+1}. {line}" for i, line in enumerate(s.split('\n'))
        ),
    }

    if request.method == 'POST':
        original_text = request.form.get('text', '')
        operation = request.form.get('operation', '')
        transform = transforms.get(operation)
        if transform is not None:
            result = transform(original_text)

    return render_template('tools/text/text_converter.html',
                           original_text=original_text,
                           operation=operation,
                           result=result)


@text_tools_bp.route('/text_codec_ajax', methods=['POST'])
def text_codec_ajax():
    """Handle AJAX encode/decode requests (Base64 / URL / HTML entities).

    Expects form fields `text` and `operation`; returns JSON of the form
    {'error': 0, 'result': ...} on success or {'error': 1, 'message': ...}
    on failure.
    """
    if request.method == 'POST' and request.headers.get('X-Requested-With') == 'XMLHttpRequest':
        original_text = request.form.get('text', '')
        operation = request.form.get('operation', '')
        result = ''

        try:
            if operation == 'base64_encode':
                result = base64.b64encode(original_text.encode()).decode()
            elif operation == 'base64_decode':
                # Catch ValueError, which covers binascii.Error (malformed
                # Base64) and UnicodeDecodeError (non-UTF-8 payload).
                # The previous bare `except:` also swallowed SystemExit and
                # KeyboardInterrupt.
                try:
                    result = base64.b64decode(original_text.encode()).decode()
                except ValueError:
                    return jsonify({'error': 1, 'message': '解码失败，请检查输入是否为有效的Base64字符串'})
            elif operation == 'url_encode':
                result = urllib.parse.quote(original_text)
            elif operation == 'url_decode':
                try:
                    result = urllib.parse.unquote(original_text)
                except ValueError:
                    return jsonify({'error': 1, 'message': '解码失败，请检查输入是否为有效的URL编码字符串'})
            elif operation == 'html_encode':
                result = html.escape(original_text)
            elif operation == 'html_decode':
                try:
                    result = html.unescape(original_text)
                except ValueError:
                    return jsonify({'error': 1, 'message': '解码失败，请检查输入是否为有效的HTML实体编码字符串'})
            else:
                return jsonify({'error': 1, 'message': '不支持的操作类型'})

            return jsonify({
                'error': 0,
                'result': result
            })
        except Exception as e:
            return jsonify({'error': 1, 'message': f'处理出错: {str(e)}'})

    return jsonify({'error': 1, 'message': '无效的请求'})


@text_tools_bp.route('/text_codec', methods=['GET', 'POST'])
def text_codec():
    """Encode/decode tool page (Base64 / URL / HTML entities).

    On POST applies the selected operation and renders the page with the
    result; on decode failure `result` carries a Chinese error message.
    """
    result = None
    original_text = ''
    operation = ''

    if request.method == 'POST':
        original_text = request.form.get('text', '')
        operation = request.form.get('operation', '')

        if operation == 'base64_encode':
            result = base64.b64encode(original_text.encode()).decode()
        elif operation == 'base64_decode':
            # ValueError covers binascii.Error (malformed Base64) and
            # UnicodeDecodeError (non-UTF-8 payload). The previous bare
            # `except:` also swallowed SystemExit and KeyboardInterrupt.
            try:
                result = base64.b64decode(original_text.encode()).decode()
            except ValueError:
                result = "解码失败，请检查输入是否为有效的Base64字符串"
        elif operation == 'url_encode':
            result = urllib.parse.quote(original_text)
        elif operation == 'url_decode':
            try:
                result = urllib.parse.unquote(original_text)
            except ValueError:
                result = "解码失败，请检查输入是否为有效的URL编码字符串"
        elif operation == 'html_encode':
            result = html.escape(original_text)
        elif operation == 'html_decode':
            try:
                result = html.unescape(original_text)
            except ValueError:
                result = "解码失败，请检查输入是否为有效的HTML实体编码字符串"

    return render_template('tools/text/text_codec.html',
                          original_text=original_text,
                          operation=operation,
                          result=result)


@text_tools_bp.route('/text_diff_ajax', methods=['POST'])
def text_diff_ajax():
    """Handle AJAX requests comparing two texts line by line; returns JSON."""
    # Guard: only XHR POSTs are served.
    if request.method != 'POST' or request.headers.get('X-Requested-With') != 'XMLHttpRequest':
        return jsonify({'error': 1, 'message': '无效的请求'})

    first = request.form.get('text1', '')
    second = request.form.get('text2', '')

    try:
        # A dedicated diff library (e.g. difflib) would be more robust;
        # this is a deliberately simple line-by-line comparison.
        left_lines = first.split('\n')
        right_lines = second.split('\n')
        diff_lines = []

        for idx, (left, right) in enumerate(zip(left_lines, right_lines)):
            if left != right:
                diff_lines.append(f"第 {idx+1} 行不同:")
                diff_lines.append(f"- {left}")
                diff_lines.append(f"+ {right}")

        # Trailing lines present in only one of the two texts
        # (at most one of these loops runs).
        common = min(len(left_lines), len(right_lines))
        for idx in range(common, len(left_lines)):
            diff_lines.append(f"第 {idx+1} 行在第二个文本中不存在:")
            diff_lines.append(f"- {left_lines[idx]}")
        for idx in range(common, len(right_lines)):
            diff_lines.append(f"第 {idx+1} 行在第一个文本中不存在:")
            diff_lines.append(f"+ {right_lines[idx]}")

        if not diff_lines:
            diff_lines = ["两段文本完全相同"]

        return jsonify({
            'error': 0,
            'result': '\n'.join(diff_lines)
        })
    except Exception as e:
        return jsonify({'error': 1, 'message': f'处理出错: {str(e)}'})


@text_tools_bp.route('/text_diff', methods=['GET', 'POST'])
def text_diff():
    """Render the text-comparison page; on POST show a line-by-line diff."""
    result = None
    text1 = ''
    text2 = ''

    if request.method == 'POST':
        text1 = request.form.get('text1', '')
        text2 = request.form.get('text2', '')
        # A dedicated diff library (e.g. difflib) would be more robust;
        # this is a deliberately simple line-by-line comparison.
        left_lines = text1.split('\n')
        right_lines = text2.split('\n')
        diff_lines = []

        for idx, (left, right) in enumerate(zip(left_lines, right_lines)):
            if left != right:
                diff_lines.append(f"第 {idx+1} 行不同:")
                diff_lines.append(f"- {left}")
                diff_lines.append(f"+ {right}")

        # Trailing lines present in only one of the two texts
        # (at most one of these loops runs).
        common = min(len(left_lines), len(right_lines))
        for idx in range(common, len(left_lines)):
            diff_lines.append(f"第 {idx+1} 行在第二个文本中不存在:")
            diff_lines.append(f"- {left_lines[idx]}")
        for idx in range(common, len(right_lines)):
            diff_lines.append(f"第 {idx+1} 行在第一个文本中不存在:")
            diff_lines.append(f"+ {right_lines[idx]}")

        if not diff_lines:
            diff_lines = ["两段文本完全相同"]

        result = '\n'.join(diff_lines)

    return render_template('tools/text/text_diff.html',
                           text1=text1,
                           text2=text2,
                           result=result)


@text_tools_bp.route('/text_generator', methods=['GET', 'POST'])
def text_generator():
    """Random text generation tool.

    On POST reads `type` (paragraph/sentence/word/list) and `count` from the
    form and renders the generated text.
    """
    result = None
    params = {}

    if request.method == 'POST':
        gen_type = request.form.get('type', 'paragraph')
        # A non-numeric `count` previously raised an uncaught ValueError
        # (HTTP 500); fall back to the default instead. Clamp to 1..100 so
        # a huge value cannot be used to generate unbounded output.
        try:
            count = int(request.form.get('count', 3))
        except (TypeError, ValueError):
            count = 3
        count = max(1, min(count, 100))

        params = {'type': gen_type, 'count': count}

        try:
            # Simple implementation; a dedicated text-generation library
            # could be used in a real project.
            if gen_type == 'paragraph':
                result = '\n\n'.join(generate_paragraph() for _ in range(count))
            elif gen_type == 'sentence':
                result = ' '.join(generate_sentence() for _ in range(count))
            elif gen_type == 'word':
                result = ' '.join(generate_word() for _ in range(count))
            elif gen_type == 'list':
                items = [generate_sentence() for _ in range(count)]
                result = '\n'.join(f"- {item}" for item in items)
            else:
                result = "不支持的生成类型"
        except Exception as e:
            result = f"生成文本时出错: {str(e)}"

    return render_template('tools/text/text_generator.html',
                           result=result,
                           params=params)


@text_tools_bp.route('/text_statistics', methods=['GET', 'POST'])
def text_statistics():
    """Text statistics tool.

    Computes character/word/line/paragraph/sentence counts, average word
    length, an estimated reading time, and the top-10 word frequencies for
    the submitted text.
    """
    stats = None
    original_text = ''

    if request.method == 'POST':
        original_text = request.form.get('text', '')

        if original_text:
            # Basic character counts.
            char_count = len(original_text)
            char_count_no_spaces = len(original_text.replace(' ', '').replace('\t', '').replace('\n', ''))
            line_count = len(original_text.splitlines()) or 1

            # Paragraphs are separated by a blank line.
            paragraphs = re.split(r'\n\s*\n', original_text)
            paragraph_count = len(paragraphs) or 1

            # Words: ASCII words by word boundary; each CJK character
            # counts as one word.
            words = re.findall(r'\b\w+\b|[\u4e00-\u9fff]', original_text)
            word_count = len(words)

            # Sentences are delimited by ASCII/Chinese terminal punctuation.
            sentences = re.split(r'[.!?。！？]+', original_text)
            sentence_count = len([s for s in sentences if s.strip()]) or 1

            avg_word_length = round(sum(len(word) for word in words) / word_count, 1) if word_count > 0 else 0

            # Estimated reading time in minutes, assuming ~200 words/minute.
            reading_time = max(1, round(word_count / 200, 1))

            # Word frequency, excluding common function words.
            stop_words = {'the', 'a', 'an', 'and', 'or', 'but', 'if', 'then', 'else', 'when',
                           'at', 'from', 'by', 'for', 'with', 'about', 'to', 'in', 'on', 'of',
                           '的', '了', '和', '是', '在', '我', '有', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到'}
            # Bug fix: the word regex above yields each Chinese character as
            # a single-char token, so the previous `len(w) > 1` filter
            # dropped every Chinese word and the Chinese stop words never
            # applied. Keep multi-char words AND single CJK characters, then
            # remove stop words.
            cjk_char = re.compile(r'[\u4e00-\u9fff]')
            filtered_words = [w.lower() for w in words
                              if (len(w) > 1 or cjk_char.match(w))
                              and w.lower() not in stop_words]
            word_frequency = Counter(filtered_words).most_common(10)

            stats = {
                'char_count': char_count,
                'char_count_no_spaces': char_count_no_spaces,
                'word_count': word_count,
                'line_count': line_count,
                'paragraph_count': paragraph_count,
                'sentence_count': sentence_count,
                'avg_word_length': avg_word_length,
                'reading_time': reading_time,
                'word_frequency': word_frequency
            }

    return render_template('tools/text/text_statistics.html',
                           original_text=original_text,
                           stats=stats)

def generate_paragraph():
    """Return a randomly generated paragraph (Chinese or lorem-ipsum English).

    Falls back to a hand-rolled English generator if `lorem` fails at
    runtime. NOTE(review): `lorem` is imported at module top, so a missing
    library would actually fail at import time, not here — confirm whether
    this fallback is still needed.
    """
    try:
        # Randomly pick Chinese or English output.
        if random.choice([True, False]):
            return '\n\n'.join(generate_chinese_paragraph() for _ in range(random.randint(1, 3)))
        return lorem.paragraph()
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        if random.choice([True, False]):
            return '\n\n'.join(generate_chinese_paragraph() for _ in range(random.randint(1, 3)))
        sentences = [generate_sentence() for _ in range(random.randint(3, 8))]
        return ' '.join(sentences)

def generate_sentence():
    """Return a randomly generated sentence (Chinese or lorem-ipsum English).

    Falls back to a hand-rolled English generator if `lorem` fails at runtime.
    """
    try:
        # Randomly pick Chinese or English output.
        if random.choice([True, False]):
            return generate_chinese_sentence()
        return lorem.sentence()
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        if random.choice([True, False]):
            return generate_chinese_sentence()
        words = [generate_word() for _ in range(random.randint(5, 15))]
        sentence = ' '.join(words)
        return sentence[0].upper() + sentence[1:] + '.'

def generate_word():
    """Return a random word (Chinese or lorem-ipsum English).

    Falls back to a random lowercase ASCII string if `lorem` fails at runtime.
    """
    try:
        # Randomly pick Chinese or English output.
        if random.choice([True, False]):
            return generate_chinese_word()
        return lorem.word()
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        if random.choice([True, False]):
            return generate_chinese_word()
        length = random.randint(3, 10)
        return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))

def generate_chinese_paragraph():
    """Build a Chinese paragraph by concatenating 3-8 random sentences."""
    sentence_total = random.randint(3, 8)
    parts = [generate_chinese_sentence() for _ in range(sentence_total)]
    return ''.join(parts)

def generate_chinese_sentence():
    """Build a Chinese sentence: 5-20 random words ended by a random punctuation mark."""
    word_total = random.randint(5, 20)
    body = ''.join([generate_chinese_word() for _ in range(word_total)])
    ending = random.choice(['。', '！', '？', '；', '，'])
    return body + ending

def generate_chinese_word():
    """Return a random Chinese "word" of 1-3 characters drawn from common hanzi."""
    # Pool of frequently used Chinese characters (CJK range U+4E00-U+9FFF).
    common_chars = [
        '的', '一', '是', '了', '我', '不', '人', '在', '他', '有', '这', '个', '上', '们', '来', '到', '时', '大', '地', '为',
        '子', '中', '你', '说', '生', '国', '年', '着', '就', '那', '和', '要', '她', '出', '也', '得', '里', '后', '自', '以',
        '会', '家', '可', '下', '而', '过', '天', '去', '能', '对', '小', '多', '然', '于', '心', '学', '么', '之', '都', '好',
        '看', '起', '发', '当', '没', '成', '只', '如', '事', '把', '还', '用', '第', '样', '道', '想', '作', '种', '开', '美',
        '总', '从', '无', '情', '己', '面', '最', '女', '但', '现', '前', '些', '所', '同', '日', '手', '又', '行', '动', '方',
        '期', '头', '经', '长', '儿', '回', '位', '分', '爱', '老', '因', '很', '给', '名', '法', '间', '斯', '知', '世', '什',
        '两', '次', '使', '身', '者', '被', '高', '已', '亲', '其', '进', '此', '话', '常', '与', '活', '正', '感', '见', '明',
        '问', '力', '理', '尔', '点', '文', '几', '定', '本', '公', '特', '做', '外', '孩', '相', '西', '果', '走', '将', '月',
        '十', '实', '向', '声', '车', '全', '信', '重', '三', '机', '工', '物', '气', '每', '并', '别', '真', '打', '太', '新',
        '比', '才', '便', '夫', '再', '书', '部', '水', '像', '眼', '等', '体', '却', '加', '电', '主', '界', '门', '利', '海'
    ]
    # Word length is 1-3 characters.
    length = random.randint(1, 3)
    return ''.join(random.choice(common_chars) for _ in range(length))