package org.example.corpus.monolingual.service.impl;

import com.huaban.analysis.jieba.JiebaSegmenter;
import lombok.extern.slf4j.Slf4j;
import org.example.corpus.config.Constants;
import org.example.corpus.corpuscenter.dao.CorpusDao;
import org.example.corpus.corpuscenter.service.CorpusService;
import org.example.corpus.corpuscenter.service.RedisService;
import org.example.corpus.model.Node;
import org.example.corpus.model.Sentence;
import org.example.corpus.monolingual.controller.dto.FrequencyDto;
import org.example.corpus.monolingual.controller.dto.kwic.KwicRequest;
import org.example.corpus.monolingual.controller.dto.kwic.KwicResponse;
import org.example.corpus.monolingual.dao.MonolingualDao;
import org.example.corpus.monolingual.service.MonolingualService;
import org.example.corpus.utils.FileUtil;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;

@Service
@Slf4j
public class MonolingualServiceImpl implements MonolingualService {

    /** Rendered in place of a context word that is masked out or missing. */
    private static final String PLACEHOLDER = "-";

    /** DAO backing corpus-id lookup, node cache, sentence page query and node persistence. */
    @Autowired
    private MonolingualDao monolingualDao;

    /**
     * Returns one page of KWIC (KeyWord In Context) rows for {@code request.word}
     * in the corpus identified by {@code request.file}.
     *
     * <p>{@code leftpart}/{@code rightpart} are 0/1 masks selecting which context
     * words (counted outward from the keyword) are shown; unselected positions are
     * rendered as {@value #PLACEHOLDER}. Sentences whose contexts were already
     * persisted as node rows are emitted first (preserving the original output
     * order); sentences seen for the first time additionally get their contexts
     * persisted via {@code addNodes}.
     *
     * @param request keyword, corpus file name, context masks and paging info
     * @return one page of split left/keyword/right KWIC rows
     */
    @Override
    public List<KwicResponse.KwicData> getKwicResults(KwicRequest request) {
        String keyword = request.getWord();
        List<Integer> leftpart = request.getLeftpart();
        List<Integer> rightpart = request.getRightpart();
        int pageSize = request.getPageSize();
        int offset = (request.getPageNum() - 1) * pageSize;

        // Index of the last selected word on each side; -1 when nothing is selected.
        int leftlast = lastIndexOfOne(leftpart);
        int rightlast = lastIndexOfOne(rightpart);

        Integer corpusId = monolingualDao.getCorpusId(request.getFile());

        // 1. Sentence ids whose cached node rows are wide enough for this window.
        //    HashSet gives O(1) membership checks (List.contains made the page loop O(n*m)).
        Set<Integer> existingSentenceIds = new HashSet<>();
        for (Node node : monolingualDao.getNodes(keyword, corpusId)) {
            String[] leftWords = node.getLeftContext().split("\\s+");
            String[] rightWords = node.getRightContext().split("\\s+");
            // NOTE(review): `<=` accepts a window one word wider than the stored
            // context — looks like it should be `<`; confirm intent before tightening.
            if (leftlast <= leftWords.length && rightlast <= rightWords.length) {
                existingSentenceIds.add(node.getSentenceId());
            }
        }

        // 2. Fetch the requested page of sentences containing the keyword.
        List<Sentence> sentences = monolingualDao.getKwicResults(offset, pageSize, keyword, corpusId);

        // Partition while preserving the original emission order: cached first, new last.
        List<Sentence> oldSentences = new ArrayList<>();
        List<Sentence> newSentences = new ArrayList<>();
        for (Sentence sentence : sentences) {
            if (existingSentenceIds.contains(sentence.getId())) {
                oldSentences.add(sentence);
            } else {
                newSentences.add(sentence);
            }
        }

        List<KwicResponse.KwicData> kwicDataList = new ArrayList<>(sentences.size());
        for (Sentence sentence : oldSentences) {
            kwicDataList.add(toKwicData(sentence, keyword, leftpart, rightpart,
                    leftlast + 1, rightlast + 1, null));
        }
        for (Sentence sentence : newSentences) {
            // Non-null corpusId signals that the contexts must also be persisted.
            kwicDataList.add(toKwicData(sentence, keyword, leftpart, rightpart,
                    leftlast + 1, rightlast + 1, corpusId));
        }
        return kwicDataList;
    }

    /**
     * Builds one KWIC row for a sentence.
     *
     * @param corpusIdForPersist non-null to persist the sentence's contexts as a node row
     */
    private KwicResponse.KwicData toKwicData(Sentence sentence, String keyword,
                                             List<Integer> leftpart, List<Integer> rightpart,
                                             int leftCount, int rightCount,
                                             Integer corpusIdForPersist) {
        String context = sentence.getContent();
        String[] parts = corpusIdForPersist == null
                ? splitContext(context, keyword, leftpart, rightpart, leftCount, rightCount)
                : splitContext(context, keyword, leftpart, rightpart, leftCount, rightCount,
                        corpusIdForPersist, sentence.getId());

        KwicResponse.KwicData kwicData = new KwicResponse.KwicData();
        kwicData.setContext(context);
        kwicData.setLeftContext(parts[0]);
        kwicData.setKeyword(keyword);
        kwicData.setRightContext(parts[2]);
        return kwicData;
    }

    /**
     * Splits {@code context} around the first occurrence of {@code keyword} into
     * {left, keyword, right}, applying the 0/1 masks to each side. Masked or
     * missing positions become {@value #PLACEHOLDER}. Returns three empty-ish
     * parts when the keyword is absent.
     */
    private String[] splitContext(String context, String keyword,
                                  List<Integer> leftpart, List<Integer> rightpart,
                                  Integer leftlast, Integer rightlast) {
        int index = context.indexOf(keyword);
        if (index == -1) {
            // Keyword not found: keep the 3-slot contract with empty contexts.
            return new String[] {"", keyword, ""};
        }

        String leftRaw = context.substring(0, index).trim();
        String rightRaw = rightContextOf(context, index, keyword);

        // The masks count words outward from the keyword, so the left side is
        // processed nearest-word-first and flipped back to reading order after.
        String[] leftWords = leftRaw.split("\\s+");
        reverseInPlace(leftWords);
        String[] rightWords = rightRaw.split("\\s+");

        String left = joinReversed(buildPart(leftWords, 0, leftlast, leftpart).split("\\s+"));
        String right = buildPart(rightWords, 0, rightlast, rightpart);

        return new String[] {left.trim(), keyword, right.trim()};
    }

    /**
     * Same as {@link #splitContext(String, String, List, List, Integer, Integer)}
     * but additionally persists the raw left/right contexts as a node row, so the
     * sentence is recognized as cached on subsequent requests. (Was a near-full
     * copy of the other overload; now delegates.)
     */
    private String[] splitContext(String context, String keyword,
                                  List<Integer> leftpart, List<Integer> rightpart,
                                  Integer leftlast, Integer rightlast,
                                  int corpusId, int sentenceId) {
        int index = context.indexOf(keyword);
        if (index != -1) {
            String leftRaw = context.substring(0, index).trim();
            String rightRaw = rightContextOf(context, index, keyword);
            monolingualDao.addNodes(corpusId, keyword, sentenceId, leftRaw, rightRaw);
        }
        return splitContext(context, keyword, leftpart, rightpart, leftlast, rightlast);
    }

    /**
     * Renders {@code end - start} context positions: the word itself where the
     * mask bit is 1 and the word exists, {@value #PLACEHOLDER} otherwise, joined
     * by single spaces. (Fixes the original's stray double space after each
     * placeholder and an ArrayIndexOutOfBoundsException when the mask window was
     * wider than the available context.)
     */
    private String buildPart(String[] words, int start, int end, List<Integer> partList) {
        StringBuilder result = new StringBuilder();
        for (int i = start; i < end; i++) {
            if (result.length() > 0) {
                result.append(' ');
            }
            boolean selected = i - start < partList.size() && partList.get(i - start) == 1;
            if (selected && i < words.length) {
                result.append(words[i]);
            } else {
                result.append(PLACEHOLDER);
            }
        }
        return result.toString();
    }

    /** Index of the last element equal to 1, or -1 when none is. */
    private static int lastIndexOfOne(List<Integer> flags) {
        for (int i = flags.size() - 1; i >= 0; i--) {
            if (flags.get(i) == 1) {
                return i;
            }
        }
        return -1;
    }

    /**
     * Text after the keyword, skipping the single separator character that
     * normally follows it. Math.min guards the keyword-at-end-of-sentence case,
     * which previously threw StringIndexOutOfBoundsException.
     */
    private static String rightContextOf(String context, int index, String keyword) {
        int start = Math.min(index + keyword.length() + 1, context.length());
        return context.substring(start).trim();
    }

    /** In-place reversal of a small array (avoids the Arrays.asList view dance). */
    private static void reverseInPlace(String[] arr) {
        for (int i = 0, j = arr.length - 1; i < j; i++, j--) {
            String tmp = arr[i];
            arr[i] = arr[j];
            arr[j] = tmp;
        }
    }

    /** Joins the words back-to-front with single spaces. */
    private static String joinReversed(String[] words) {
        StringBuilder sb = new StringBuilder();
        for (int i = words.length - 1; i >= 0; i--) {
            sb.append(words[i]);
            if (i > 0) {
                sb.append(' ');
            }
        }
        return sb.toString();
    }
}
