package cn.edu.zjut.medlinebackend.utils;


import com.google.gson.Gson;
import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.text.PDFTextStripper;

import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.*;

import static cn.edu.zjut.medlinebackend.constant.FileConstant.FILE_ROOT_PATH;

public class MyUtils {

    /** Utility class — static methods only; never instantiated. */
    private MyUtils() {
        throw new AssertionError("No instances");
    }

    /**
     * Shared segmenter instance. Building a {@link JiebaSegmenter} loads the
     * dictionary, which is expensive; the dictionary is held statically by the
     * library, so one instance can be reused.
     * NOTE(review): assumed safe for concurrent {@code process} calls — confirm
     * against the jieba-analysis version in use.
     */
    private static final JiebaSegmenter SEGMENTER = new JiebaSegmenter();

    /**
     * Returns an abstract of {@code content}: up to 100 characters before and
     * after the first occurrence of {@code keyword}. All whitespace in the
     * content is normalized to single spaces before searching.
     *
     * @param content full text to search (null-safe; treated as "no match")
     * @param keyword keyword to locate (null/empty yields no match)
     * @return the surrounding snippet, or an empty string when the keyword is
     *         absent or the arguments are null/empty
     */
    public static String getKeyWordAbs(String content, String keyword) {
        if (content == null || keyword == null || keyword.isEmpty()) {
            return "";
        }
        // Collapse newlines/tabs so the snippet is a single readable line.
        content = content.replaceAll("\\s", " ");

        int keywordIndex = content.indexOf(keyword);
        if (keywordIndex == -1) {
            return "";
        }

        int start = Math.max(0, keywordIndex - 100);
        int end = Math.min(content.length(), keywordIndex + keyword.length() + 100);

        return content.substring(start, end);
    }

    /**
     * Looks up the value of a named cookie on the request.
     *
     * @param request    incoming HTTP request
     * @param cookieName exact cookie name to match
     * @return the cookie's value, or {@code null} when the request carries no
     *         cookies or none with the given name
     */
    public static String getCookieValue(HttpServletRequest request, String cookieName) {
        Cookie[] cookies = request.getCookies();
        if (cookies != null) {
            for (Cookie cookie : cookies) {
                if (cookie.getName().equals(cookieName)) {
                    return cookie.getValue();
                }
            }
        }
        return null;
    }

    /**
     * Segments Chinese text into words using the jieba segmenter in INDEX mode
     * (finer-grained tokens, suitable for search indexing).
     *
     * @param text text to segment
     * @return list of token strings, in document order
     */
    public static List<String> segmentText(String text) {
        List<String> segmentedWords = new ArrayList<>();
        // Reuse the cached segmenter instead of rebuilding it per call.
        for (SegToken segToken : SEGMENTER.process(text, JiebaSegmenter.SegMode.INDEX)) {
            segmentedWords.add(segToken.word);
        }
        return segmentedWords;
    }

    /**
     * Finds the page and line of the first occurrence of a keyword in a PDF
     * stored under {@code FILE_ROOT_PATH}.
     *
     * @param keyWord  keyword to search for
     * @param fileName PDF file name relative to the file root
     * @return map with keys {@code "page"} and {@code "line"} (both 1-based)
     *         for the first occurrence; empty map when the keyword is absent
     *         or the file cannot be read
     */
    public static Map<String, Integer> getKeywordLocator(String keyWord, String fileName) {
        Map<String, Integer> map = new HashMap<>();
        // File.separator instead of a hard-coded "\\" so this also works on
        // Linux/macOS deployments.
        String filePath = FILE_ROOT_PATH + File.separator + fileName;
        try (PDDocument document = PDDocument.load(new File(filePath))) {
            PDFTextStripper pdfTextStripper = new PDFTextStripper();
            for (int pageNumber = 1; pageNumber <= document.getNumberOfPages(); pageNumber++) {
                pdfTextStripper.setStartPage(pageNumber);
                pdfTextStripper.setEndPage(pageNumber);
                String pageText = pdfTextStripper.getText(document);
                // Split on \r\n or \n: the stripper's line separator need not
                // match the platform separator of the extracted text.
                String[] lines = pageText.split("\\r?\\n");
                for (int i = 0; i < lines.length; i++) {
                    if (lines[i].contains(keyWord)) {
                        map.put("line", i + 1);
                        map.put("page", pageNumber);
                        return map; // Only the first occurrence is reported.
                    }
                }
            }
        } catch (IOException e) {
            // Best-effort lookup: callers treat an empty map as "not found",
            // so log the failure with context rather than propagating.
            System.err.println("Failed to search PDF '" + filePath + "': " + e);
        }
        return map;
    }

    /**
     * Computes a popularity-adjusted score: each like adds 0.1, each download
     * adds 0.1, each dislike subtracts 0.05 from the original score.
     * Null arguments are treated as zero to avoid unboxing NPEs.
     *
     * @param originScore base score (null → 0)
     * @param likeNum     number of likes (null → 0)
     * @param dislikeNum  number of dislikes (null → 0)
     * @param downloadNum number of downloads (null → 0)
     * @return the adjusted score
     */
    public static Float calScore(Float originScore, Integer likeNum, Integer dislikeNum, Integer downloadNum) {
        float base = originScore == null ? 0f : originScore;
        int likes = likeNum == null ? 0 : likeNum;
        int dislikes = dislikeNum == null ? 0 : dislikeNum;
        int downloads = downloadNum == null ? 0 : downloadNum;
        return (float) (base + likes * 0.1 - dislikes * 0.05 + downloads * 0.1);
    }
}
