package com.ideal.gpt.ability.docqa;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

import com.ideal.gpt.ability.common.NlpJob;
import com.ideal.gpt.ability.docqa.splitter.ResponseSplitter;
import com.lzwork.gpt.utility.common.Passage;
import com.lzwork.utility.collection.CollUtility;
import com.lzwork.utility.file.LineContent;
import com.lzwork.utility.logger.LOGGER;
import com.lzwork.utility.pair.StringPair;
import com.lzwork.utility.str.StrUtility;
import com.lzwork.utility.str.StrUtility.CheckNull;

import lombok.Getter;

/**
 * @author LiZheng
 * @date 2024年4月28日 15:52:59
 */

public class TF_Splitter extends ResponseSplitter
{
	/** Marker separating the keyword section from the title section in a response line. */
	final static String KEY_TITLE = "title:";

	/** Markers that introduce the keyword section (English and Chinese variants). */
	// NOTE(review): field name has a typo (KEYWODS -> KEYWORDS); kept as-is because the
	// field is package-visible and renaming could break other classes in this package.
	final static String[] KEY_KEYWODS =
	{ "keyword:", "关键词", "关键字" };

	/**
	 * One extracted (sentence, keywords, title) item.
	 * <p>
	 * Deliberately a non-static inner class: the constructor reads the outer
	 * splitter's {@code log} flag when falling back to NLP segmentation.
	 */
	@Getter
	public class TfInfo extends StringPair
	{
		/**
		 * @param sentStr  sentence text (also stored as the pair's first element)
		 * @param keyword  raw keyword string from the model response; {@code null}
		 *                 is tolerated and treated as empty
		 * @param titleStr title associated with the sentence, "" when absent
		 */
		public TfInfo(String sentStr, String keyword, String titleStr)
		{
			super(sentStr, keyword);
			sent = sentStr;
			rawKeyword = keyword;
			title = titleStr;

			// Robustness fix: the original NPE'd on the replace() chain below when the
			// response carried no keyword section at all.
			if (keyword == null)
				keyword = "";

			// Normalize Chinese punctuation to a single separator so one split works.
			keyword = keyword.replace("，", ",");
			keyword = keyword.replace("、", ",");
			keyword = keyword.replace("。", "");
			keyword = keyword.replace(":", "");

			String[] parts = keyword.split("[,]|[ ]");
			keywords = StrUtility.objectsToStrings(CheckNull.RemoveEmpty, true, parts);

			// Fallback: nothing usable in the response — segment the sentence itself.
			if (keywords.isEmpty())
			{
				keywords.addAll(NlpJob.splitCoreWords(sent, true));

				if (log)
				{
					if (keywords.isEmpty())
						LOGGER.log(LOGGER.PARSE, "No valid keyword for sentence: " + sent);
					else
						LOGGER.log(LOGGER.PARSE, "No valid keyword for sentence: " + sent + ". Use segments instead.");
				}
			}
		}

		String sent;       // sentence text
		String rawKeyword; // keyword string exactly as received, before normalization
		String title;      // associated title, "" when none

		// Normalized, non-empty keyword tokens (or NLP segments when none were supplied).
		List<String> keywords = new ArrayList<>();

		@Override
		public String toString()
		{
			return "T[" + title + "] - S[" + sent + "] - K[" + rawKeyword + "]";
		}
	}

	/**
	 * @param max maximum number of sentences kept when falling back to random sampling
	 * @param log whether to emit parse-time diagnostics
	 */
	public TF_Splitter(int max, boolean log)
	{
		super(max, log);
	}

	/**
	 * Splits {@code input} at the first keyword marker found (see {@link #KEY_KEYWODS}).
	 *
	 * @param input a single response line
	 * @return a pair (text before the marker, text after the marker), or {@code null}
	 *         when no marker is present
	 */
	protected StringPair subKeySplit(String input)
	{
		for (String word : KEY_KEYWODS)
		{
			int split = input.indexOf(word);
			if (split >= 0)
			{
				String second = input.substring(split + word.length());
				String first = input.substring(0, split);
				return new StringPair(first, second);
			}
		}

		return null;
	}

	/**
	 * Parses a model response into (sentence, keyword, title) items.
	 * <p>
	 * When the structured split yields nothing, falls back to cutting the raw response
	 * into sentences and randomly sampling up to {@code max} of them (with empty
	 * keyword/title, so {@link TfInfo} derives keywords by NLP segmentation).
	 *
	 * @param response raw model response text
	 * @return parsed items; never {@code null}
	 */
	public List<TfInfo> keywordSplit(String response)
	{
		List<TfInfo> result = new ArrayList<>();
		SplitResult qasp = splitResponse(response, 24);

		if (qasp.isEmpty())
		{
			List<String> toGet = Passage.cutSentences(response, 50, 50);
			List<String> sents = CollUtility.randomGetItems(toGet, max);

			for (String sent : sents)
			{
				result.add(new TfInfo(sent, "", ""));
			}
		} else
		{
			for (StringPair sp : qasp.target())
			{
				TfInfo item;
				String first = sp.first();
				String second = sp.second();

				// No keyword part from the structured split — try the inline markers.
				if (!StrUtility.stringValid(second))
				{
					StringPair split = subKeySplit(first);
					if (split != null)
					{
						first = split.first();
						second = split.second();
					}
				}

				// Robustness fix: second may still be null here (invalid pair and no
				// inline marker); the original NPE'd on indexOf below.
				if (second == null)
					second = "";

				int split = second.indexOf(KEY_TITLE);
				if (split >= 0)
				{
					String keyword = second.substring(0, split).trim();
					String title = second.substring(split + KEY_TITLE.length()).trim();

					// "无" means "none" — treat it as no title.
					if (title.equals("无"))
						title = "";

					item = new TfInfo(first, keyword, title);
				} else
				{
					item = new TfInfo(first, second, "");
				}

				result.add(item);
			}
		}

		return result;
	}

	/** Manual smoke test. Optional {@code args[0]} overrides the input file path. */
	public static void main(String[] args)
	{
		TF_Splitter job = new TF_Splitter(3, false);
		String path = args.length > 0 ? args[0] : "v:/doc qa/split_test.txt";
		String content = LineContent.loadString(new File(path));

		List<TfInfo> result = job.keywordSplit(content);

		System.out.println("Total: " + result.size());

		System.out.println(StrUtility.combineString(result, "\n-------------\n"));
	}
}
