package com.sentiment.tools;

import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import org.json.JSONObject;
import org.jsoup.nodes.Element;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.sentiment.config.Config;
import com.sentiment.contentextractor.News;
import com.sentiment.database.bean.CrawlInfo;
import com.sentiment.nlpalgo.bean.Double2;
import com.sentiment.nlpalgo.bean.Keywords;
import com.sentiment.nlpalgo.method.AlgoHttp;

public class Transaction {
	private static final Logger LOG = LoggerFactory.getLogger(Transaction.class);

	/** Maximum number of extracted keywords stored per news item. */
	private static final int MAX_KEYWORDS = 3;

	/**
	 * Wraps a single String into a one-element list. Used to convert article
	 * text into the format expected by the database layer and the NLP HTTP API.
	 *
	 * @param text the text to wrap (stored as-is, even if null)
	 * @return a mutable list containing exactly the given text
	 */
	public static List<String> string2List(String text) {
		List<String> list = new ArrayList<String>();
		list.add(text);
		return list;
	}

	/**
	 * Concatenates a list of Strings into one String. Used to flatten the
	 * paragraph-per-element text format coming out of the database.
	 *
	 * @param li the paragraphs to join; null or empty yields ""
	 * @return the concatenation of all elements, without separators
	 */
	public static String list2String(List<String> li) {
		if (li == null) {
			return "";
		}
		StringBuilder sb = new StringBuilder();
		for (String s : li) {
			sb.append(s);
		}
		return sb.toString();
	}

	/**
	 * Converts a list of crawled {@link News} items into {@link CrawlInfo}
	 * records ready for persistence. When {@code Config.useNlp} is enabled the
	 * sentiment score and top keywords are fetched from the NLP HTTP service;
	 * otherwise random placeholder values are used. Articles for which the
	 * sentiment service returns null are skipped.
	 *
	 * @param news the crawled news items to convert
	 * @return the converted records (possibly fewer than the input when NLP fails)
	 */
	public static List<CrawlInfo> news2CrawlInfos(List<News> news) {
		AlgoHttp algoHttp = new AlgoHttp();
		List<CrawlInfo> infos = new ArrayList<CrawlInfo>();
		Date now = new Date();
		for (News n : news) {
			List<String> list = new ArrayList<String>();
			CrawlInfo info = new CrawlInfo();
			info.setDate(now);
			info.setType("新闻");
			info.setTitle(n.getTitle());
			// Content is stored one paragraph per list element; see crawlInfoText.
			info.setText(crawlInfoText(n.getContentElement()));
			info.setUrl(n.getUrl());
			info.setReleaseTime(Format.string2Date(n.getTime(), 6));
			// Reprint / page-view / comment counts are not yet available from the
			// crawler, so random values are used as placeholders for testing.
			int begin = 50;
			int end = 5000;
			info.setReprint(Generator.generateInt(begin, end));
			end = 25000;
			info.setPageView(Generator.generateInt(begin, end));
			end = 200;
			info.setComment(Generator.generateInt(begin, end));
			if (Config.useNlp) {
				List<Double2> double2s = algoHttp.sentiment(string2List(n.getContent()), AlgoHttp.SENTIMENT_NEWS);
				if (double2s == null) {
					// The NLP service (BosonNLP) failed for this article; skip it
					// and continue with the next one.
					LOG.warn("sentiment service returned null, skipping news: {}", n.getUrl());
					continue;
				}
				info.setSentiment(double2s.get(0).getData1());
				List<Keywords> keywords = algoHttp.keywords(string2List(n.getContent()));
				// The service may fail (null) or return fewer than MAX_KEYWORDS
				// entries; only copy what is actually available.
				if (keywords != null) {
					int limit = Math.min(MAX_KEYWORDS, keywords.size());
					for (int i = 0; i < limit; i++) {
						list.add(keywords.get(i).getWord());
					}
				}
				info.setInnerKeyword(list);
			} else {
				info.setSentiment(Generator.genenrateDouble(0, 1));
				info.setInnerKeyword(list);
			}
			infos.add(info);
		}
		return infos;
	}

	/**
	 * Splits an article body into paragraphs, one list element per child
	 * element of the given container, with all whitespace removed.
	 *
	 * @param e the jsoup element holding the article content; may be null
	 * @return the non-empty paragraph texts (empty list when e is null)
	 */
	private static List<String> crawlInfoText(Element e) {
		List<String> text = new ArrayList<String>();
		// getContentElement() may yield null when extraction failed.
		if (e == null) {
			return text;
		}
		// Element.children() never returns null in jsoup, only an empty Elements.
		for (Element ele : e.children()) {
			String addText = ele.text().replaceAll("\\s", "");
			// Compare content, not references: the original `addText != ""` was a
			// reference comparison and never filtered anything out.
			if (!addText.isEmpty()) {
				text.add(addText);
			}
		}
		return text;
	}

	/**
	 * Converts a list of {@link CrawlInfo} records into JSON objects for the
	 * front end. Note: the release date and text fields are intentionally not
	 * serialized here.
	 *
	 * @param list the records to convert
	 * @return one JSONObject per record, or null when the input is null
	 *         (preserved for existing callers that check for null)
	 */
	public static List<JSONObject> crawlInfo2Json(List<CrawlInfo> list) {
		if (list == null) {
			return null;
		}
		List<JSONObject> jsonObjects = new ArrayList<JSONObject>();
		for (CrawlInfo ci : list) {
			JSONObject json = new JSONObject();
			json.put("sentiment", ci.getSentiment());
			json.put("title", ci.getTitle());
			json.put("url", ci.getUrl());
			json.put("publisher", ci.getPublisher());
			json.put("reprint", ci.getReprint());
			json.put("comment", ci.getComment());
			json.put("pageview", ci.getPageView());
			json.put("keywords", ci.getInnerKeyword());
			jsonObjects.add(json);
		}
		return jsonObjects;
	}
}
