package com.zju.searchEngine.crawler;

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;

import com.zju.searchEngine.model.Index;
import com.zju.searchEngine.model.WebPageWithBLOBs;
import com.zju.searchEngine.service.IIndexService;
import com.zju.searchEngine.service.IWebPageService;

@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations = { "classpath:beans.xml" })
public class MyCrawler {

	/** Target number of distinct question URLs to collect before the crawl stops. */
	private static final int TARGET_URL_COUNT = 100;
	/**
	 * Safety cap on listing pages fetched. The original loop had no exit other
	 * than reaching TARGET_URL_COUNT matches, so a dead site or changed markup
	 * would spin it forever; this bounds the damage.
	 */
	private static final int MAX_LIST_PAGES = 1000;
	/**
	 * Matches relative question links such as "/question/12345678".
	 * Compiled once — Pattern is immutable and thread-safe, and the original
	 * recompiled it on every listing-page iteration.
	 */
	private static final Pattern QUESTION_URL_PATTERN = Pattern.compile("\\/question\\/\\d{8}");

	@Autowired
	private IWebPageService webPageService;
	@Autowired
	private IIndexService indexService;

	/** Entry point: crawl pages into the database, then build the keyword index. */
	@Test
	public void main(){
		crawl();
		createIndex();
	}

	/**
	 * Crawls Zhihu topic listing pages, collects up to {@link #TARGET_URL_COUNT}
	 * distinct question URLs, then downloads each question page and stores it
	 * (URL, raw HTML, title, tag text). Each previously unseen tag is also
	 * inserted as an {@code Index} row with its keyword set.
	 */
	public void crawl() {
		// 1. Seed listing URL; the page number is appended per iteration.
		String startUrl = "https://www.zhihu.com/topic/19554298/top-answers?page=";

		Set<String> urlSet = new HashSet<String>();
		Set<String> tagSet = new HashSet<String>();

		// Walk listing pages until enough distinct question URLs are gathered;
		// MAX_LIST_PAGES keeps the loop from hanging if matches dry up.
		for (int i = 1; i <= MAX_LIST_PAGES && urlSet.size() < TARGET_URL_COUNT; i++) {
			String html = HttpClinetUtil.sendGet(startUrl + i);
			Matcher matcher = QUESTION_URL_PATTERN.matcher(html);
			while (urlSet.size() < TARGET_URL_COUNT && matcher.find()) {
				urlSet.add(matcher.group());
			}
			System.out.println(urlSet.size());
		}

		// Fetch each collected question page and persist it to the database.
		for (String url : urlSet) {
			WebPageWithBLOBs webPage = new WebPageWithBLOBs();
			String targetUrl = "https://www.zhihu.com" + url;
			System.out.println(targetUrl);
			webPage.setUrl(targetUrl);

			String html = HttpClinetUtil.sendGet(targetUrl);
			webPage.setContent(html);

			Document dom = Jsoup.parse(html);
			// NOTE(review): the CSS class names below reflect Zhihu's markup at
			// the time of writing — verify if titles/tags start coming back empty.
			Elements eleTitle = dom.getElementsByClass("zm-item-title");
			String title = eleTitle.text();
			System.out.println(title);
			webPage.setPagename(title);

			Elements eleTags = dom.getElementsByClass("zm-item-tag");
			String tags = eleTags.text();
			System.out.println(tags);
			webPage.setTags(tags);
			webPageService.insert(webPage);

			// Store each tag once: Set.add returns true only on first insertion,
			// replacing the original contains()-then-add() double lookup.
			for (Element element : eleTags) {
				String tag = element.text();
				if (tagSet.add(tag)) {
					Index index = new Index();
					index.setKeyword(tag);
					indexService.insert(index);
				}
			}
		}
	}

	/**
	 * Builds a bitmap-style index: for each stored keyword, a string of '1'/'0'
	 * flags — one character per stored page, in {@code selectAllTags()} order —
	 * marking whether that page's tag text contains the keyword. The result is
	 * written back via {@code indexService.updateById}.
	 */
	public void createIndex(){
		List<String> tagsList = webPageService.selectAllTags();
		List<Index> indexList = indexService.selectAllIndex();
		for (Index index : indexList) {
			// Pre-size: the bitmap is exactly one char per page.
			StringBuilder sb = new StringBuilder(tagsList.size());
			for (String tags : tagsList) {
				sb.append(tags.contains(index.getKeyword()) ? '1' : '0');
			}
			index.setKeyindex(sb.toString());
			indexService.updateById(index);
		}
	}
}
