package org.robot.work.web20190114;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;

import org.jsoup.helper.StringUtil;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.robot.Spider;
import org.robot.component.Attachment;
import org.robot.component.Page;
import org.robot.component.Rule;
import org.robot.dao.AuthorDao;
import org.robot.dao.IssueDao;
import org.robot.dao.JournalDao;
import org.robot.dao.PaperDao;
import org.robot.pojo.Author;
import org.robot.pojo.Issue;
import org.robot.pojo.Journal;
import org.robot.pojo.Paper;
import org.robot.utils.MatchEmail;
import org.robot.utils.RegexUtil;

/**
 * Already updated.
 * @author pdz
 * @date 2019-01-10
 */
public class Ivyspring extends Spider {
	private static final Long TASKID = 1003L;

	@Override
	public void parse(Page page) throws Exception {
		// Dispatch on the type tag assigned when the URL was queued.
		// The "journal" and "issue" handlers are commented out (already
		// harvested in an earlier run, per the class javadoc); this pass
		// only walks archive pages and re-parses paper pages for authors.
		if (page.typeEquals("journal")) {
//			parseJournal(page);
		} else if (page.typeEquals("archive")) {
			// Every <a> in the first .tdshade table links to one issue.
			List<Element> list = page.eleStream(Rule.createSelectorRule(".tdshade:eq(0)>tbody>tr>td>a", "issue")).collect(Collectors.toList());
			String year = null;
			for (Element ele : list) {
				String url = ele.absUrl("href");
				// The year label lives in an ancestor row; links in rows
				// without a year inherit the most recent year seen.
				String year2 = RegexUtil.getYear(ele.parent().parent().text());
				year = StringUtil.isBlank(year2) ? year : year2;
				// Stop (not skip) once outside the 2017-2019 window or on a
				// nested archive link. NOTE(review): `break` assumes the
				// listing is ordered newest-first — confirm against the site.
				if (StringUtil.isBlank(year) || !year.matches("201[7-9]") || url.contains("archive"))
					break;
//				System.out.println(year);
				Map<String, Object> infoMap = new HashMap<>();
				infoMap.put(Page.URLTEXT, ele.text());
				infoMap.put("year", year);
				page.addNext(url, infoMap, "issue");
			}
		} else if (page.typeEquals("issue")) {
//			parseIssue(page);
		} else if (page.typeEquals("paper")) {
//			parsePaper(page);
			parsePaperInfo(page);
		}
		
	}
	

	/**
	 * Extracts the author list (and, where present, de-obfuscated e-mail
	 * addresses) from a paper page and stores one {@link Author} row per author.
	 * <p>
	 * The site hides corresponding-author e-mails behind an inline script:
	 * the local part immediately precedes a {@code <script>} tag, the TLD
	 * follows the closing {@code </script>} tag, and the domain is assembled
	 * from the JavaScript variables {@code thismailbody12} / {@code thismailbody1}.
	 *
	 * @param page the downloaded paper page
	 */
	private void parsePaperInfo(Page page) {
		Long paperid = PaperDao.selectPidByWebsite(Paper.class, page.url(), TASKID);
		String[] autArr = page.select("p.author").first().ownText().split(",");
		IntStream.range(0, autArr.length).forEach(i -> autArr[i] = autArr[i].trim());
		Elements eles = page.select("img[alt=Corresponding address]");
		if (!eles.isEmpty()) {
			String html = eles.last().parent().html();
			List<String> emailList = new ArrayList<>();
			// Authors mentioned in the corresponding-address block, in the same
			// order as the obfuscated e-mail fragments extracted below.
			List<String> matchAutList = matchAut(html, autArr);
			List<String> prevList = RegexUtil.regex("\\S+<script", html);   // local part before <script>
			List<String> suffList = RegexUtil.regex("</script>\\S+", html); // TLD after </script>
			List<String> list1 = RegexUtil.regex("thismailbody12 = \".+;", html);
			List<String> list2 = RegexUtil.regex("thismailbody1 = thismailbody12.+;", html);
			// FIX: the four fragment lists were indexed by prevList's size alone,
			// which threw IndexOutOfBoundsException on malformed pages — bound
			// the loop by the shortest list instead.
			int n = Math.min(Math.min(prevList.size(), suffList.size()),
					Math.min(list1.size(), list2.size()));
			for (int i = 0; i < n; i++) {
				String prev = prevList.get(i).split("<")[0];
				String suff = suffList.get(i).split(">")[1];
				String p1 = RegexUtil.regexFirst("\".+\"", list1.get(i)).replace("\"", "").trim();
				String p2 = RegexUtil.regexFirst("\".+\"", list2.get(i)).replace("\"", "").trim();
				// FIX: strip the trailing HTML entity only when present; the
				// original substring(0, indexOf("&")) threw
				// StringIndexOutOfBoundsException when "&" was absent.
				int amp = p2.indexOf("&");
				if (amp >= 0) {
					p2 = p2.substring(0, amp);
				}
				String email = prev + "@" + p1 + p2 + "." + suff;
				email = MatchEmail.getFirstEmail(email);
				emailList.add(email);
			}
			for (String aut : autArr) {
				// Pair each author with an e-mail only when the positions line
				// up; non-corresponding authors are stored without one.
				int i = matchAutList.indexOf(aut);
				Author author = new Author();
				author.setFullName(aut);
				if (i >= 0 && i < emailList.size()) {
					author.setEmail(emailList.get(i));
				}
				author.setTaskId(TASKID);
				author.setPaperid(paperid);
				AuthorDao.addAuthor(author);
			}
		}
	}


	/**
	 * Returns the authors from {@code autArr} that occur verbatim inside
	 * {@code html}, preserving the order of {@code autArr}.
	 */
	private List<String> matchAut(String html, String[] autArr) {
		List<String> found = new ArrayList<>();
		for (String name : autArr) {
			if (html.contains(name)) {
				found.add(name);
			}
		}
		return found;
	}


	/**
	 * Parses one paper page (title, DOI, abstract, keywords, PDF link) and
	 * stores it as a {@link Paper} row linked to its parent issue.
	 *
	 * @param page the downloaded paper page
	 */
	private void parsePaper(Page page) {
		Long issueid = IssueDao.selectPidByWebsite(Issue.class, page.prevUrl(), TASKID);
		String title = page.select("title").text();
		String str = page.select("#headingAtop").text();
		// FIX: indexOf("doi:") returns -1 when the heading carries no DOI,
		// and substring(-1) would throw — store null instead.
		int doiIdx = str.indexOf("doi:");
		String doi = doiIdx >= 0 ? str.substring(doiIdx).replace("doi:", "").trim() : null;
		// FIX: guard the abstract lookup; .first()/.nextElementSibling() may
		// be null on pages without the expected heading, which NPE'd before.
		Element abstHeading = page.select("h1.articlegroup").first();
		String abst = (abstHeading != null && abstHeading.nextElementSibling() != null)
				? abstHeading.nextElementSibling().text() : null;
		Elements eles = page.select("b:containsOwn(Keywords)");
		String keyWords = null;
		if (!eles.isEmpty()) {
			keyWords = eles.first().parent().text().replace("Keywords:", "").trim();
		}
		// FIX: guard the PDF link lookup for the same reason.
		Element pdfLink = page.select(".textbutton").first();
		String pdfUrl = pdfLink != null ? pdfLink.absUrl("href") : null;
		
		Paper paper = new Paper();
		paper.setWebsite(page.url());
		paper.setTitle(title);
		paper.setKeywords(keyWords);
		paper.setTaskid(TASKID);
		paper.setDoi(doi);
		paper.setAbstracts(abst);
		paper.setPdfUrl(pdfUrl);
		paper.setIssueId(issueid);
		PaperDao.addPaper(paper);
	}

	/**
	 * Stores one {@link Issue} row for the current page, linked to the
	 * journal two navigation steps up and tagged with the year stashed in
	 * the page's info map by {@link #parse(Page)}.
	 */
	private void parseIssue(Page page) {
		Issue record = new Issue();
		record.setWebsite(page.url());
		record.setIssue(page.urlText());
		record.setYear((String) page.get("year"));
		record.setTaskId(TASKID);
		record.setJournalId(JournalDao.selectPidByWebsite(Journal.class, page.prevUrl(2), TASKID));
		IssueDao.addIssue(record);
	}

	/**
	 * Stores one {@link Journal} row for the current page, using the HTML
	 * &lt;title&gt; as the journal name and an ISSN scraped from the page text.
	 */
	private void parseJournal(Page page) {
		Journal record = new Journal();
		record.setWebsite(page.url());
		record.setJournalname(page.select("title").text());
		record.setIssn(RegexUtil.getIssn(page.text()));
		record.setTaskId(TASKID);
		JournalDao.addJournal(record);
	}

	public static void main(String[] args) {
		Ivyspring claw = new Ivyspring();
		// NOTE(review): cache directory still says "Thelancet" — looks like a
		// copy-paste from another spider; confirm before changing, since the
		// existing crawl state lives under this path.
		claw.setBaseDirectory("E:/webSpider/Thelancet");
//		claw.getConfig().setThreadNumber(1);
		claw.addSeed("http://ivyspring.com/");
		// Crawl rules: journal nav links -> per-journal archive pages ->
		// issue pages (queued by parse(), see addByHand below) -> full-text
		// paper pages.
		claw.addSelector(".sub_nav_list li a", "journal");
		claw.addRegex("http.+/ms/archive", "archive");
//		claw.addSelector(".tdshade>tbody>tr>td>a", "issue");
		// Presumably tells the framework that some links are queued manually
		// via page.addNext() rather than by a selector — TODO confirm.
		claw.addByHand();
		claw.addSelector("a:contains(Full text)", "paper");
		claw.start();
		
		
	}

}
