package com.geek.novel.services.parser.impl

import android.text.TextUtils
import com.geek.novel.entity.BookCrawler
import com.geek.novel.entity.BookIndex
import com.geek.novel.entity.BookRead
import com.geek.novel.entity.CrawlerSource
import com.geek.novel.services.parser.NovelSectionParser
import com.geek.novel.utils.BookUtil
import com.geek.novel.utils.FormatUtil
import com.geek.novel.utils.LogCatUtil
import com.geek.novel.vo.CrawlConfigVo
import org.jsoup.Jsoup
import org.jsoup.nodes.Document
import org.jsoup.nodes.Element
import org.jsoup.select.Elements
import java.util.*
import kotlin.collections.ArrayList


class NovelSectionParserImpl : NovelSectionParser {

	/**
	 * Parses the chapter index list of a book from the index page HTML.
	 *
	 * Iterates the matched index links in reverse so it can stop as soon as it
	 * reaches the chapter the crawler already recorded ([BookCrawler.lastIndexName]),
	 * producing only the not-yet-crawled chapters, ordered oldest first.
	 *
	 * @param crawlerSource source whose crawler config supplies the CSS selectors and bounds
	 * @param book the book being read; supplies the bookId stamped onto each index entry
	 * @param bookCrawler crawl-progress record; its lastIndexName marks where iteration stops
	 * @param html raw HTML of the book's index page
	 * @return chapter index entries newer than the last crawled chapter, in ascending order
	 * @throws RuntimeException if the configured indexMaxOffset is less than 1
	 */
	override fun parseBookIndex(crawlerSource: CrawlerSource, book: BookRead, bookCrawler: BookCrawler, html: String?): List<BookIndex> {
		val result = LinkedList<BookIndex>()
		val document: Document = Jsoup.parse(html)

		val config = crawlerSource.getCrawlerConfig()
		val links: Elements = document.select(config.indexMatch)

		// Name of the most recently crawled chapter; reverse iteration stops when it is met.
		val lastIndexName = FormatUtil.isNullToEmpty(bookCrawler.lastIndexName)

		val indexMin = config.indexMin
		val indexMaxOffset = config.indexMaxOffset
		if (indexMaxOffset < 1) {
			throw RuntimeException("indexMaxOffset配置异常，不能小于1，请检查")
		}

		// The first link is a "jump to page bottom" anchor, so real chapters start past it
		// (controlled by indexMin). Walk backwards from the newest chapter so we can break
		// as soon as the already-crawled chapter is found.
		for (i in links.size - indexMaxOffset downTo indexMin) {
			// Guard against configured offsets that point outside the link list.
			if (i >= links.size || i < 0) {
				continue
			}
			val link: Element = links[i]
			val href: String = link.attr("href")
			val sectionName: String = link.html()

			// Reached the newest chapter we already have — everything older is known.
			if (lastIndexName == sectionName) {
				break
			}
			val bookIndex = BookIndex()
			bookIndex.bookId = book.bookId
			// i is offset by the leading anchor link, so the chapter number is i - 1.
			bookIndex.indexNum = i - 1
			bookIndex.indexName = sectionName
			bookIndex.sectionUrl = href
			bookIndex.crawler = false
			bookIndex.bookCrawlerId = "${bookCrawler.id}"

			// Prepend so the final list ends up ordered oldest-to-newest.
			result.add(0, bookIndex)
		}
		for (bookIndex in result) {
			bookIndex.initEntity()
		}
		return result
	}

	/**
	 * Extracts the text content of one page of a chapter.
	 *
	 * Selects the content node via [CrawlConfigVo.sectionMatch], then applies the
	 * configured crawler replacement rules and whitespace normalization. Rule
	 * application is best-effort: if it fails, the un-cleaned content is returned.
	 *
	 * @param config crawl configuration supplying the content selector and cleanup rules
	 * @param html raw HTML of one page of the chapter
	 * @return the (possibly cleaned) inner HTML of the matched content node
	 * @throws Exception if the page HTML is empty or the content selector matches nothing
	 */
	override fun getSectionContent(config: CrawlConfigVo, html: String?): String {
		if (FormatUtil.isEmpty(html)) {
			throw Exception("章节获取的页面内容为空！")
		}
		val document: Document = Jsoup.parse(html)
		val sectionElement = document.select(config.sectionMatch)

		if (sectionElement.isEmpty()) {
			throw Exception("章节获取的节点内容为空！${config.sectionMatch}")
		}
		val content: Element = sectionElement[0]
		var result: String = content.html()

		try {
			// A missing rule list simply means no cleanup is configured.
			val sectionCrawlerRules = config.sectionCrawlerRules ?: emptyList()
			for (rule in sectionCrawlerRules) {
				result = BookUtil.replaceContent(result, rule)
			}

			// Trim surrounding whitespace and collapse runs of newlines into one.
			result = result.trim { it <= ' ' }
					.replace("\\n+".toRegex(), "\n")
		} catch (e: Exception) {
			// Deliberately swallow: a bad rule or regex must not lose the raw content.
			LogCatUtil.e("NovelSectionParserImpl", e.message, e)
		}
		return result
	}

	/**
	 * Resolves the URL of the next page of a multi-page chapter, if any.
	 *
	 * @param crawlerSource source supplying the selector for the "next page" element
	 * @param html raw HTML of the current chapter page
	 * @return the next page's href, or null when there is no usable next-page link
	 * @throws Exception if the page HTML is empty
	 */
	override fun getSectionNextPageUrl(crawlerSource: CrawlerSource, html: String?): String? {
		if (FormatUtil.isEmpty(html)) {
			throw Exception("章节获取的页面内容为空！")
		}
		val document: Document = Jsoup.parse(html)
		val nextPageElemId = crawlerSource.nextPageElemId
		if (TextUtils.isEmpty(nextPageElemId)) {
			// No next-page selector configured for this source — single-page chapters.
			return null
		}
		val nextPage: Elements = document.select(nextPageElemId)
		if (nextPage.isEmpty()) {
			return null
		}
		val href = nextPage.attr("href")
		// Heuristic: a real pagination link's text contains "页" ("page") and has an href.
		if (!nextPage.html().contains("页") || FormatUtil.isEmpty(href)) {
			return null
		}
		// Some sites emit javascript:void(0); as the href for a disabled next-page link.
		if (href.startsWith("javascript")) {
			return null
		}
		return href
	}

}