package com.xueke.crawler.service.createLeafDocRelation;


import com.xueke.crawler.helper.HttpHelper;
import com.xueke.crawler.model.SubjectTree;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.util.List;

/**
 * Created by weibo on 2017/5/13.
 *
 * @author weibo.qin
 * @version 2017/5/13
 */
@Component("ywDocRelation")
public class YwDocRelation extends BaseCreateDocRelation {

    /** Base domain of the Chinese-subject (语文) document site. */
    String domain = "http://yw.zxxk.com";

    /** Number of subject-tree rows fetched from the DB per batch. */
    private static final int PAGE_SIZE = 10;

    /** Consecutive fetch failures tolerated per leaf before giving up (prevents an infinite loop). */
    private static final int MAX_CONSECUTIVE_FAILURES = 5;

    /**
     * Walks the subject tree batch-by-batch and crawls the paginated document
     * list of every node, persisting one leaf-to-document relation per item.
     * A failure on one node is logged and the crawl continues with the next.
     */
    @Override
    public void start() {
        int processed = 0;
        for (int batch = 0; ; batch++) {
            List<SubjectTree> nodes = subjectTreeMapper.querySubjectTree(batch * PAGE_SIZE, PAGE_SIZE);

            if (nodes.isEmpty()) {
                System.out.println("语文关系表执行完毕！");
                break;
            }

            for (SubjectTree node : nodes) {
                Integer nodeId = node.getNid();
                Integer pnId = node.getPnid();
                Integer cId = node.getCid();
                Integer isDeaf = node.getIsDeaf();
                String uri = node.getDomain() + node.getUri();
                System.out.println(cId + "-----" + pnId + "------" + nodeId + "--------------" + uri + "-----" + processed++);
                try {
                    parseSingleLink(uri, domain, nodeId, pnId, cId, isDeaf);
                } catch (Exception e) {
                    // Best-effort crawl: one bad node must not abort the whole run.
                    e.printStackTrace();
                }
            }
        }
    }

    /**
     * Crawls every result page under one leaf node until the site reports no
     * more items, or until too many consecutive fetch failures occur.
     *
     * <p>The failure cap fixes an infinite loop in the original code: when the
     * site was unreachable, {@code crawle} kept returning the next page number
     * forever and this loop never terminated.
     *
     * @param tUrl   base listing URL for the leaf (already carries a query string)
     * @param domain site domain, forwarded to {@link #crawle}
     * @param leafId leaf node id the relations are saved against
     * @throws IOException propagated from the page fetch
     */
    private void parseSingleLink(String tUrl, String domain, int leafId, Integer pnId, Integer cId, Integer isDeaf)
        throws IOException {
        int page = 1;
        int failures = 0;
        while (true) {
            int next = crawle(domain, leafId, tUrl, page, pnId, cId, isDeaf);
            if (next == 0) {
                System.out.println("叶子节点=" + leafId + ", 题目采集完毕，已经到达尾页-------");
                break;
            }
            if (next < 0) {
                // Fetch failed; move on to the next page but bail out once the
                // failures look systemic rather than transient.
                if (++failures >= MAX_CONSECUTIVE_FAILURES) {
                    System.out.println("叶子节点=" + leafId + ", 连续抓取失败，提前终止-------");
                    break;
                }
                page = -next;
            } else {
                failures = 0;
                page = next;
            }
        }
    }

    /**
     * Fetches one result page, extracts each document item and persists its
     * relation to the given leaf node.
     *
     * @return {@code 0} when the page has no items (end of pagination), the
     *         next page number on success, or the <em>negated</em> next page
     *         number when the page could not be fetched at all.
     * @throws IOException propagated from the HTTP fetch
     */
    private int crawle(String domain, int leafId, String url, int pageNum, Integer pnId, Integer cId, Integer isDeaf)
        throws IOException {

        String pagedUrl = getUrl(url, pageNum);
        Document doc = HttpHelper.getSimpleDocument(pagedUrl);
        if (doc == null) {
            // Signal a fetch failure so the caller can count/abort instead of looping forever.
            return -(pageNum + 1);
        }
        Elements items = doc.select("div.clearfix.list-item");
        if (items.isEmpty()) {
            return 0;
        }
        for (Element item : items) {
            String docId = item.attr("id");
            // Elements.first() returns null on an empty selection — guard both
            // lookups so one malformed item cannot NPE the whole page.
            Element link = item.select("a.high_light").first();
            Element downInfo = item.select(".dn-btn div").first();
            if (link == null || downInfo == null) {
                System.out.println("skip malformed item, docId=" + docId);
                continue;
            }
            String docUrl = link.attr("href");
            System.out.println(docId + "------------" + docUrl);
            int level = parseLevel(downInfo.text());
            saveLessonRelation(leafId, docId, docUrl, pnId, cId, level);
        }
        return pageNum + 1;
    }

    /**
     * Appends the page parameter to the listing URL.
     * NOTE(review): uses {@code '&'}, so it assumes {@code url} already
     * contains a query string — confirm against the rows in subject_tree.
     */
    private String getUrl(String url, int page) {
        return url + "&page=" + page;
    }

    /**
     * Resolves the "next page" (下页) link from the pagination bar, or
     * {@code null} when there is none. NOTE(review): currently unused —
     * pagination is driven by {@link #getUrl(String, int)} instead; kept for
     * reference.
     */
    private String getNextUrl(String domain, Document doc) {
        Elements anchors = doc.select("div.paging a");
        for (Element a : anchors) {
            if ("下页".equals(a.text())) {
                return domain + a.attr("href");
            }
        }
        return null;
    }


}
