package http;

import org.apache.commons.io.FileUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.HttpClient;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

/**
 * Created by lynn on 2017/1/15.
 */
/**
 * Crawls the drug-category lemma titles from Baidu Baike
 * (http://baike.baidu.com/wikitag/taglist?tagId=75954) via the getlemmas API
 * and writes them to {@code out/tags.txt}. Lemmas whose full title differs
 * from the cropped title are additionally logged to {@code out/diff.txt} as
 * {@code lemmaId<TAB>lemmaTitle<TAB>lemmaCroppedTitle}.
 *
 * Created by lynn on 2017/1/15.
 */
public class HttpPostDemo {
    /** Tag id of the drug category on Baidu Baike. */
    private static final String TAG_ID = "75954";
    /**
     * Requested page size. Chosen large hoping to fetch everything in one page,
     * but the Baidu server caps it at 100 — so pagination is still required.
     */
    private static final String NUM_PER_PAGE = "10000";

    public static void main(String[] args) throws IOException {
        System.out.println("running");

        String url = "http://baike.baidu.com/wikitag/api/getlemmas";
        HttpClient client = HttpClients.createDefault();
        HttpPost post = new HttpPost(url);
        post.setHeader("X-Requested-With", "XMLHttpRequest");

        List<String> tags = new ArrayList<>(10000);
        // Collects lemmas whose cropped title differs from the full title.
        // Synchronized because it is mutated from a parallel stream below.
        final List<JSONObject> diff = Collections.synchronizedList(new ArrayList<>());

        int page = 0;
        boolean morePages = true;
        while (morePages) {
            // Rebuild the form every iteration so the "page" parameter is not
            // duplicated across requests (the original appended it repeatedly).
            List<NameValuePair> form = new ArrayList<>();
            form.add(new BasicNameValuePair("tagId", TAG_ID));
            form.add(new BasicNameValuePair("limit", NUM_PER_PAGE));
            form.add(new BasicNameValuePair("timeout", "10000"));
            form.add(new BasicNameValuePair("filterTags", "[]"));
            form.add(new BasicNameValuePair("fromLemma", "false"));
            form.add(new BasicNameValuePair("page", String.valueOf(page)));

            // Encode once with an explicit charset; the no-arg overload
            // defaults to ISO-8859-1, which would mangle Chinese titles.
            post.setEntity(new UrlEncodedFormEntity(form, StandardCharsets.UTF_8));
            HttpResponse res = client.execute(post);
            if (res.getStatusLine().getStatusCode() != 200) {
                // Abort instead of retrying the same failing request forever,
                // and release the connection by consuming the entity.
                System.err.println(res.getStatusLine());
                EntityUtils.consumeQuietly(res.getEntity());
                break;
            }

            String body = EntityUtils.toString(res.getEntity(), StandardCharsets.UTF_8);
            JSONObject jres = (JSONObject) JSONValue.parse(body);
            @SuppressWarnings("unchecked")
            Iterable<JSONObject> lemmaList = (Iterable<JSONObject>) jres.get("lemmaList");
            int totalPages = Integer.parseInt(jres.get("totalPage").toString());
            int currentPage = Integer.parseInt(jres.get("page").toString());

            tags.addAll(StreamSupport.stream(lemmaList.spliterator(), true).map(j -> {
                String title = (String) j.get("lemmaTitle");
                String cropped = (String) j.get("lemmaCroppedTitle");
                if (!title.equals(cropped)) {
                    diff.add(j);
                }
                return title;
            }).collect(Collectors.toList()));

            // "page" in the response is 0-based; stop after the last page.
            morePages = currentPage < totalPages - 1;
            page++;
        }

        FileUtils.writeLines(new File("out/tags.txt"), tags);
        if (!diff.isEmpty()) {
            FileUtils.writeLines(new File("out/diff.txt"), diff.stream()
                    .map(j -> String.format("%s\t%s\t%s",
                            j.get("lemmaId"), j.get("lemmaTitle"), j.get("lemmaCroppedTitle")))
                    .collect(Collectors.toList()));
        }

        System.out.println("done");
    }
}
