package Preprocess;


import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import org.apache.log4j.Logger;

import java.io.*;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

/**
 * Step 1.2: word-segments a UTF-8 text file line by line with HanLP.
 *
 * <p>Reads {@code webvideodata20171020_90_drop.utf8}, segments each line with
 * {@link HanLP#segment(String)}, and writes the space-joined tokens to
 * {@code webvideodata20171020_90_drop_seg.txt} (one segmented line per input
 * line). A running counter is printed to stdout as a progress indicator.
 */
public class Step1_2_segment_for_lines extends Thread{

    public static void main(String[] args) throws IOException, InterruptedException {
        File curFile = new File("d:/data/videodata/webvideodata20171020_90_drop.utf8");
        // try-with-resources: the original never closed (or flushed) br/bw, so the
        // BufferedWriter's final buffered chunk was silently dropped on exit and
        // both file handles leaked. Closing bw here guarantees a complete flush.
        try (BufferedReader br = new BufferedReader(new InputStreamReader(
                     new FileInputStream(curFile), "utf8"));
             BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
                     new FileOutputStream("d:/data/webvideodata20171020_90_drop_seg.txt"),
                     "utf8"))) {

            int cnt = 0;
            String content;
            while ((content = br.readLine()) != null) {
                List<Term> terms = HanLP.segment(content);
                StringBuilder line = new StringBuilder();
                for (Term t : terms) {
                    // chained appends avoid building a throwaway "word + ' '" String
                    line.append(t.word).append(' ');
                }
                cnt++;
                bw.write(line.toString());
                bw.write('\n');
                // 2778552 is the known line count of the input file; printed as
                // "current / total" progress — TODO confirm the total if the input changes
                System.out.println(cnt + " / 2778552");
            }
        }
    }
}


/*
Post-processing note: after segmentation, duplicate lines are removed from the
output with the following pandas snippet (run separately in Python):

import pandas as pd
seg = pd.read_csv("webvideodata20171020_90.utf8_fixed_seg.utf8", sep='\t', encoding='utf8', header=None)
seg = seg.drop_duplicates()
seg.to_csv("webvideodata20171020_90.utf8_fixed_seg.utf8", encoding='utf8', header=False, index=False)
*/