code | docstring
---|---
public static double ChisquareInverseCdf(double p, int df)
{
final double CHI_EPSILON = 0.000001; /* Accuracy of critchi approximation */
final double CHI_MAX = 99999.0; /* Maximum chi-square value */
double minchisq = 0.0;
double maxchisq = CHI_MAX;
double chisqval = 0.0;
if (p <= 0.0)
{
return CHI_MAX;
}
else if (p >= 1.0)
{
return 0.0;
}
chisqval = df / Math.sqrt(p); /* fair first value */
while ((maxchisq - minchisq) > CHI_EPSILON)
{
if (1 - ChisquareCdf(chisqval, df) < p)
{
maxchisq = chisqval;
}
else
{
minchisq = chisqval;
}
chisqval = (maxchisq + minchisq) * 0.5;
}
return chisqval;
} | Given the p-value and degrees of freedom of a chi-square distribution, returns the chi-square value. Implemented internally with bisection search, ported from the JS code:
http://www.fourmilab.ch/rpkp/experiments/analysis/chiCalc.js
@param p the p-value (confidence level)
@param df the degrees of freedom
@return the chi-square value |
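A usage sketch (not part of the original source): for the common significance level p = 0.05 with df = 1, the bisection converges to the familiar critical value of about 3.841, assuming the ChisquareCdf counterpart referenced above is available in the same class.
double critical = ChisquareInverseCdf(0.05, 1); // upper-tail area 0.05
System.out.println(critical); // prints roughly 3.841459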
public int getFrequency(String from, String to)
{
return getFrequency(convert(from), convert(to));
} | Gets the transition frequency
@param from
@param to
@return |
public int getFrequency(E from, E to)
{
return matrix[from.ordinal()][to.ordinal()];
} | Gets the transition frequency
@param from
@param to
@return |
static boolean makeCoreDictionary(String inPath, String outPath)
{
final DictionaryMaker dictionaryMaker = new DictionaryMaker();
final TreeSet<String> labelSet = new TreeSet<String>();
CorpusLoader.walk(inPath, new CorpusLoader.Handler()
{
@Override
public void handle(Document document)
{
for (List<Word> sentence : document.getSimpleSentenceList(true))
{
for (Word word : sentence)
{
if (shouldInclude(word))
dictionaryMaker.add(word);
}
}
// for (List<Word> sentence : document.getSimpleSentenceList(false))
// {
// for (Word word : sentence)
// {
// if (shouldInclude(word))
// dictionaryMaker.add(word);
// }
// }
}
/**
* Whether this word should be included
* @param word
* @return
*/
boolean shouldInclude(Word word)
{
if ("m".equals(word.label) || "mq".equals(word.label) || "w".equals(word.label) || "t".equals(word.label))
{
if (!TextUtility.isAllChinese(word.value)) return false;
}
else if ("nr".equals(word.label))
{
return false;
}
return true;
}
});
if (outPath != null)
return dictionaryMaker.saveTxtTo(outPath);
return false;
} | Builds a word-frequency dictionary from the given corpus folder
@return |
public void put(String key, V value)
{
if (key.length() == 0) return; // just to be safe
BaseNode branch = this;
char[] chars = key.toCharArray();
for (int i = 0; i < chars.length - 1; ++i)
{
// every character except the last one continues the path
branch.addChild(new Node(chars[i], Status.NOT_WORD_1, null));
branch = branch.getChild(chars[i]);
}
// the last character is inserted with the end status
if (branch.addChild(new Node<V>(chars[chars.length - 1], Status.WORD_END_3, value)))
{
++size; // maintain size
}
} | Inserts a word
@param key
@param value |
public void remove(String key)
{
BaseNode branch = this;
char[] chars = key.toCharArray();
for (int i = 0; i < chars.length - 1; ++i)
{
if (branch == null) return;
branch = branch.getChild(chars[i]);
}
if (branch == null) return;
// mark the last character's node as undefined
if (branch.addChild(new Node(chars[chars.length - 1], Status.UNDEFINED_0, value)))
{
--size;
}
} | Removes a word
@param key |
public Set<Map.Entry<String, V>> entrySet()
{
Set<Map.Entry<String, V>> entrySet = new TreeSet<Map.Entry<String, V>>();
StringBuilder sb = new StringBuilder();
for (BaseNode node : child)
{
if (node == null) continue;
node.walk(new StringBuilder(sb.toString()), entrySet);
}
return entrySet;
} | Gets the set of key-value entries
@return |
public Set<String> keySet()
{
TreeSet<String> keySet = new TreeSet<String>();
for (Map.Entry<String, V> entry : entrySet())
{
keySet.add(entry.getKey());
}
return keySet;
} | Gets the key set
@return |
public Set<Map.Entry<String, V>> prefixSearch(String key)
{
Set<Map.Entry<String, V>> entrySet = new TreeSet<Map.Entry<String, V>>();
StringBuilder sb = new StringBuilder(key.substring(0, key.length() - 1));
BaseNode branch = this;
char[] chars = key.toCharArray();
for (char aChar : chars)
{
if (branch == null) return entrySet;
branch = branch.getChild(aChar);
}
if (branch == null) return entrySet;
branch.walk(sb, entrySet);
return entrySet;
} | Prefix query
@param key the query string
@return the key-value entries |
public LinkedList<Map.Entry<String, V>> commonPrefixSearchWithValue(String key)
{
char[] chars = key.toCharArray();
return commonPrefixSearchWithValue(chars, 0);
} | Prefix query, including values
@param key the key
@return a list of key-value entries |
public LinkedList<Map.Entry<String, V>> commonPrefixSearchWithValue(char[] chars, int begin)
{
LinkedList<Map.Entry<String, V>> result = new LinkedList<Map.Entry<String, V>>();
StringBuilder sb = new StringBuilder();
BaseNode branch = this;
for (int i = begin; i < chars.length; ++i)
{
char aChar = chars[i];
branch = branch.getChild(aChar);
if (branch == null || branch.status == Status.UNDEFINED_0) return result;
sb.append(aChar);
if (branch.status == Status.WORD_MIDDLE_2 || branch.status == Status.WORD_END_3)
{
result.add(new AbstractMap.SimpleEntry<String, V>(sb.toString(), (V) branch.value));
}
}
return result;
} | Prefix query; representing the string as a char array improves runtime performance
@param chars the char array of the string
@param begin the starting index
@return |
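A minimal sketch of the semantics, assuming a BinTrie<Integer> populated with the put method above: the query returns every stored word that starts at the given offset of the text.
BinTrie<Integer> trie = new BinTrie<Integer>();
trie.put("北", 1);
trie.put("北京", 2);
trie.put("北京大学", 3);
// every dictionary word beginning at offset 0 of "北京大学真大"
for (Map.Entry<String, Integer> entry : trie.commonPrefixSearchWithValue("北京大学真大".toCharArray(), 0))
{
    System.out.println(entry.getKey() + "=" + entry.getValue()); // 北=1, 北京=2, 北京大学=3
}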
public boolean save(DataOutputStream out)
{
try
{
for (BaseNode node : child)
{
if (node == null)
{
out.writeInt(0);
}
else
{
out.writeInt(1);
node.walkToSave(out);
}
}
}
catch (Exception e)
{
logger.warning("Failed to save to " + out + " " + TextUtility.exceptionToString(e));
return false;
}
return true;
} | Saves to a binary output stream
@param out
@return |
public boolean load(String path, V[] value)
{
byte[] bytes = IOUtil.readBytes(path);
if (bytes == null) return false;
_ValueArray valueArray = new _ValueArray(value);
ByteArray byteArray = new ByteArray(bytes);
for (int i = 0; i < child.length; ++i)
{
int flag = byteArray.nextInt();
if (flag == 1)
{
child[i] = new Node<V>();
child[i].walkToLoad(byteArray, valueArray);
}
}
size = value.length;
return true;
} | Loads the binary trie from disk
@param path the path
@param value an extra array of values, in lexicographic order of the keys (it has to be supplied because saving generic values is not the trie's responsibility)
@return whether it succeeded |
public void parseLongestText(String text, AhoCorasickDoubleArrayTrie.IHit<V> processor)
{
int length = text.length();
for (int i = 0; i < length; ++i)
{
BaseNode<V> state = transition(text.charAt(i));
if (state != null)
{
int to = i + 1;
int end = to;
V value = state.getValue();
for (; to < length; ++to)
{
state = state.transition(text.charAt(to));
if (state == null) break;
if (state.getValue() != null)
{
value = state.getValue();
end = to + 1;
}
}
if (value != null)
{
processor.hit(i, end, value);
i = end - 1;
}
}
}
} | Longest matching
@param text the text
@param processor the handler |
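A sketch of the callback contract (trie is assumed to be a populated instance of this class with String values): begin is inclusive and end exclusive, matching the processor.hit(i, end, value) call above.
final String text = "商品和服务";
trie.parseLongestText(text, new AhoCorasickDoubleArrayTrie.IHit<String>()
{
    @Override
    public void hit(int begin, int end, String value)
    {
        // [begin, end) indexes into text
        System.out.println(text.substring(begin, end) + " -> " + value);
    }
});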
public void parseText(String text, AhoCorasickDoubleArrayTrie.IHit<V> processor)
{
int length = text.length();
int begin = 0;
BaseNode<V> state = this;
for (int i = begin; i < length; ++i)
{
state = state.transition(text.charAt(i));
if (state != null)
{
V value = state.getValue();
if (value != null)
{
processor.hit(begin, i + 1, value);
}
}
else
{
i = begin;
++begin;
state = this;
}
}
} | Matches the text
@param text the text
@param processor the handler |
public static boolean isTerminal(ArrayList<Configuration> beam)
{
for (Configuration configuration : beam)
if (!configuration.state.isTerminalState())
return false;
return true;
} | Returns true if all of the configurations in the beam are in the terminal state
@param beam the current beam
@return true if all of the configurations in the beam are in the terminal state |
protected List<String> preprocess(String document)
{
List<Term> termList = segment.seg(document);
ListIterator<Term> listIterator = termList.listIterator();
while (listIterator.hasNext())
{
Term term = listIterator.next();
if (CoreStopWordDictionary.contains(term.word) ||
term.nature.startsWith("w")
)
{
listIterator.remove();
}
}
List<String> wordList = new ArrayList<String>(termList.size());
for (Term term : termList)
{
wordList.add(term.word);
}
return wordList;
} | Override this method to implement your own preprocessing logic (preprocessing, segmentation, stop-word removal)
@param document the document
@return the word list |
public Document<K> addDocument(K id, String document)
{
return addDocument(id, preprocess(document));
} | Adds a document
@param id the document id
@param document the document content
@return the document object |
public Document<K> addDocument(K id, List<String> document)
{
SparseVector vector = toVector(document);
Document<K> d = new Document<K>(id, vector);
return documents_.put(id, d);
} | Adds a document
@param id the document id
@param document the document content
@return the document object (the previous value associated with id, as returned by Map.put) |
public List<Set<K>> kmeans(int nclusters)
{
Cluster<K> cluster = new Cluster<K>();
for (Document<K> document : documents_.values())
{
cluster.add_document(document);
}
cluster.section(nclusters);
refine_clusters(cluster.sectioned_clusters());
List<Cluster<K>> clusters_ = new ArrayList<Cluster<K>>(nclusters);
for (Cluster<K> s : cluster.sectioned_clusters())
{
s.refresh();
clusters_.add(s);
}
return toResult(clusters_);
} | k-means clustering
@param nclusters the number of clusters
@return a collection of the specified number of clusters (each a Set) |
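A minimal end-to-end sketch (the document ids and contents are made up): add a few documents, then ask for two clusters.
ClusterAnalyzer<String> analyzer = new ClusterAnalyzer<String>();
analyzer.addDocument("doc1", "足球 篮球 运动");
analyzer.addDocument("doc2", "电影 音乐 娱乐");
analyzer.addDocument("doc3", "足球 比赛 运动");
List<Set<String>> clusters = analyzer.kmeans(2);
System.out.println(clusters); // e.g. [[doc1, doc3], [doc2]]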
public List<Set<K>> repeatedBisection(int nclusters, double limit_eval)
{
Cluster<K> cluster = new Cluster<K>();
List<Cluster<K>> clusters_ = new ArrayList<Cluster<K>>(nclusters > 0 ? nclusters : 16);
for (Document<K> document : documents_.values())
{
cluster.add_document(document);
}
PriorityQueue<Cluster<K>> que = new PriorityQueue<Cluster<K>>();
cluster.section(2);
refine_clusters(cluster.sectioned_clusters());
cluster.set_sectioned_gain();
cluster.composite_vector().clear();
que.add(cluster);
while (!que.isEmpty())
{
if (nclusters > 0 && que.size() >= nclusters)
break;
cluster = que.peek();
if (cluster.sectioned_clusters().size() < 1)
break;
if (limit_eval > 0 && cluster.sectioned_gain() < limit_eval)
break;
que.poll();
List<Cluster<K>> sectioned = cluster.sectioned_clusters();
for (Cluster<K> c : sectioned)
{
c.section(2);
refine_clusters(c.sectioned_clusters());
c.set_sectioned_gain();
if (c.sectioned_gain() < limit_eval)
{
for (Cluster<K> sub : c.sectioned_clusters())
{
sub.clear();
}
}
c.composite_vector().clear();
que.add(c);
}
}
while (!que.isEmpty())
{
clusters_.add(0, que.poll());
}
return toResult(clusters_);
} | repeated bisection clustering
@param nclusters the number of clusters
@param limit_eval the threshold on the gain of the criterion function
@return a collection of the specified number of clusters (each a Set) |
double refine_clusters(List<Cluster<K>> clusters)
{
double[] norms = new double[clusters.size()];
int offset = 0;
for (Cluster cluster : clusters)
{
norms[offset++] = cluster.composite_vector().norm();
}
double eval_cluster = 0.0;
int loop_count = 0;
while (loop_count++ < NUM_REFINE_LOOP)
{
List<int[]> items = new ArrayList<int[]>(documents_.size());
for (int i = 0; i < clusters.size(); i++)
{
for (int j = 0; j < clusters.get(i).documents().size(); j++)
{
items.add(new int[]{i, j});
}
}
Collections.shuffle(items);
boolean changed = false;
for (int[] item : items)
{
int cluster_id = item[0];
int item_id = item[1];
Cluster<K> cluster = clusters.get(cluster_id);
Document<K> doc = cluster.documents().get(item_id);
double value_base = refined_vector_value(cluster.composite_vector(), doc.feature(), -1);
double norm_base_moved = Math.pow(norms[cluster_id], 2) + value_base;
norm_base_moved = norm_base_moved > 0 ? Math.sqrt(norm_base_moved) : 0.0;
double eval_max = -1.0;
double norm_max = 0.0;
int max_index = 0;
for (int j = 0; j < clusters.size(); j++)
{
if (cluster_id == j)
continue;
Cluster<K> other = clusters.get(j);
double value_target = refined_vector_value(other.composite_vector(), doc.feature(), 1);
double norm_target_moved = Math.pow(norms[j], 2) + value_target;
norm_target_moved = norm_target_moved > 0 ? Math.sqrt(norm_target_moved) : 0.0;
double eval_moved = norm_base_moved + norm_target_moved - norms[cluster_id] - norms[j];
if (eval_max < eval_moved)
{
eval_max = eval_moved;
norm_max = norm_target_moved;
max_index = j;
}
}
if (eval_max > 0)
{
eval_cluster += eval_max;
clusters.get(max_index).add_document(doc);
clusters.get(cluster_id).remove_document(item_id);
norms[cluster_id] = norm_base_moved;
norms[max_index] = norm_max;
changed = true;
}
}
if (!changed)
break;
for (Cluster<K> cluster : clusters)
{
cluster.refresh();
}
}
return eval_cluster;
} | Iteratively refines the clustering following the k-means algorithm
@param clusters the clusters
@return the value of the criterion function |
double refined_vector_value(SparseVector composite, SparseVector vec, int sign)
{
double sum = 0.0;
for (Map.Entry<Integer, Double> entry : vec.entrySet())
{
sum += Math.pow(entry.getValue(), 2) + sign * 2 * composite.get(entry.getKey()) * entry.getValue();
}
return sum;
} | c^2 - 2c(a + c) + d^2 - 2d(b + d)
@param composite (a+c,b+d)
@param vec (c,d)
@param sign
@return |
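To unpack the formula: for a composite vector C and a document vector v, the returned sum Σ(vᵢ² + 2·sign·Cᵢ·vᵢ) equals ‖C + sign·v‖² − ‖C‖². With sign = −1 (and C already containing v, as when removing a document from its own cluster) this is the docstring's c² − 2c(a+c) + d² − 2d(b+d); with sign = +1 it is the squared-norm change from adding the document. That is exactly how refine_clusters uses it: norm_base_moved = √(‖C‖² + value_base) = ‖C − v‖.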
public static double evaluate(String folderPath, String algorithm)
{
if (folderPath == null) throw new IllegalArgumentException("folderPath == null");
File root = new File(folderPath);
if (!root.exists()) throw new IllegalArgumentException(String.format("directory %s does not exist", root.getAbsolutePath()));
if (!root.isDirectory())
throw new IllegalArgumentException(String.format("%s is not a directory", root.getAbsolutePath()));
ClusterAnalyzer<String> analyzer = new ClusterAnalyzer<String>();
File[] folders = root.listFiles();
if (folders == null) return 1.;
logger.start("root directory: %s\nloading...\n", folderPath);
int docSize = 0;
int[] ni = new int[folders.length];
String[] cat = new String[folders.length];
int offset = 0;
for (File folder : folders)
{
if (folder.isFile()) continue;
File[] files = folder.listFiles();
if (files == null) continue;
String category = folder.getName();
cat[offset] = category;
logger.out("[%s]...", category);
int b = 0;
int e = files.length;
int logEvery = (int) Math.ceil((e - b) / 10000f);
for (int i = b; i < e; i++)
{
analyzer.addDocument(folder.getName() + " " + files[i].getName(), IOUtil.readTxt(files[i].getAbsolutePath()));
if (i % logEvery == 0)
{
logger.out("%c[%s]...%.2f%%", 13, category, MathUtility.percentage(i - b + 1, e - b));
}
++docSize;
++ni[offset];
}
logger.out(" %d documents\n", e - b);
++offset;
}
logger.finish(" loaded %d categories, %d documents in total\n", folders.length, docSize);
logger.start(algorithm + " clustering...");
List<Set<String>> clusterList = algorithm.replaceAll("[-\\s]", "").toLowerCase().equals("kmeans") ?
analyzer.kmeans(ni.length) : analyzer.repeatedBisection(ni.length);
logger.finish(" done.\n");
double[] fi = new double[ni.length];
for (int i = 0; i < ni.length; i++)
{
for (Set<String> j : clusterList)
{
int nij = 0;
for (String d : j)
{
if (d.startsWith(cat[i]))
++nij;
}
if (nij == 0) continue;
double p = nij / (double) (j.size());
double r = nij / (double) (ni[i]);
double f = 2 * p * r / (p + r);
fi[i] = Math.max(fi[i], f);
}
}
double f = 0;
for (int i = 0; i < fi.length; i++)
{
f += fi[i] * ni[i] / docSize;
}
return f;
} | Evaluates a clustering algorithm on a categorized corpus and returns the overall F-measure
@param folderPath root directory of the categorized corpus. The directory must have the following structure:<br>
root<br>
├── CategoryA<br>
│ └── 1.txt<br>
│ └── 2.txt<br>
│ └── 3.txt<br>
├── CategoryB<br>
│ └── 1.txt<br>
│ └── ...<br>
└── ...<br>
Files do not have to be named with numbers, nor carry the txt extension, but they must be text files.
@param algorithm kmeans or repeated bisection
@throws IOException any possible IO exception |
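A usage sketch with a hypothetical corpus path (the enclosing class is assumed to be ClusterAnalyzer):
double f = ClusterAnalyzer.evaluate("/path/to/corpus", "kmeans"); // or "repeated bisection"
System.out.printf("F-measure = %.4f%n", f);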
public void calcCost(Node node)
{
node.cost = 0.0;
if (alphaFloat_ != null)
{
float c = 0.0f;
for (int i = 0; node.fVector.get(i) != -1; i++)
{
c += alphaFloat_[node.fVector.get(i) + node.y];
}
node.cost = costFactor_ * c;
}
else
{
double c = 0.0;
for (int i = 0; node.fVector.get(i) != -1; i++)
{
c += alpha_[node.fVector.get(i) + node.y];
}
node.cost = costFactor_ * c;
}
} | Computes the cost of the state feature functions
@param node |
public void calcCost(Path path)
{
path.cost = 0.0;
if (alphaFloat_ != null)
{
float c = 0.0f;
for (int i = 0; path.fvector.get(i) != -1; i++)
{
c += alphaFloat_[path.fvector.get(i) + path.lnode.y * y_.size() + path.rnode.y];
}
path.cost = costFactor_ * c;
}
else
{
double c = 0.0;
for (int i = 0; path.fvector.get(i) != -1; i++)
{
c += alpha_[path.fvector.get(i) + path.lnode.y * y_.size() + path.rnode.y];
}
path.cost = costFactor_ * c;
}
} | Computes the cost of the transition feature functions
@param path the edge |
public List<IWord> getWordList()
{
List<IWord> wordList = new LinkedList<IWord>();
for (Sentence sentence : sentenceList)
{
wordList.addAll(sentence.wordList);
}
return wordList;
} | Gets the word sequence
@return |
public List<List<Word>> getSimpleSentenceList()
{
List<List<Word>> simpleList = new LinkedList<List<Word>>();
for (Sentence sentence : sentenceList)
{
List<Word> wordList = new LinkedList<Word>();
for (IWord word : sentence.wordList)
{
if (word instanceof CompoundWord)
{
for (Word inner : ((CompoundWord) word).innerList)
{
wordList.add(inner);
}
}
else
{
wordList.add((Word) word);
}
}
simpleList.add(wordList);
}
return simpleList;
} | Gets the simple sentence list, in which compound words are split into simple words
@return |
public List<List<IWord>> getComplexSentenceList()
{
List<List<IWord>> complexList = new LinkedList<List<IWord>>();
for (Sentence sentence : sentenceList)
{
complexList.add(sentence.wordList);
}
return complexList;
} | Gets the complex sentence list; each word in a sentence may be either a compound word or a simple word
@return |
public List<List<Word>> getSimpleSentenceList(Set<String> labelSet)
{
List<List<Word>> simpleList = new LinkedList<List<Word>>();
for (Sentence sentence : sentenceList)
{
List<Word> wordList = new LinkedList<Word>();
for (IWord word : sentence.wordList)
{
if (word instanceof CompoundWord)
{
if (labelSet.contains(word.getLabel()))
{
for (Word inner : ((CompoundWord) word).innerList)
{
wordList.add(inner);
}
}
else
{
wordList.add(((CompoundWord) word).toWord());
}
}
else
{
wordList.add((Word) word);
}
}
simpleList.add(wordList);
}
return simpleList;
} | Gets the simple sentence list; compound words whose labels are in the given set are split into simple words
@param labelSet
@return |
public static <T> T[] shrink(T[] from, T[] to)
{
assert to.length <= from.length;
System.arraycopy(from, 0, to, 0, to.length);
return to;
} | Shrinks an array (copies the leading elements of the source into the smaller target)
@param from the source
@param to the target
@param <T> the element type
@return the target |
private static int binarySearch(int[] a, int fromIndex, int length, int key)
{
int low = fromIndex;
int high = fromIndex + length - 1;
while (low <= high)
{
int mid = (low + high) >>> 1;
int midVal = a[mid << 1];
if (midVal < key)
low = mid + 1;
else if (midVal > key)
high = mid - 1;
else
return mid; // key found
}
return -(low + 1); // key not found.
} | Binary search; when the first word of a bigram is fixed there are relatively few successor words, so binary search still achieves very high performance
@param a the target array
@param fromIndex the starting index (counted in pairs)
@param length the number of pairs
@param key the word id
@return the pair index of the key, negative if not found |
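The search runs over an interleaved pair array, which makes the a[mid << 1] indexing concrete: even slots hold the successor word id (the search key), odd slots hold the co-occurrence frequency. A made-up layout:
// pairs for one predecessor word, sorted by successor id: (3,7), (9,2), (42,5)
int[] pair = {3, 7, 9, 2, 42, 5};
int index = binarySearch(pair, 0, 3, 9); // compares pair[mid << 1]; returns 1
int freq = pair[(index << 1) + 1];       // 2, the frequency of the bigram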
public static int getBiFrequency(String a, String b)
{
int idA = CoreDictionary.trie.exactMatchSearch(a);
if (idA == -1)
{
return 0;
}
int idB = CoreDictionary.trie.exactMatchSearch(b);
if (idB == -1)
{
return 0;
}
int index = binarySearch(pair, start[idA], start[idA + 1] - start[idA], idB);
if (index < 0) return 0;
index <<= 1;
return pair[index + 1];
} | Gets the co-occurrence frequency
@param a the first word
@param b the second word
@return the frequency of the bigram a@b |
public static int getBiFrequency(int idA, int idB)
{
// a negative id denotes a word from the user dictionary (user-defined words have no id); return a positive value to boost its affinity
if (idA < 0)
{
return -idA;
}
if (idB < 0)
{
return -idB;
}
int index = binarySearch(pair, start[idA], start[idA + 1] - start[idA], idB);
if (index < 0) return 0;
index <<= 1;
return pair[index + 1];
} | Gets the co-occurrence frequency
@param idA the id of the first word
@param idB the id of the second word
@return the co-occurrence frequency |
public static boolean reload()
{
String biGramDictionaryPath = HanLP.Config.BiGramDictionaryPath;
IOUtil.deleteFile(biGramDictionaryPath + ".table" + Predefine.BIN_EXT);
return load(biGramDictionaryPath);
} | Hot-reloads the bigram dictionary<br>
In a cluster environment (or with another IOAdapter) the cache file has to be deleted manually
@return whether it succeeded |
public boolean saveNGramToTxt(String path)
{
try
{
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(IOUtil.newOutputStream(path)));
for (Map.Entry<String, Integer> entry : trie.entrySet())
{
bw.write(entry.getKey() + " " + entry.getValue());
bw.newLine();
}
bw.close();
}
catch (Exception e)
{
logger.warning("Exception while saving the NGram dictionary to " + path + " " + e);
return false;
}
return true;
} | Saves the NGram dictionary
@param path
@return |
public static void recognition(List<Vertex> segResult, WordNet wordNetOptimum, WordNet wordNetAll)
{
StringBuilder sbName = new StringBuilder();
int appendTimes = 0;
ListIterator<Vertex> listIterator = segResult.listIterator();
listIterator.next();
int line = 1;
int activeLine = 1;
while (listIterator.hasNext())
{
Vertex vertex = listIterator.next();
if (appendTimes > 0)
{
if (vertex.guessNature() == Nature.nrf || TranslatedPersonDictionary.containsKey(vertex.realWord))
{
sbName.append(vertex.realWord);
++appendTimes;
}
else
{
// recognition finished
if (appendTimes > 1)
{
if (HanLP.Config.DEBUG)
{
System.out.println("Transliterated person name recognized: " + sbName.toString());
}
wordNetOptimum.insert(activeLine, new Vertex(Predefine.TAG_PEOPLE, sbName.toString(), new CoreDictionary.Attribute(Nature.nrf), WORD_ID), wordNetAll);
}
sbName.setLength(0);
appendTimes = 0;
}
}
else
{
// nrf triggers recognition
if (vertex.guessNature() == Nature.nrf
// || TranslatedPersonDictionary.containsKey(vertex.realWord)
)
{
sbName.append(vertex.realWord);
++appendTimes;
activeLine = line;
}
}
line += vertex.realWord.length();
}
} | Performs the recognition
@param segResult the coarse segmentation result
@param wordNetOptimum the word lattice of the coarse result
@param wordNetAll the full word lattice |
private int resize(int newSize)
{
int[] base2 = new int[newSize];
int[] check2 = new int[newSize];
if (allocSize > 0)
{
System.arraycopy(base, 0, base2, 0, allocSize);
System.arraycopy(check, 0, check2, 0, allocSize);
}
base = base2;
check = check2;
return allocSize = newSize;
} | Expands the arrays
@param newSize
@return |
private int fetch(Node parent, List<Node> siblings)
{
if (error_ < 0)
return 0;
int prev = 0;
for (int i = parent.left; i < parent.right; i++)
{
if ((length != null ? length[i] : key.get(i).length()) < parent.depth)
continue;
String tmp = key.get(i);
int cur = 0;
if ((length != null ? length[i] : tmp.length()) != parent.depth)
cur = (int) tmp.charAt(parent.depth) + 1;
if (prev > cur)
{
error_ = -3;
return 0;
}
if (cur != prev || siblings.size() == 0)
{
Node tmp_node = new Node();
tmp_node.depth = parent.depth + 1;
tmp_node.code = cur;
tmp_node.left = i;
if (siblings.size() != 0)
siblings.get(siblings.size() - 1).right = i;
siblings.add(tmp_node);
}
prev = cur;
}
if (siblings.size() != 0)
siblings.get(siblings.size() - 1).right = parent.right;
return siblings.size();
} | Fetches the directly connected child nodes
@param parent the parent node
@param siblings the (child) sibling nodes
@return the number of siblings |
private int insert(List<Node> siblings, BitSet used)
{
if (error_ < 0)
return 0;
int begin = 0;
int pos = Math.max(siblings.get(0).code + 1, nextCheckPos) - 1;
int nonzero_num = 0;
int first = 0;
if (allocSize <= pos)
resize(pos + 1);
outer:
// the goal of this loop is to find n free slots satisfying base[begin + a1...an] == 0, where a1...an are the n nodes in siblings
while (true)
{
pos++;
if (allocSize <= pos)
resize(pos + 1);
if (check[pos] != 0)
{
nonzero_num++;
continue;
}
else if (first == 0)
{
nextCheckPos = pos;
first = 1;
}
begin = pos - siblings.get(0).code; // distance between the current position and the first sibling
if (allocSize <= (begin + siblings.get(siblings.size() - 1).code))
{
resize(begin + siblings.get(siblings.size() - 1).code + Character.MAX_VALUE);
}
//if (used[begin])
// continue;
if(used.get(begin)){
continue;
}
for (int i = 1; i < siblings.size(); i++)
if (check[begin + siblings.get(i).code] != 0)
continue outer;
break;
}
// -- Simple heuristics --
// if the percentage of non-empty contents in check between the
// index
// 'next_check_pos' and 'check' is greater than some constant value
// (e.g. 0.9),
// new 'next_check_pos' index is written by 'check'.
if (1.0 * nonzero_num / (pos - nextCheckPos + 1) >= 0.95)
nextCheckPos = pos; // if 95% or more of the slots between next_check_pos and pos are occupied, start searching directly from pos on the next insertion
//used[begin] = true;
used.set(begin);
size = (size > begin + siblings.get(siblings.size() - 1).code + 1) ? size
: begin + siblings.get(siblings.size() - 1).code + 1;
for (int i = 0; i < siblings.size(); i++)
{
check[begin + siblings.get(i).code] = begin;
// System.out.println(this);
}
for (int i = 0; i < siblings.size(); i++)
{
List<Node> new_siblings = new ArrayList<Node>();
if (fetch(siblings.get(i), new_siblings) == 0) // the end of a word that is not a prefix of any other word
{
base[begin + siblings.get(i).code] = (value != null) ? (-value[siblings
.get(i).left] - 1) : (-siblings.get(i).left - 1);
// System.out.println(this);
if (value != null && (-value[siblings.get(i).left] - 1) >= 0)
{
error_ = -2;
return 0;
}
progress++;
// if (progress_func_) (*progress_func_) (progress,
// keySize);
}
else
{
int h = insert(new_siblings, used); // dfs
base[begin + siblings.get(i).code] = h;
// System.out.println(this);
}
}
return begin;
} | Inserts the nodes
@param siblings the sibling nodes waiting to be inserted
@return the insertion position |
public int build(Set<Map.Entry<String, V>> entrySet)
{
List<String> keyList = new ArrayList<String>(entrySet.size());
List<V> valueList = new ArrayList<V>(entrySet.size());
for (Map.Entry<String, V> entry : entrySet)
{
keyList.add(entry.getKey());
valueList.add(entry.getValue());
}
return build(keyList, valueList);
} | Builds the DAT
@param entrySet note that this entrySet must be in lexicographic order, otherwise the build will fail!
@return |
public int build(TreeMap<String, V> keyValueMap)
{
assert keyValueMap != null;
Set<Map.Entry<String, V>> entrySet = keyValueMap.entrySet();
return build(entrySet);
} | Conveniently constructs a double-array trie
@param keyValueMap a map of key-value pairs in ascending key order
@return the build result |
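A minimal construction sketch: a TreeMap iterates in ascending key order, which satisfies the lexicographic-order requirement automatically.
TreeMap<String, String> map = new TreeMap<String, String>();
map.put("一举", "yi ju");
map.put("一举一动", "yi ju yi dong");
map.put("万能", "wan neng");
DoubleArrayTrie<String> dat = new DoubleArrayTrie<String>();
int error = dat.build(map);          // 0 on success
System.out.println(dat.get("万能")); // wan neng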
public int build(List<String> _key, int _length[], int _value[],
int _keySize)
{
if (_key == null || _keySize > _key.size())
return 0;
// progress_func_ = progress_func;
key = _key;
length = _length;
keySize = _keySize;
value = _value;
progress = 0;
allocSize = 0;
resize(65536 * 32); // 32 times the double-byte (65536) range
base[0] = 1;
nextCheckPos = 0;
Node root_node = new Node();
root_node.left = 0;
root_node.right = keySize;
root_node.depth = 0;
List<Node> siblings = new ArrayList<Node>();
fetch(root_node, siblings);
insert(siblings, new BitSet());
shrink();
// size += (1 << 8 * 2) + 1; // ???
// if (size >= allocSize) resize (size);
key = null;
length = null;
return error_;
} | The one and only build method
@param _key the key set, which must be in lexicographic order
@param _length the length of each key; leave null to obtain it dynamically
@param _value the value of each key; leave null to use the key's index as its value
@param _keySize the number of keys, which should be set to _key.size()
@return the error status (negative on failure) |
public boolean save(DataOutputStream out)
{
try
{
out.writeInt(size);
for (int i = 0; i < size; i++)
{
out.writeInt(base[i]);
out.writeInt(check[i]);
}
}
catch (Exception e)
{
return false;
}
return true;
} | Saves the base and check arrays
@param out
@return |
public boolean load(String path, List<V> value)
{
if (!loadBaseAndCheck(path)) return false;
v = (V[]) value.toArray();
return true;
} | Loads from disk; the values must be supplied separately
@param path
@param value
@return |
public boolean load(String path, V[] value)
{
if (!(IOAdapter == null ? loadBaseAndCheckByFileChannel(path) :
load(ByteArrayStream.createByteArrayStream(path), value)
)) return false;
v = value;
return true;
} | Loads from disk; the values must be supplied separately
@param path
@param value
@return |
public boolean load(byte[] bytes, int offset, V[] value)
{
if (bytes == null) return false;
size = ByteUtil.bytesHighFirstToInt(bytes, offset);
offset += 4;
base = new int[size + 65535]; // allocate a little extra to guard against out-of-bounds access
check = new int[size + 65535];
for (int i = 0; i < size; i++)
{
base[i] = ByteUtil.bytesHighFirstToInt(bytes, offset);
offset += 4;
check[i] = ByteUtil.bytesHighFirstToInt(bytes, offset);
offset += 4;
}
v = value;
return true;
} | Loads from a byte array (on macOS this method turns out to be faster than ByteArray)
@param bytes
@param offset
@param value
@return |
private boolean loadBaseAndCheck(String path)
{
try
{
DataInputStream in = new DataInputStream(new BufferedInputStream(IOAdapter == null ?
new FileInputStream(path) :
IOAdapter.open(path)
));
size = in.readInt();
base = new int[size + 65535]; // allocate a little extra to guard against out-of-bounds access
check = new int[size + 65535];
for (int i = 0; i < size; i++)
{
base[i] = in.readInt();
check[i] = in.readInt();
}
}
catch (Exception e)
{
return false;
}
return true;
} | Loads the double array from disk
@param path
@return |
public boolean serializeTo(String path)
{
ObjectOutputStream out = null;
try
{
out = new ObjectOutputStream(IOUtil.newOutputStream(path));
out.writeObject(this);
}
catch (Exception e)
{
// e.printStackTrace();
return false;
}
return true;
} | Serializes this object to the given path
@param path
@return |
public int exactMatchSearch(char[] keyChars, int pos, int len, int nodePos)
{
int result = -1;
int b = base[nodePos];
int p;
for (int i = pos; i < len; i++)
{
p = b + (int) (keyChars[i]) + 1;
if (b == check[p])
b = base[p];
else
return result;
}
p = b;
int n = base[p];
if (b == check[p] && n < 0)
{
result = -n - 1;
}
return result;
} | Exact-match query
@param keyChars the char array of the key
@param pos the starting position in the char array
@param len the length of the key
@param nodePos the position to start searching from (this parameter allows querying from a non-root node)
@return the value ID represented by the matched node; negative means it does not exist |
public List<Integer> commonPrefixSearch(String key, int pos, int len, int nodePos)
{
if (len <= 0)
len = key.length();
if (nodePos <= 0)
nodePos = 0;
List<Integer> result = new ArrayList<Integer>();
char[] keyChars = key.toCharArray();
int b = base[nodePos];
int n;
int p;
for (int i = pos; i < len; i++)
{
p = b + (int) (keyChars[i]) + 1; // state transition: p = base[char[i-1]] + char[i] + 1
if (b == check[p]) // base[char[i-1]] == check[base[char[i-1]] + char[i] + 1]
b = base[p];
else
return result;
p = b;
n = base[p];
if (b == check[p] && n < 0) // base[p] == check[p] && base[p] < 0 means a word was found
{
result.add(-n - 1);
}
}
return result;
} | Prefix query
@param key the query string
@param pos the starting position in the string
@param len the length of the string
@param nodePos the starting position in base
@return a list containing all the matched value indices |
public LinkedList<Map.Entry<String, V>> commonPrefixSearchWithValue(char[] keyChars, int begin)
{
int len = keyChars.length;
LinkedList<Map.Entry<String, V>> result = new LinkedList<Map.Entry<String, V>>();
int b = base[0];
int n;
int p;
for (int i = begin; i < len; ++i)
{
p = b;
n = base[p];
if (b == check[p] && n < 0) // base[p] == check[p] && base[p] < 0 means a word was found
{
result.add(new AbstractMap.SimpleEntry<String, V>(new String(keyChars, begin, i - begin), v[-n - 1]));
}
p = b + (int) (keyChars[i]) + 1; // state transition: p = base[char[i-1]] + char[i] + 1
// the next line may cause an out-of-bounds access; better to change it to if (p < size && b == check[p]), or to allocate some extra memory
if (b == check[p]) // base[char[i-1]] == check[base[char[i-1]] + char[i] + 1]
b = base[p];
else
return result;
}
p = b;
n = base[p];
if (b == check[p] && n < 0)
{
result.add(new AbstractMap.SimpleEntry<String, V>(new String(keyChars, begin, len - begin), v[-n - 1]));
}
return result;
} | Optimized prefix query that can reuse the char array
@param keyChars
@param begin
@return |
public V get(String key)
{
int index = exactMatchSearch(key);
if (index >= 0)
{
return getValueAt(index);
}
return null;
} | Exact-match query
@param key the key
@return the value |
protected int transition(char[] path)
{
int b = base[0];
int p;
for (int i = 0; i < path.length; ++i)
{
p = b + (int) (path[i]) + 1;
if (b == check[p])
b = base[p];
else
return -1;
}
p = b;
return p;
} | Transitions the state along the given nodes
@param path
@return |
public int transition(String path, int from)
{
int b = from;
int p;
for (int i = 0; i < path.length(); ++i)
{
p = b + (int) (path.charAt(i)) + 1;
if (b == check[p])
b = base[p];
else
return -1;
}
p = b;
return p;
} | Transitions the state along the given path
@param path the path
@param from the starting state (the root start is base[0] = 1)
@return the state after the transition (an index into the double array) |
public int transition(char c, int from)
{
int b = from;
int p;
p = b + (int) (c) + 1;
if (b == check[p])
b = base[p];
else
return -1;
return b;
} | Transitions the state by one character
@param c
@param from
@return |
public V output(int state)
{
if (state < 0) return null;
int n = base[state];
if (state == check[state] && n < 0)
{
return v[-n - 1];
}
return null;
} | Checks whether a state corresponds to an output
@param state an index into the double array
@return the corresponding value, where null means no output |
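A sketch of walking the automaton with the two primitives above, starting from the root state base[0] = 1 as documented; dat is assumed to be a built DoubleArrayTrie<String>.
int state = dat.transition("一举", 1); // -1 would mean there is no such path
if (state > 0)
{
    String value = dat.output(state); // non-null iff "一举" is a complete key
    System.out.println(value);
}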
public void parseText(String text, AhoCorasickDoubleArrayTrie.IHit<V> processor)
{
Searcher searcher = getSearcher(text, 0);
while (searcher.next())
{
processor.hit(searcher.begin, searcher.begin + searcher.length, searcher.value);
}
} | Full segmentation (reports every match)
@param text the text
@param processor the handler |
public void parseLongestText(String text, AhoCorasickDoubleArrayTrie.IHit<V> processor)
{
LongestSearcher searcher = getLongestSearcher(text, 0);
while (searcher.next())
{
processor.hit(searcher.begin, searcher.begin + searcher.length, searcher.value);
}
} | Longest matching
@param text the text
@param processor the handler |
public boolean set(String key, V value)
{
int index = exactMatchSearch(key);
if (index >= 0)
{
v[index] = value;
return true;
}
return false;
} | Updates the value associated with a key
@param key the key
@param value the value
@return whether it succeeded (failure means the key does not exist) |
private void shrink()
{
// if (HanLP.Config.DEBUG)
// {
// System.err.printf("released %d bytes of memory\n", base.length - size - 65535);
// }
int nbase[] = new int[size + 65535];
System.arraycopy(base, 0, nbase, 0, size);
base = nbase;
int ncheck[] = new int[size + 65535];
System.arraycopy(check, 0, ncheck, 0, size);
check = ncheck;
} | Releases unused memory |
private static List<Vertex> dijkstra(Graph graph)
{
List<Vertex> resultList = new LinkedList<Vertex>();
Vertex[] vertexes = graph.getVertexes();
List<EdgeFrom>[] edgesTo = graph.getEdgesTo();
double[] d = new double[vertexes.length];
Arrays.fill(d, Double.MAX_VALUE);
d[d.length - 1] = 0;
int[] path = new int[vertexes.length];
Arrays.fill(path, -1);
PriorityQueue<State> que = new PriorityQueue<State>();
que.add(new State(0, vertexes.length - 1));
while (!que.isEmpty())
{
State p = que.poll();
if (d[p.vertex] < p.cost) continue;
for (EdgeFrom edgeFrom : edgesTo[p.vertex])
{
if (d[edgeFrom.from] > d[p.vertex] + edgeFrom.weight)
{
d[edgeFrom.from] = d[p.vertex] + edgeFrom.weight;
que.add(new State(d[edgeFrom.from], edgeFrom.from));
path[edgeFrom.from] = p.vertex;
}
}
}
for (int t = 0; t != -1; t = path[t])
{
resultList.add(vertexes[t]);
}
return resultList;
} | Dijkstra shortest path
@param graph
@return |
public static boolean valid(String[] pinyinStringArray)
{
for (String p : pinyinStringArray)
{
if (!valid(p)) return false;
}
return true;
} | Whether all of these pinyin are valid
@param pinyinStringArray
@return |
public static int compare(Long[] arrayA, Long[] arrayB)
{
int len1 = arrayA.length;
int len2 = arrayB.length;
int lim = Math.min(len1, len2);
int k = 0;
while (k < lim)
{
Long c1 = arrayA[k];
Long c2 = arrayB[k];
if (!c1.equals(c2))
{
return c1.compareTo(c2);
}
++k;
}
return len1 - len2;
} | Compares the ordering of array A and array B
@param arrayA
@param arrayB
@return |
public void append(int element)
{
if (this.size == this.data.length)
{
expand();
}
this.data[this.size] = element;
this.size += 1;
} | Appends an element at the tail of the array
@param element |
public void loseWeight()
{
if (size == data.length)
{
return;
}
int[] newData = new int[size];
System.arraycopy(this.data, 0, newData, 0, size);
this.data = newData;
} | Trims the excess buffer |
protected static List<AtomNode> simpleAtomSegment(char[] charArray, int start, int end)
{
List<AtomNode> atomNodeList = new LinkedList<AtomNode>();
atomNodeList.add(new AtomNode(new String(charArray, start, end - start), CharType.CT_LETTER));
return atomNodeList;
} | Simple atomic segmentation: puts all the characters together as a single word
@param charArray
@param start
@param end
@return |
protected static List<AtomNode> quickAtomSegment(char[] charArray, int start, int end)
{
List<AtomNode> atomNodeList = new LinkedList<AtomNode>();
int offsetAtom = start;
int preType = CharType.get(charArray[offsetAtom]);
int curType;
while (++offsetAtom < end)
{
curType = CharType.get(charArray[offsetAtom]);
if (curType != preType)
{
// floating-point number recognition
if (preType == CharType.CT_NUM && ",,..".indexOf(charArray[offsetAtom]) != -1)
{
if (offsetAtom+1 < end)
{
int nextType = CharType.get(charArray[offsetAtom+1]);
if (nextType == CharType.CT_NUM)
{
continue;
}
}
}
atomNodeList.add(new AtomNode(new String(charArray, start, offsetAtom - start), preType));
start = offsetAtom;
}
preType = curType;
}
if (offsetAtom == end)
atomNodeList.add(new AtomNode(new String(charArray, start, offsetAtom - start), preType));
return atomNodeList;
} | Fast atomic segmentation, intended to replace the old slow method
@param charArray
@param start
@param end
@return |
protected static List<Vertex> combineByCustomDictionary(List<Vertex> vertexList)
{
return combineByCustomDictionary(vertexList, CustomDictionary.dat);
} | Merges the coarse segmentation result using the user dictionary
@param vertexList the coarse segmentation result
@return the merged result |
protected static List<Vertex> combineByCustomDictionary(List<Vertex> vertexList, DoubleArrayTrie<CoreDictionary.Attribute> dat)
{
assert vertexList.size() >= 2 : "vertexList must contain at least 始##始 and 末##末";
Vertex[] wordNet = new Vertex[vertexList.size()];
vertexList.toArray(wordNet);
// merge via the DAT
int length = wordNet.length - 1; // skip the head and the tail
for (int i = 1; i < length; ++i)
{
int state = 1;
state = dat.transition(wordNet[i].realWord, state);
if (state > 0)
{
int to = i + 1;
int end = to;
CoreDictionary.Attribute value = dat.output(state);
for (; to < length; ++to)
{
state = dat.transition(wordNet[to].realWord, state);
if (state < 0) break;
CoreDictionary.Attribute output = dat.output(state);
if (output != null)
{
value = output;
end = to + 1;
}
}
if (value != null)
{
combineWords(wordNet, i, end, value);
i = end - 1;
}
}
}
// merge via the BinTrie
if (CustomDictionary.trie != null)
{
for (int i = 1; i < length; ++i)
{
if (wordNet[i] == null) continue;
BaseNode<CoreDictionary.Attribute> state = CustomDictionary.trie.transition(wordNet[i].realWord.toCharArray(), 0);
if (state != null)
{
int to = i + 1;
int end = to;
CoreDictionary.Attribute value = state.getValue();
for (; to < length; ++to)
{
if (wordNet[to] == null) continue;
state = state.transition(wordNet[to].realWord.toCharArray(), 0);
if (state == null) break;
if (state.getValue() != null)
{
value = state.getValue();
end = to + 1;
}
}
if (value != null)
{
combineWords(wordNet, i, end, value);
i = end - 1;
}
}
}
}
vertexList.clear();
for (Vertex vertex : wordNet)
{
if (vertex != null) vertexList.add(vertex);
}
return vertexList;
} | Merges the coarse segmentation result using the user dictionary
@param vertexList the coarse segmentation result
@param dat the user-defined dictionary
@return the merged result |
protected static List<Vertex> combineByCustomDictionary(List<Vertex> vertexList, final WordNet wordNetAll)
{
return combineByCustomDictionary(vertexList, CustomDictionary.dat, wordNetAll);
} | Merges the coarse segmentation result using the user dictionary, collecting user words into the full word lattice
@param vertexList the coarse segmentation result
@param wordNetAll the full word lattice that collects the user words
@return the merged result |
protected static List<Vertex> combineByCustomDictionary(List<Vertex> vertexList, DoubleArrayTrie<CoreDictionary.Attribute> dat, final WordNet wordNetAll)
{
List<Vertex> outputList = combineByCustomDictionary(vertexList, dat);
int line = 0;
for (final Vertex vertex : outputList)
{
final int parentLength = vertex.realWord.length();
final int currentLine = line;
if (parentLength >= 3)
{
CustomDictionary.parseText(vertex.realWord, new AhoCorasickDoubleArrayTrie.IHit<CoreDictionary.Attribute>()
{
@Override
public void hit(int begin, int end, CoreDictionary.Attribute value)
{
if (end - begin == parentLength) return;
wordNetAll.add(currentLine + begin, new Vertex(vertex.realWord.substring(begin, end), value));
}
});
}
line += parentLength;
}
return outputList;
} | Merges the coarse segmentation result using the user dictionary, collecting user words into the full word lattice
@param vertexList the coarse segmentation result
@param dat the user-defined dictionary
@param wordNetAll the full word lattice that collects the user words
@return the merged result |
private static void combineWords(Vertex[] wordNet, int start, int end, CoreDictionary.Attribute value)
{
if (start + 1 == end) // minor optimization: a single word needs no merging, just apply the new attribute directly
{
wordNet[start].attribute = value;
}
else
{
StringBuilder sbTerm = new StringBuilder();
for (int j = start; j < end; ++j)
{
if (wordNet[j] == null) continue;
String realWord = wordNet[j].realWord;
sbTerm.append(realWord);
wordNet[j] = null;
}
wordNet[start] = new Vertex(sbTerm.toString(), value);
}
} | Merges consecutive words into one
@param wordNet the word lattice
@param start the starting index (inclusive)
@param end the ending index (exclusive)
@param value the new attribute |
protected static List<Term> convert(List<Vertex> vertexList, boolean offsetEnabled)
{
assert vertexList != null;
assert vertexList.size() >= 2 : "this path should not be shorter than 2 " + vertexList.toString();
int length = vertexList.size() - 2;
List<Term> resultList = new ArrayList<Term>(length);
Iterator<Vertex> iterator = vertexList.iterator();
iterator.next();
if (offsetEnabled)
{
int offset = 0;
for (int i = 0; i < length; ++i)
{
Vertex vertex = iterator.next();
Term term = convert(vertex);
term.offset = offset;
offset += term.length();
resultList.add(term);
}
}
else
{
for (int i = 0; i < length; ++i)
{
Vertex vertex = iterator.next();
Term term = convert(vertex);
resultList.add(term);
}
}
return resultList;
} | Converts a path into the final result
@param vertexList
@param offsetEnabled whether to compute offsets
@return |
protected void mergeNumberQuantifier(List<Vertex> termList, WordNet wordNetAll, Config config)
{
if (termList.size() < 4) return;
StringBuilder sbQuantifier = new StringBuilder();
ListIterator<Vertex> iterator = termList.listIterator();
iterator.next();
int line = 1;
while (iterator.hasNext())
{
Vertex pre = iterator.next();
if (pre.hasNature(Nature.m))
{
sbQuantifier.append(pre.realWord);
Vertex cur = null;
while (iterator.hasNext() && (cur = iterator.next()).hasNature(Nature.m))
{
sbQuantifier.append(cur.realWord);
iterator.remove();
removeFromWordNet(cur, wordNetAll, line, sbQuantifier.length());
}
if (cur != null)
{
if ((cur.hasNature(Nature.q) || cur.hasNature(Nature.qv) || cur.hasNature(Nature.qt)))
{
if (config.indexMode > 0)
{
wordNetAll.add(line, new Vertex(sbQuantifier.toString(), new CoreDictionary.Attribute(Nature.m)));
}
sbQuantifier.append(cur.realWord);
iterator.remove();
removeFromWordNet(cur, wordNetAll, line, sbQuantifier.length());
}
else
{
line += cur.realWord.length(); // the last next in (cur = iterator.next()).hasNature(Nature.m) may not carry a q-type part of speech
}
}
if (sbQuantifier.length() != pre.realWord.length())
{
for (Vertex vertex : wordNetAll.get(line + pre.realWord.length()))
{
vertex.from = null;
}
pre.realWord = sbQuantifier.toString();
pre.word = Predefine.TAG_NUMBER;
pre.attribute = new CoreDictionary.Attribute(Nature.mq);
pre.wordID = CoreDictionary.M_WORD_ID;
sbQuantifier.setLength(0);
}
}
sbQuantifier.setLength(0);
line += pre.realWord.length();
}
// System.out.println(wordNetAll);
} | Merges numbers (with their following quantifiers)
@param termList |
private static void removeFromWordNet(Vertex cur, WordNet wordNetAll, int line, int length)
{
LinkedList<Vertex>[] vertexes = wordNetAll.getVertexes();
// remove it from the wordNet
for (Vertex vertex : vertexes[line + length])
{
if (vertex.from == cur)
vertex.from = null;
}
ListIterator<Vertex> iterator = vertexes[line + length - cur.realWord.length()].listIterator();
while (iterator.hasNext())
{
Vertex vertex = iterator.next();
if (vertex == cur) iterator.remove();
}
} | Completely erases a word from the word lattice
@param cur the word
@param wordNetAll the word lattice
@param line the line currently being scanned
@param length the current length of the buffer |
public List<Term> seg(String text)
{
char[] charArray = text.toCharArray();
if (HanLP.Config.Normalization)
{
CharTable.normalization(charArray);
}
if (config.threadNumber > 1 && charArray.length > 10000) // multithreading small texts is pointless and actually slower
{
List<String> sentenceList = SentencesUtil.toSentenceList(charArray);
String[] sentenceArray = new String[sentenceList.size()];
sentenceList.toArray(sentenceArray);
//noinspection unchecked
List<Term>[] termListArray = new List[sentenceArray.length];
final int per = sentenceArray.length / config.threadNumber;
WorkThread[] threadArray = new WorkThread[config.threadNumber];
for (int i = 0; i < config.threadNumber - 1; ++i)
{
int from = i * per;
threadArray[i] = new WorkThread(sentenceArray, termListArray, from, from + per);
threadArray[i].start();
}
threadArray[config.threadNumber - 1] = new WorkThread(sentenceArray, termListArray, (config.threadNumber - 1) * per, sentenceArray.length);
threadArray[config.threadNumber - 1].start();
try
{
for (WorkThread thread : threadArray)
{
thread.join();
}
}
catch (InterruptedException e)
{
logger.severe("Thread synchronization exception: " + TextUtility.exceptionToString(e));
return Collections.emptyList();
}
List<Term> termList = new LinkedList<Term>();
if (config.offset || config.indexMode > 0) // sentences were split, so offsets need to be re-adjusted
{
int sentenceOffset = 0;
for (int i = 0; i < sentenceArray.length; ++i)
{
for (Term term : termListArray[i])
{
term.offset += sentenceOffset;
termList.add(term);
}
sentenceOffset += sentenceArray[i].length();
}
}
else
{
for (List<Term> list : termListArray)
{
termList.addAll(list);
}
}
return termList;
}
// if (text.length() > 10000) // for large texts, split into sentences first and then segment, to avoid a high memory peak
// {
// List<Term> termList = new LinkedList<Term>();
// if (config.offset || config.indexMode)
// {
// int sentenceOffset = 0;
// for (String sentence : SentencesUtil.toSentenceList(charArray))
// {
// List<Term> termOfSentence = segSentence(sentence.toCharArray());
// for (Term term : termOfSentence)
// {
// term.offset += sentenceOffset;
// termList.add(term);
// }
// sentenceOffset += sentence.length();
// }
// }
// else
// {
// for (String sentence : SentencesUtil.toSentenceList(charArray))
// {
// termList.addAll(segSentence(sentence.toCharArray()));
// }
// }
//
// return termList;
// }
return segSentence(charArray);
} | Segments a text into words<br>
This method is thread-safe
@param text the text to segment
@return the word list |
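A usage sketch relying on the thread safety noted above (enableMultithreading appears further below):
Segment segment = HanLP.newSegment().enableMultithreading(true);
for (Term term : segment.seg("商品和服务"))
{
    System.out.println(term.word + "/" + term.nature);
}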
public List<Term> seg(char[] text)
{
assert text != null;
if (HanLP.Config.Normalization)
{
CharTable.normalization(text);
}
return segSentence(text);
} | Segmentation
@param text the text to segment
@return the word list |
public List<List<Term>> seg2sentence(String text, boolean shortest)
{
List<List<Term>> resultList = new LinkedList<List<Term>>();
{
for (String sentence : SentencesUtil.toSentenceList(text, shortest))
{
resultList.add(segSentence(sentence.toCharArray()));
}
}
return resultList;
} | Segments and splits into sentences, outputting in sentence form
@param text the text to segment
@param shortest whether to split into the finest clauses (treating commas as delimiters too)
@return a list of sentences, each consisting of a word list |
public Segment enableMultithreading(boolean enable)
{
if (enable) config.threadNumber = Runtime.getRuntime().availableProcessors();
else config.threadNumber = 1;
return this;
} | Enables multithreading
@param enable true enables as many threads as there are CPU cores, false means single-threaded
@return |
public String toStringWithoutLabels()
{
StringBuilder sb = new StringBuilder(size() * 4);
int i = 1;
for (IWord word : wordList)
{
if (word instanceof CompoundWord)
{
int j = 0;
for (Word w : ((CompoundWord) word).innerList)
{
sb.append(w.getValue());
if (++j != ((CompoundWord) word).innerList.size())
sb.append(' ');
}
}
else
sb.append(word.getValue());
if (i != wordList.size()) sb.append(' ');
++i;
}
return sb.toString();
} | Converts to a space-separated, label-free String
@return |
public String toStandoff(boolean withComment)
{
StringBuilder sb = new StringBuilder(size() * 4);
String delimiter = " ";
String text = text(delimiter);
sb.append(text).append('\n');
int i = 1;
int offset = 0;
for (IWord word : wordList)
{
assert text.charAt(offset) == word.getValue().charAt(0);
printWord(word, sb, i, offset, withComment);
++i;
if (word instanceof CompoundWord)
{
int offsetChild = offset;
for (Word child : ((CompoundWord) word).innerList)
{
printWord(child, sb, i, offsetChild, withComment);
offsetChild += child.length();
offsetChild += delimiter.length();
++i;
}
offset += delimiter.length() * ((CompoundWord) word).innerList.size();
}
else
{
offset += delimiter.length();
}
offset += word.length();
}
return sb.toString();
} | brat standoff format<br>
http://brat.nlplab.org/standoff.html
@param withComment
@return |
public Sentence translateLabels()
{
for (IWord word : wordList)
{
word.setLabel(PartOfSpeechTagDictionary.translate(word.getLabel()));
if (word instanceof CompoundWord)
{
for (Word child : ((CompoundWord) word).innerList)
{
child.setLabel(PartOfSpeechTagDictionary.translate(child.getLabel()));
}
}
}
return this;
} | Translates the part-of-speech labels of words according to the mapping specified by PartOfSpeechTagDictionary
@return |
public Sentence translateCompoundWordLabels()
{
for (IWord word : wordList)
{
if (word instanceof CompoundWord)
word.setLabel(PartOfSpeechTagDictionary.translate(word.getLabel()));
}
return this;
} | Translates the part-of-speech labels of compound words according to the mapping specified by PartOfSpeechTagDictionary
@return |
public static Sentence create(String param)
{
if (param == null)
{
return null;
}
param = param.trim();
if (param.isEmpty())
{
return null;
}
Pattern pattern = Pattern.compile("(\\[(([^\\s]+/[0-9a-zA-Z]+)\\s+)+?([^\\s]+/[0-9a-zA-Z]+)]/?[0-9a-zA-Z]+)|([^\\s]+/[0-9a-zA-Z]+)");
Matcher matcher = pattern.matcher(param);
List<IWord> wordList = new LinkedList<IWord>();
while (matcher.find())
{
String single = matcher.group();
IWord word = WordFactory.create(single);
if (word == null)
{
logger.warning("Failed to construct a word from " + single + "; the sentence construction parameter was " + param);
return null;
}
wordList.add(word);
}
if (wordList.isEmpty()) // parse as untagged text
{
for (String w : param.split("\\s+"))
{
wordList.add(new Word(w, null));
}
}
return new Sentence(wordList);
} | Creates a structured sentence from a string in the People's Daily 2014 corpus format
@param param
@return |
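A sketch of the two input forms the regular expression accepts: plain word/tag tokens, and a bracketed compound word followed by its own tag.
Sentence simple = Sentence.create("商品/n 和/c 服务/vn");
Sentence compound = Sentence.create("[中央/n 人民/n 广播/vn 电台/n]/nt 记者/n");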
public String text(String delimiter)
{
if (delimiter == null) delimiter = "";
StringBuilder sb = new StringBuilder(size() * 3);
for (IWord word : this)
{
if (word instanceof CompoundWord)
{
for (Word child : ((CompoundWord) word).innerList)
{
sb.append(child.getValue()).append(delimiter);
}
}
else
{
sb.append(word.getValue()).append(delimiter);
}
}
sb.setLength(sb.length() - delimiter.length());
return sb.toString();
} | The raw text form (no annotations)
@param delimiter the delimiter between words
@return |
public List<IWord> findWordsByLabel(String label)
{
List<IWord> wordList = new LinkedList<IWord>();
for (IWord word : this)
{
if (label.equals(word.getLabel()))
{
wordList.add(word);
}
}
return wordList;
} | Finds all words whose part of speech is label (does not examine the simple words inside compound words)
@param label
@return |
public IWord findFirstWordByLabel(String label)
{
for (IWord word : this)
{
if (label.equals(word.getLabel()))
{
return word;
}
}
return null;
} | Finds the first word whose part of speech is label (does not examine the simple words inside compound words)
@param label
@return |
public ListIterator<IWord> findFirstWordIteratorByLabel(String label)
{
ListIterator<IWord> listIterator = this.wordList.listIterator();
while (listIterator.hasNext())
{
IWord word = listIterator.next();
if (label.equals(word.getLabel()))
{
return listIterator;
}
}
return null;
} | Finds an iterator positioned just past the first word whose part of speech is label (does not examine the simple words inside compound words)<br>
To inspect the word, call previous<br>
To delete the word, call remove<br>
@param label
@return |
public List<Word> toSimpleWordList()
{
List<Word> wordList = new LinkedList<Word>();
for (IWord word : this.wordList)
{
if (word instanceof CompoundWord)
{
wordList.addAll(((CompoundWord) word).innerList);
}
else
{
wordList.add((Word) word);
}
}
return wordList;
} | Converts to a list of simple words
@return |
public String[] toWordArray()
{
List<Word> wordList = toSimpleWordList();
String[] wordArray = new String[wordList.size()];
Iterator<Word> iterator = wordList.iterator();
for (int i = 0; i < wordArray.length; i++)
{
wordArray[i] = iterator.next().value;
}
return wordArray;
} | Gets an array of all the words
@return |
public String[][] toWordTagArray()
{
List<Word> wordList = toSimpleWordList();
String[][] pair = new String[2][wordList.size()];
Iterator<Word> iterator = wordList.iterator();
for (int i = 0; i < pair[0].length; i++)
{
Word word = iterator.next();
pair[0][i] = word.value;
pair[1][i] = word.label;
}
return pair;
} | word pos
@return |
public String[][] toWordTagNerArray(NERTagSet tagSet)
{
List<String[]> tupleList = Utility.convertSentenceToNER(this, tagSet);
String[][] result = new String[3][tupleList.size()];
Iterator<String[]> iterator = tupleList.iterator();
for (int i = 0; i < result[0].length; i++)
{
String[] tuple = iterator.next();
for (int j = 0; j < 3; ++j)
{
result[j][i] = tuple[j];
}
}
return result;
} | word pos ner
@param tagSet
@return |
public synchronized static <T> T get(Object id)
{
SoftReference reference = pool.get(id);
if (reference == null) return null;
return (T) reference.get();
} | Gets an object
@param id the object's id, which can be any globally unique identifier
@param <T> the type of the object
@return the object |
public synchronized static <T> T put(Object id, T value)
{
SoftReference old = pool.put(id, new SoftReference(value));
return old == null ? null : (T) old.get();
} | Stores a global object
@param id
@param <T>
@return |
public LinkedList<Entry<String, V>> commonPrefixSearchWithValue(char[] key, int begin)
{
LinkedList<Entry<String, Integer>> valueIndex = mdag.commonPrefixSearchWithValueIndex(key, begin);
LinkedList<Entry<String, V>> entryList = new LinkedList<Entry<String, V>>();
for (Entry<String, Integer> entry : valueIndex)
{
entryList.add(new SimpleEntry<String, V>(entry.getKey(), valueList.get(entry.getValue())));
}
return entryList;
} | Prefix query
@param key
@param begin
@return |
public LinkedList<Entry<String, V>> commonPrefixSearchWithValue(String key)
{
return commonPrefixSearchWithValue(key.toCharArray(), 0);
} | Prefix query
@param key
@return |
public static int[] compute(int[] obs, int[] states, double[] start_p, double[][] trans_p, double[][] emit_p)
{
int _max_states_value = 0;
for (int s : states)
{
_max_states_value = Math.max(_max_states_value, s);
}
++_max_states_value;
double[][] V = new double[obs.length][_max_states_value];
int[][] path = new int[_max_states_value][obs.length];
for (int y : states)
{
V[0][y] = start_p[y] + emit_p[y][obs[0]];
path[y][0] = y;
}
for (int t = 1; t < obs.length; ++t)
{
int[][] newpath = new int[_max_states_value][obs.length];
for (int y : states)
{
double prob = Double.MAX_VALUE;
int state;
for (int y0 : states)
{
double nprob = V[t - 1][y0] + trans_p[y0][y] + emit_p[y][obs[t]];
if (nprob < prob)
{
prob = nprob;
state = y0;
// record the best probability
V[t][y] = prob;
// record the path
System.arraycopy(path[state], 0, newpath[y], 0, t);
newpath[y][t] = y;
}
}
}
path = newpath;
}
double prob = Double.MAX_VALUE;
int state = 0;
for (int y : states)
{
if (V[obs.length - 1][y] < prob)
{
prob = V[obs.length - 1][y];
state = y;
}
}
return path[state];
} | Solves the HMM; take the logarithm of all probabilities beforehand
@param obs the observation sequence
@param states the hidden states
@param start_p the initial probabilities (of the hidden states)
@param trans_p the transition probabilities (between hidden states)
@param emit_p the emission probabilities (probability of a hidden state appearing as an observed state)
@return the most probable sequence |
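A toy sketch with two hidden states; every probability is passed as a negative logarithm so that the algorithm's minimization corresponds to maximizing likelihood (all numbers here are made up).
int[] obs = {0, 1, 0};             // observed symbols
int[] states = {0, 1};             // hidden states
double[] start_p = {0.69, 0.36};   // e.g. -log(0.5) ≈ 0.69, -log(0.7) ≈ 0.36
double[][] trans_p = {{0.36, 1.20}, {1.20, 0.36}};
double[][] emit_p = {{0.36, 1.20}, {1.20, 0.36}};
int[] best = compute(obs, states, start_p, trans_p, emit_p);
System.out.println(Arrays.toString(best)); // most probable hidden state sequence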
public static void compute(List<Vertex> vertexList, TransformMatrix transformMatrixDictionary)
{
if (Nature.values().length != transformMatrixDictionary.states.length)
transformMatrixDictionary.extend(Nature.values().length);
int length = vertexList.size() - 1;
double[][] cost = new double[2][]; // rolling array
Iterator<Vertex> iterator = vertexList.iterator();
Vertex start = iterator.next();
Nature pre = start.attribute.nature[0];
// the first one is fixed
// start.confirmNature(pre);
// the second can also be computed directly
Vertex preItem;
Nature[] preTagSet;
{
Vertex item = iterator.next();
cost[0] = new double[item.attribute.nature.length];
int j = 0;
int curIndex = 0;
for (Nature cur : item.attribute.nature)
{
cost[0][j] = transformMatrixDictionary.transititon_probability[pre.ordinal()][cur.ordinal()] - Math.log((item.attribute.frequency[curIndex] + 1e-8) / transformMatrixDictionary.getTotalFrequency(cur.ordinal()));
++j;
++curIndex;
}
preTagSet = item.attribute.nature;
preItem = item;
}
// from the third one on, things get more complex
for (int i = 1; i < length; ++i)
{
int index_i = i & 1;
int index_i_1 = 1 - index_i;
Vertex item = iterator.next();
cost[index_i] = new double[item.attribute.nature.length];
double perfect_cost_line = Double.MAX_VALUE;
int k = 0;
Nature[] curTagSet = item.attribute.nature;
for (Nature cur : curTagSet)
{
cost[index_i][k] = Double.MAX_VALUE;
int j = 0;
for (Nature p : preTagSet)
{
double now = cost[index_i_1][j] + transformMatrixDictionary.transititon_probability[p.ordinal()][cur.ordinal()] - Math.log((item.attribute.frequency[k] + 1e-8) / transformMatrixDictionary.getTotalFrequency(cur.ordinal()));
if (now < cost[index_i][k])
{
cost[index_i][k] = now;
if (now < perfect_cost_line)
{
perfect_cost_line = now;
pre = p;
}
}
++j;
}
++k;
}
preItem.confirmNature(pre);
preTagSet = curTagSet;
preItem = item;
}
} | A specialized HMM solver
@param vertexList a path containing Vertex.B nodes
@param transformMatrixDictionary the transition matrix of the dictionary |
public static <E extends Enum<E>> List<E> computeEnum(List<EnumItem<E>> roleTagList, TransformMatrixDictionary<E> transformMatrixDictionary)
{
int length = roleTagList.size() - 1;
List<E> tagList = new ArrayList<E>(roleTagList.size());
double[][] cost = new double[2][]; // rolling array
Iterator<EnumItem<E>> iterator = roleTagList.iterator();
EnumItem<E> start = iterator.next();
E pre = start.labelMap.entrySet().iterator().next().getKey();
// the first one is fixed
tagList.add(pre);
// the second can also be computed directly
Set<E> preTagSet;
{
EnumItem<E> item = iterator.next();
cost[0] = new double[item.labelMap.size()];
int j = 0;
for (E cur : item.labelMap.keySet())
{
cost[0][j] = transformMatrixDictionary.transititon_probability[pre.ordinal()][cur.ordinal()] - Math.log((item.getFrequency(cur) + 1e-8) / transformMatrixDictionary.getTotalFrequency(cur));
++j;
}
preTagSet = item.labelMap.keySet();
}
// from the third one on, things get more complex
for (int i = 1; i < length; ++i)
{
int index_i = i & 1;
int index_i_1 = 1 - index_i;
EnumItem<E> item = iterator.next();
cost[index_i] = new double[item.labelMap.size()];
double perfect_cost_line = Double.MAX_VALUE;
int k = 0;
Set<E> curTagSet = item.labelMap.keySet();
for (E cur : curTagSet)
{
cost[index_i][k] = Double.MAX_VALUE;
int j = 0;
for (E p : preTagSet)
{
double now = cost[index_i_1][j] + transformMatrixDictionary.transititon_probability[p.ordinal()][cur.ordinal()] - Math.log((item.getFrequency(cur) + 1e-8) / transformMatrixDictionary.getTotalFrequency(cur));
if (now < cost[index_i][k])
{
cost[index_i][k] = now;
if (now < perfect_cost_line)
{
perfect_cost_line = now;
pre = p;
}
}
++j;
}
++k;
}
tagList.add(pre);
preTagSet = curTagSet;
}
tagList.add(tagList.get(0)); // for the final ##末##
return tagList;
} | The standard Viterbi algorithm: high precision, slightly lower efficiency
@param roleTagList the observation sequence
@param transformMatrixDictionary the transition matrix
@param <E> the concrete type of EnumItem
@return the prediction result |