package org.apache.lucene.classification;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IntsRefBuilder;
import org.apache.lucene.util.fst.Builder;
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.PositiveIntOutputs;
import org.apache.lucene.util.fst.Util;
/**
 * A perceptron-based binary {@link Classifier} over {@code Boolean} classes.
 *
 * <p>Training iterates the documents matched by the given query (restricted to those that have a
 * value for {@code classFieldName}), scores each document's text with the current per-term weights,
 * and nudges the weights of the document's terms up or down whenever the predicted class disagrees
 * with the stored class. Weights are kept in a {@link SortedMap} during training and periodically
 * frozen into an {@link FST} (every {@code batchSize} mismatching documents), which is also the
 * structure consulted at classification time.
 *
 * <p>NOTE(review): this class is not thread-safe during construction; after the constructor
 * returns, {@link #assignClass(String)} only reads the final {@code fst} reference.
 */
public class BooleanPerceptronClassifier implements Classifier<Boolean> {

  /** Decision threshold: a text is classified {@code true} when its summed term weights reach this value. */
  private final Double bias;

  /** Vocabulary (terms + statistics) of the text field, used to seed the initial weights. */
  private final Terms textTerms;

  /** Analyzer used to tokenize unseen text in {@link #assignClass(String)}. */
  private final Analyzer analyzer;

  /** Name of the field holding the text to classify. */
  private final String textFieldName;

  /** Frozen snapshot of the per-term weights; rebuilt after each training batch. */
  private FST<Long> fst;

  /**
   * Creates and immediately trains the classifier against the given index.
   *
   * @param indexReader the reader over the training index; term vectors must be stored for
   *        {@code textFieldName}
   * @param analyzer the analyzer used to tokenize unseen text at classification time
   * @param query an optional query restricting the training documents; may be {@code null}
   * @param batchSize number of mismatching documents between FST rebuilds; must be non-null and
   *        non-zero (a {@code null} value fails with an NPE on unboxing)
   * @param bias explicit decision threshold; if {@code null} or {@code 0}, defaults to the average
   *        number of tokens per document in {@code textFieldName}
   * @param classFieldName name of the field holding the (boolean-valued) class
   * @param textFieldName name of the field holding the text
   * @throws IOException if index statistics or term vectors are unavailable, or on index access error
   */
  public BooleanPerceptronClassifier(IndexReader indexReader, Analyzer analyzer, Query query, Integer batchSize,
      Double bias, String classFieldName, String textFieldName) throws IOException {
    this.textTerms = MultiTerms.getTerms(indexReader, textFieldName);
    if (textTerms == null) {
      throw new IOException("term vectors need to be available for field " + textFieldName);
    }
    this.analyzer = analyzer;
    this.textFieldName = textFieldName;

    if (bias == null || bias == 0d) {
      // No explicit bias: default to the average token count per document.
      // Check the raw statistic for the -1 "unavailable" sentinel BEFORE dividing;
      // testing the quotient against -1 (as the previous code did) can never match
      // once the value has been divided by the doc count.
      long sumTotalTermFreq = indexReader.getSumTotalTermFreq(textFieldName);
      if (sumTotalTermFreq != -1) {
        this.bias =
            (double) sumTotalTermFreq / (double) indexReader.getDocCount(textFieldName);
      } else {
        throw new IOException(
            "bias cannot be assigned since term vectors for field "
                + textFieldName + " do not exist");
      }
    } else {
      this.bias = bias;
    }

    // Seed every vocabulary term with its total frequency as the initial weight,
    // then freeze the initial FST so assignClass() can run during training.
    SortedMap<String, Double> weights = new ConcurrentSkipListMap<>();
    TermsEnum termsEnum = textTerms.iterator();
    BytesRef textTerm;
    while ((textTerm = termsEnum.next()) != null) {
      weights.put(textTerm.utf8ToString(), (double) termsEnum.totalTermFreq());
    }
    updateFST(weights);

    IndexSearcher indexSearcher = new IndexSearcher(indexReader);
    int batchCount = 0;

    // Only train on documents that actually carry a class value; optionally
    // intersect with the user-supplied query.
    BooleanQuery.Builder q = new BooleanQuery.Builder();
    q.add(new BooleanClause(new WildcardQuery(new Term(classFieldName, "*")), BooleanClause.Occur.MUST));
    if (query != null) {
      q.add(new BooleanClause(query, BooleanClause.Occur.MUST));
    }

    for (ScoreDoc scoreDoc : indexSearcher.search(q.build(), Integer.MAX_VALUE).scoreDocs) {
      Document doc = indexSearcher.doc(scoreDoc.doc);
      IndexableField textField = doc.getField(textFieldName);
      IndexableField classField = doc.getField(classFieldName);
      if (textField != null && classField != null) {
        // Classify with the current weights and compare against the stored class.
        ClassificationResult<Boolean> classificationResult = assignClass(textField.stringValue());
        Boolean assignedClass = classificationResult.getAssignedClass();
        Boolean correctClass = Boolean.valueOf(classField.stringValue());
        // compareTo yields -1/0/+1: the sign is the direction of the weight correction.
        long modifier = correctClass.compareTo(assignedClass);
        if (modifier != 0) {
          updateWeights(indexReader, scoreDoc.doc, assignedClass,
              weights, modifier, batchCount % batchSize == 0);
        }
        batchCount++;
      }
    }
    // The FST holds the final weights; release the training map.
    weights.clear();
  }

  /**
   * Applies the perceptron update to the weights of every term in the given document,
   * optionally freezing the result into a new FST.
   *
   * @param indexReader reader used to fetch the document's term vector
   * @param docId the mispredicted document
   * @param assignedClass the (wrong) class the classifier produced; no-op per term if {@code null}
   * @param weights the live weight map being trained
   * @param modifier +1 or -1, the correction direction times the term frequency
   * @param updateFST whether to rebuild the FST from {@code weights} after this document
   * @throws IOException if no term vector is stored for the document's text field
   */
  private void updateWeights(IndexReader indexReader,
      int docId, Boolean assignedClass, SortedMap<String, Double> weights,
      double modifier, boolean updateFST) throws IOException {
    Terms terms = indexReader.getTermVector(docId, textFieldName);
    if (terms == null) {
      throw new IOException("term vectors must be stored for field "
          + textFieldName);
    }
    // Loop-invariant guard hoisted out of the per-term loop; the previous code also
    // performed an unused seekExact() on a second TermsEnum for every term, which
    // was pure wasted index I/O and has been removed.
    if (assignedClass != null) {
      TermsEnum termsEnum = terms.iterator();
      BytesRef term;
      while ((term = termsEnum.next()) != null) {
        long termFreqLocal = termsEnum.totalTermFreq();
        // Current weight comes from the last frozen FST, not the live map.
        Long previousValue = Util.get(fst, term);
        String termString = term.utf8ToString();
        // Weights are clamped at 0 so a term can never carry a negative vote.
        weights.put(termString, previousValue == null ? 0 : Math.max(0, previousValue + modifier * termFreqLocal));
      }
    }
    if (updateFST) {
      updateFST(weights);
    }
  }

  /**
   * Rebuilds the FST from the current weight map. Weights are truncated to {@code long}
   * since the FST stores {@link PositiveIntOutputs}.
   *
   * @param weights sorted term-to-weight map (FST construction requires sorted input)
   * @throws IOException on FST build failure
   */
  private void updateFST(SortedMap<String, Double> weights) throws IOException {
    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
    Builder<Long> fstBuilder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs);
    BytesRefBuilder scratchBytes = new BytesRefBuilder();
    IntsRefBuilder scratchInts = new IntsRefBuilder();
    for (Map.Entry<String, Double> entry : weights.entrySet()) {
      scratchBytes.copyChars(entry.getKey());
      fstBuilder.add(Util.toIntsRef(scratchBytes.get(), scratchInts), entry
          .getValue().longValue());
    }
    fst = fstBuilder.finish();
  }

  /**
   * Classifies the given text by summing the FST weights of its analyzed tokens and
   * comparing the sum against the bias threshold.
   *
   * @param text the raw text to classify
   * @return the assigned class ({@code true} iff the weight sum reaches the bias) together
   *         with a score in [0,1) that grows with the distance of the sum from the bias
   * @throws IOException on analysis error
   */
  @Override
  public ClassificationResult<Boolean> assignClass(String text)
      throws IOException {
    Long output = 0L;
    try (TokenStream tokenStream = analyzer.tokenStream(textFieldName, text)) {
      CharTermAttribute charTermAttribute = tokenStream
          .addAttribute(CharTermAttribute.class);
      tokenStream.reset();
      while (tokenStream.incrementToken()) {
        String s = charTermAttribute.toString();
        Long d = Util.get(fst, new BytesRef(s));
        if (d != null) {
          // Unknown tokens (no FST entry) contribute nothing.
          output += d;
        }
      }
      tokenStream.end();
    }
    // Map the |bias - sum| distance to (0,1) via 1 - e^(-distance/bias).
    double score = 1 - Math.exp(-1 * Math.abs(bias - output.doubleValue()) / bias);
    return new ClassificationResult<>(output >= bias, score);
  }

  /**
   * Not supported by this binary classifier.
   *
   * @return always {@code null}
   */
  @Override
  public List<ClassificationResult<Boolean>> getClasses(String text)
      throws IOException {
    return null;
  }

  /**
   * Not supported by this binary classifier.
   *
   * @return always {@code null}
   */
  @Override
  public List<ClassificationResult<Boolean>> getClasses(String text, int max)
      throws IOException {
    return null;
  }
}