2013-04-21 53 views
0
public static void main(String[] args) {
    // Configure a full annotation pipeline: tokenization, sentence splitting,
    // POS tagging, lemmatization, NER, constituency parsing and coreference.
    Properties props = new Properties();
    props.put("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    // The raw text to analyze.
    String text = "the quick fox jumps over the lazy dog";

    // Wrap the text in an Annotation and run every configured annotator on it.
    Annotation document = new Annotation(text);
    pipeline.annotate(document);

    // A CoreMap is essentially a map keyed by annotation classes, with
    // typed values; each sentence of the document is one CoreMap.
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);

    for (CoreMap sentence : sentences) {
        // A CoreLabel is a CoreMap with additional token-specific accessors.
        for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
            String tokenText = token.get(TextAnnotation.class);        // surface form
            String posTag = token.get(PartOfSpeechAnnotation.class);   // part-of-speech tag
            String nerTag = token.get(NamedEntityTagAnnotation.class); // NER label
        }

        // Constituency parse tree of the current sentence.
        Tree parseTree = sentence.get(TreeAnnotation.class);

        // Collapsed, CC-processed Stanford dependency graph of the sentence.
        SemanticGraph depGraph =
            sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
    }

    // Coreference chains for the whole document: each chain groups mentions
    // that refer to the same entity and exposes its most representative
    // mention. NOTE: sentence and token offsets in the chains are 1-based.
    Map<Integer, CorefChain> corefChains =
        document.get(CorefChainAnnotation.class);
}

}

我想用 Java 運行我的程序來創建 CoreNLP 對象,但我收到以下錯誤:

Loading classifier from edu/stanford/nlp/models/ner/english.all.3class.distsim.crf.ser.gz ... Exception in thread "main" java.lang.OutOfMemoryError: Java heap space 
at java.lang.StringBuilder.toString(StringBuilder.java:405) 
at java.io.ObjectInputStream$BlockDataInputStream.readUTFBody(ObjectInputStream.java:3066) 
at java.io.ObjectInputStream$BlockDataInputStream.readUTF(ObjectInputStream.java:2862) 
at java.io.ObjectInputStream.readString(ObjectInputStream.java:1636) 
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1339) 
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:370) 
at java.util.HashMap.readObject(HashMap.java:1154) 
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) 
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) 
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) 
at java.lang.reflect.Method.invoke(Method.java:601) 
at java.io.ObjectStreamClass.invokeReadObject(ObjectStreamClass.java:1004) 
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1891) 
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1796) 
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1348) 
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:1989) 
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1913) 
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1796) 
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1348) 
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:370) 
at edu.stanford.nlp.ie.crf.CRFClassifier.loadClassifier(CRFClassifier.java:2255) 
at edu.stanford.nlp.ie.AbstractSequenceClassifier.loadClassifier(AbstractSequenceClassifier.java:1444) 
at edu.stanford.nlp.ie.AbstractSequenceClassifier.loadClassifier(AbstractSequenceClassifier.java:1421) 
at edu.stanford.nlp.ie.AbstractSequenceClassifier.loadClassifier(AbstractSequenceClassifier.java:1500) 
at edu.stanford.nlp.ie.AbstractSequenceClassifier.loadClassifier(AbstractSequenceClassifier.java:1487) 
at edu.stanford.nlp.ie.crf.CRFClassifier.getClassifier(CRFClassifier.java:2386) 
at edu.stanford.nlp.ie.ClassifierCombiner.loadClassifierFromPath(ClassifierCombiner.java:130) 
at edu.stanford.nlp.ie.ClassifierCombiner.loadClassifiers(ClassifierCombiner.java:116) 
at edu.stanford.nlp.ie.ClassifierCombiner.<init>(ClassifierCombiner.java:98) 
at edu.stanford.nlp.ie.NERClassifierCombiner.<init>(NERClassifierCombiner.java:64) 
at edu.stanford.nlp.pipeline.StanfordCoreNLP$6.create(StanfordCoreNLP.java:500) 
at edu.stanford.nlp.pipeline.StanfordCoreNLP$6.create(StanfordCoreNLP.java:471) 

我不知道該如何增加堆大小,也不確定此錯誤是否與另一個我無法理解的問題有關。有人能幫助我嗎?(對不起,我的英文不好)

回答

1

如果你的項目是 Java 項目:右擊項目 ---> 選擇「屬性」---> 點擊「運行」---> 在右側你會看到「虛擬機選項」(VM Options),在那裡輸入堆大小設置。

例子:

-Xmx512m 

注:如果 512MB 不夠,請增加至 1024MB(-Xmx1024m)再嘗試。

+0

但是,實際上,如果使用的是 64 位 Java,您可能需要 3GB(-Xmx3g)。 – 2013-05-11 17:13:02