
Java - Processing the contents of each file within a directory

I am using Stanford CoreNLP to perform sentiment analysis on 25,000 individual text movie reviews contained in a single directory. To do this, the Stanford code needs to be modified slightly, since it only analyzes individual sentences within a single text file.

Here is my attempt at doing this:

import java.io.File; 
import java.io.IOException; 
import java.nio.charset.Charset; 
import java.util.Iterator; 
import java.util.List; 
import java.util.Map; 
import java.util.Properties; 

import org.apache.commons.io.FileUtils; 

import com.google.common.io.Files; 

import edu.stanford.nlp.dcoref.CorefChain; 
import edu.stanford.nlp.dcoref.CorefCoreAnnotations.CorefChainAnnotation; 
import edu.stanford.nlp.ling.CoreAnnotations.NamedEntityTagAnnotation; 
import edu.stanford.nlp.ling.CoreAnnotations.PartOfSpeechAnnotation; 
import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; 
import edu.stanford.nlp.ling.CoreAnnotations.TextAnnotation; 
import edu.stanford.nlp.ling.CoreAnnotations.TokensAnnotation; 
import edu.stanford.nlp.ling.CoreLabel; 
import edu.stanford.nlp.pipeline.Annotation; 
import edu.stanford.nlp.pipeline.StanfordCoreNLP; 
import edu.stanford.nlp.semgraph.SemanticGraph; 
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; 
import edu.stanford.nlp.trees.Tree; 
import edu.stanford.nlp.trees.TreeCoreAnnotations.TreeAnnotation; 
import edu.stanford.nlp.util.CoreMap; 

/** A simple corenlp example ripped directly from the Stanford CoreNLP website using text from wikinews. */ 
public class sentimentMain { 

    public static void main(String[] args) throws IOException { 
    // creates a StanfordCoreNLP object, with POS tagging, lemmatization, NER, parsing, and coreference resolution 
    Properties props = new Properties(); 
    props.put("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref"); 
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props); 

    // read some text from the file.. 
    Iterator it = FileUtils.iterateFiles(new File("C:\\stanford-corenlp-full-2016-10-31\\train\\neg"), null, false); 
    Iterator it1 = FileUtils.iterateFiles(new File("C:\\stanford-corenlp-full-2016-10-31\\train\\pos"), null, false); 
    Iterator it2 = FileUtils.iterateFiles(new File("C:\\stanford-corenlp-full-2016-10-31\\train\\unsup"), null, false); 

    File inputFile = new File ((String) (it.next())); 
    String text = Files.toString(inputFile, Charset.forName("UTF-8")); 
    System.out.println(text); 

    //File inputFile = new File("C:/stanford-corenlp-full-2016-10-31/input.txt"); 
    //String text = Files.toString(inputFile, Charset.forName("UTF-8")); 

    // create an empty Annotation just with the given text 
    Annotation document = new Annotation(text); 

    // run all Annotators on this text 
    pipeline.annotate(document); 

    // these are all the sentences in this document 
    // a CoreMap is essentially a Map that uses class objects as keys and has values with custom types 
    List<CoreMap> sentences = document.get(SentencesAnnotation.class); 

    for(CoreMap sentence: sentences) { 
     // traversing the words in the current sentence 
     // a CoreLabel is a CoreMap with additional token-specific methods 
     for (CoreLabel token: sentence.get(TokensAnnotation.class)) { 
     // this is the text of the token 
     String word = token.get(TextAnnotation.class); 
     // this is the POS tag of the token 
     String pos = token.get(PartOfSpeechAnnotation.class); 
     // this is the NER label of the token 
     String ne = token.get(NamedEntityTagAnnotation.class); 

     System.out.println("word: " + word + " pos: " + pos + " ne:" + ne); 
     } 

     // this is the parse tree of the current sentence 
     Tree tree = sentence.get(TreeAnnotation.class); 
     System.out.println("parse tree:\n" + tree); 

     // this is the Stanford dependency graph of the current sentence 
     SemanticGraph dependencies = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class); 
     System.out.println("dependency graph:\n" + dependencies); 
    } 

    // This is the coreference link graph 
    // Each chain stores a set of mentions that link to each other, 
    // along with a method for getting the most representative mention 
    // Both sentence and token offsets start at 1! 
    Map<Integer, CorefChain> graph = 
     document.get(CorefChainAnnotation.class); 

    } 

} 

When I run this, I get the following error:

Exception in thread "main" java.lang.ClassCastException: java.io.File cannot be cast to java.lang.String 
    at sentimentMain.main(sentimentMain.java:46) 

I understand that "it.next()" cannot be cast to a String, but does anyone know of another way to make sure the contents of each file are read in as a String for processing?

Thanks in advance :)


'Annotation document = new Annotation(text);' tries to access the 'text' variable, which is not in scope there: you defined it inside the 'while (it.hasNext()) {' loop. –

Answer


Honestly, any decent IDE would have shown you this: it is a compile error. The variable "text" is not visible outside the while loop. Either declare it before the while loop starts, or move the document declaration inside the while loop.

Please find the edited code below.

import java.io.File; 
import java.io.IOException; 
import java.nio.charset.Charset; 
import java.util.Iterator; 
import java.util.List; 
import java.util.Map; 
import java.util.Properties; 

import org.apache.commons.io.FileUtils; 

import com.google.common.io.Files; 

import edu.stanford.nlp.dcoref.CorefChain; 
import edu.stanford.nlp.dcoref.CorefCoreAnnotations.CorefChainAnnotation; 
import edu.stanford.nlp.ling.CoreAnnotations.NamedEntityTagAnnotation; 
import edu.stanford.nlp.ling.CoreAnnotations.PartOfSpeechAnnotation; 
import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; 
import edu.stanford.nlp.ling.CoreAnnotations.TextAnnotation; 
import edu.stanford.nlp.ling.CoreAnnotations.TokensAnnotation; 
import edu.stanford.nlp.ling.CoreLabel; 
import edu.stanford.nlp.pipeline.Annotation; 
import edu.stanford.nlp.pipeline.StanfordCoreNLP; 
import edu.stanford.nlp.semgraph.SemanticGraph; 
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; 
import edu.stanford.nlp.trees.Tree; 
import edu.stanford.nlp.trees.TreeCoreAnnotations.TreeAnnotation; 
import edu.stanford.nlp.util.CoreMap; 

/** A simple corenlp example ripped directly from the Stanford CoreNLP website using text from wikinews. */ 
public class sentimentMain { 

    public static void main(String[] args) throws IOException { 
    // creates a StanfordCoreNLP object, with POS tagging, lemmatization, NER, parsing, and coreference resolution 
    Properties props = new Properties(); 
    props.put("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref"); 
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props); 

    // read some text from the file.. 
    Iterator it = FileUtils.iterateFiles(new File("C:\\stanford-corenlp-full-2016-10-31\\train\\neg"), null, false); 
    Iterator it1 = FileUtils.iterateFiles(new File("C:\\stanford-corenlp-full-2016-10-31\\train\\pos"), null, false); 
    Iterator it2 = FileUtils.iterateFiles(new File("C:\\stanford-corenlp-full-2016-10-31\\train\\unsup"), null, false); 

    while (it.hasNext()) {

        File inputFile = new File((String) (it.next()));
        String text = Files.toString(inputFile, Charset.forName("UTF-8"));
        System.out.println(text);

        // create an empty Annotation just with the given text
        Annotation document = new Annotation(text);

        // run all Annotators on this text
        pipeline.annotate(document);

        // these are all the sentences in this document
        // a CoreMap is essentially a Map that uses class objects as keys and has values with custom types
        List<CoreMap> sentences = document.get(SentencesAnnotation.class);

        for (CoreMap sentence : sentences) {
            // traversing the words in the current sentence
            // a CoreLabel is a CoreMap with additional token-specific methods
            for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
                // this is the text of the token
                String word = token.get(TextAnnotation.class);
                // this is the POS tag of the token
                String pos = token.get(PartOfSpeechAnnotation.class);
                // this is the NER label of the token
                String ne = token.get(NamedEntityTagAnnotation.class);

                System.out.println("word: " + word + " pos: " + pos + " ne:" + ne);
            }

            // this is the parse tree of the current sentence
            Tree tree = sentence.get(TreeAnnotation.class);
            System.out.println("parse tree:\n" + tree);

            // this is the Stanford dependency graph of the current sentence
            SemanticGraph dependencies = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
            System.out.println("dependency graph:\n" + dependencies);
        }

        // This is the coreference link graph
        // Each chain stores a set of mentions that link to each other,
        // along with a method for getting the most representative mention
        // Both sentence and token offsets start at 1!
        Map<Integer, CorefChain> graph =
            document.get(CorefChainAnnotation.class);

    }
    }

} 

This code fixed the problem I was having; the remaining issue is that it.next() still cannot be cast. – user7575479


Exception in thread "main" java.lang.ClassCastException: java.io.File cannot be cast to java.lang.String at sentimentMain.main(sentimentMain.java:50) – user7575479
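
The remaining ClassCastException comes from the cast itself: FileUtils.iterateFiles returns an Iterator<File>, so each element is already a java.io.File and must be cast to File, not String. A minimal sketch of the corrected read loop, assuming the same directory layout as in the code above:

Iterator it = FileUtils.iterateFiles(new File("C:\\stanford-corenlp-full-2016-10-31\\train\\neg"), null, false); 

while (it.hasNext()) { 
    // iterateFiles yields File objects, so cast to File instead of String 
    File inputFile = (File) it.next(); 
    String text = Files.toString(inputFile, Charset.forName("UTF-8")); 

    // text is in scope here, so the Annotation can be built inside the loop 
    Annotation document = new Annotation(text); 
    pipeline.annotate(document); 
    // ... per-sentence processing as in the code above ... 
} 

Declaring the iterator as Iterator<File> instead of a raw Iterator would remove the need for the cast entirely and let the compiler catch this kind of mistake.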