
Apache Tika and Apache Solr integration through the Java API

I am trying to integrate Apache Tika and Apache Solr so that I can index my parsed data. I am using Solr version 4.3.1 and Tika version 2.11.6.

My code is the following:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.UUID;

import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.common.SolrInputDocument;

import org.apache.tika.exception.TikaException;
import org.apache.tika.metadata.DublinCore;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.mime.MimeTypes;
import org.apache.tika.parser.AutoDetectParser;
import org.apache.tika.parser.ParseContext;
import org.apache.tika.parser.Parser;
import org.apache.tika.sax.BodyContentHandler;

import org.xml.sax.ContentHandler;
import org.xml.sax.SAXException;

public class Main {

    private static SolrServer solr;

    public static void main(String[] args) throws IOException, SAXException, TikaException {
        try {
            solr = new HttpSolrServer("http://localhost:8983/solr/#/"); //create solr connection

            //solr.deleteByQuery("*:*"); //delete everything in the index; good for testing

            //location of source documents
            //later this will be switched to a database
            String path = "C:\\content\\";

            String file_html = path + "mobydick.htm";
            String file_txt = path + "/home/ben/abc.warc";
            String file_pdf = path + "callofthewild.pdf";

            processDocument(file_html);
            processDocument(file_txt);
            processDocument(file_pdf);

            solr.commit(); //after all docs are added, commit to the index

            //now you can search at http://localhost:8983/solr/browse
        }
        catch (Exception ex) {
            System.out.println(ex.getMessage());
        }
    }

    private static void processDocument(String pathfilename) {
        try {
            InputStream input = new FileInputStream(new File(pathfilename));

            //use Apache Tika to convert documents in different formats to plain text
            ContentHandler textHandler = new BodyContentHandler(10*1024*1024);
            Metadata meta = new Metadata();
            Parser parser = new AutoDetectParser(); //handles documents in different formats
            ParseContext context = new ParseContext();
            parser.parse(input, textHandler, meta, context); //convert to plain text

            //collect metadata and content from Tika and other sources

            //document id must be unique, use a GUID
            UUID guid = java.util.UUID.randomUUID();
            String docid = guid.toString();

            //Dublin Core metadata (partial set)
            String doctitle = meta.get(DublinCore.TITLE);
            String doccreator = meta.get(DublinCore.CREATOR);

            //other metadata
            String docurl = pathfilename; //document url

            //content
            String doccontent = textHandler.toString();

            //call to index
            indexDocument(docid, doctitle, doccreator, docurl, doccontent);
        }
        catch (Exception ex) {
            System.out.println(ex.getMessage());
        }
    }

    private static void indexDocument(String docid, String doctitle, String doccreator, String docurl, String doccontent) {
        try {
            SolrInputDocument doc = new SolrInputDocument();
            doc.addField("id", docid);

            //map metadata fields to the default schema
            //location: path\solr-4.7.2\example\solr\collection1\conf\schema.xml

            //Dublin Core
            //thought: the schema could be modified to use Dublin Core
            doc.addField("title", doctitle);
            doc.addField("author", doccreator);

            //other metadata
            doc.addField("url", docurl);

            //content (and text)
            //per the schema, the content field is not indexed by default; it is used for returning and highlighting document content
            //the schema "copyField" command automatically copies it to the "text" field, which is indexed
            doc.addField("content", doccontent);

            //indexing
            //when a field is indexed, like "text", Solr handles tokenization, stemming, stopword removal etc., per the schema definition

            //add to index
            solr.add(doc);
        }
        catch (Exception ex) {
            System.out.println(ex.getMessage());
        }
    }
}

Error:

Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/http/NoHttpResponseException
    at Main.main(Main.java:28)
Caused by: java.lang.ClassNotFoundException: org.apache.http.NoHttpResponseException
    at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
    at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:331)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
    ... 1 more

Answer

This does not seem to be Tika-related, so I will focus on Solr.

Specifically, how are you including the Solr libraries and their dependencies? If you pull them in through Maven, they should work. But if you are doing it manually, perhaps you are missing one or two.

More specifically, the error message is about a missing class that is distributed with the Apache Commons HTTP (client) library. It is probably missing from your dependencies or your classpath.
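As a side note: when the dependencies come from Maven, the org.apache.solr:solr-solrj artifact normally pulls in org.apache.httpcomponents:httpclient and httpcore transitively, so a missing class like this usually points at a hand-assembled classpath. One quick way to confirm whether the jar is visible at runtime is to try loading the reported class by name; a minimal diagnostic sketch:

public class ClasspathCheck {
    public static void main(String[] args) {
        try {
            // the class named in the NoClassDefFoundError above
            Class.forName("org.apache.http.NoHttpResponseException");
            System.out.println("HttpComponents is on the classpath");
        } catch (ClassNotFoundException e) {
            System.out.println("httpclient/httpcore jar is missing from the classpath");
        }
    }
}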


I added all the dependencies through Maven. Can you tell me how to connect this code to the Solr server?


Well, the error says a library is missing. You have to fix that first.
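Regarding the follow-up about connecting the code to the Solr server: once the classpath issue is resolved, SolrJ is normally pointed at the URL of a specific core rather than the admin UI (the "#/" URL in the question). A minimal connection sketch, assuming the default Solr 4.x example core collection1:

import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.common.SolrInputDocument;

public class SolrConnectionSketch {
    public static void main(String[] args) throws Exception {
        // point SolrJ at a concrete core, not the admin page;
        // "collection1" is the default core of the Solr 4.x example and is an assumption here
        SolrServer solr = new HttpSolrServer("http://localhost:8983/solr/collection1");

        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", "test-1");
        doc.addField("title", "connection test");

        solr.add(doc);    // send the document to the server
        solr.commit();    // make it visible to searches
        solr.shutdown();  // release the underlying HTTP client
    }
}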