2014-11-03 41 views
0

您好,我正在嘗試從以下鏈接執行此HCatalog示例,遇到了HCatalog/Hive問題:java.lang.IllegalArgumentException:URI沒有方案(does not have a scheme)

http://www.cloudera.com/content/cloudera/en/documentation/cdh4/v4-2-0/CDH4-Installation-Guide/cdh4ig_topic_19_6.html

我得到以下異常,當我運行作業。

java.lang.IllegalArgumentException:URI沒有方案(does not have a scheme)

java類:

import java.io.IOException; 
import java.util.*; 

import org.apache.hadoop.fs.Path; 
import org.apache.hadoop.conf.*; 
import org.apache.hadoop.io.*; 
import org.apache.hadoop.mapreduce.*; 
import org.apache.hadoop.util.*; 
import org.apache.hcatalog.common.*; 
import org.apache.hcatalog.mapreduce.*; 
import org.apache.hcatalog.data.*; 
import org.apache.hcatalog.data.schema.*; 
import org.apache.hadoop.util.GenericOptionsParser; 
//import org.apache.commons.cli.Options; 

/**
 * MapReduce job that reads an HCatalog table mirroring /etc/group
 * (rows of name, 'x', id), counts nothing, and simply copies
 * (group name, group id) pairs into an HCatalog output table.
 *
 * Usage: hadoop jar UseHCat.jar UseHCat &lt;input-table&gt; &lt;output-table&gt;
 * Both tables are assumed to live in the "hadooppracticedb" database.
 */
public class UseHCat extends Configured implements Tool {

    /**
     * Mapper: emits (group name, group id) for every input record.
     * Field 0 of the record is the group name, field 2 the numeric id;
     * field 1 (the 'x' placeholder) is ignored.
     */
    public static class Map extends Mapper<WritableComparable, HCatRecord, Text, IntWritable> {
        String groupname;

        @Override
        protected void map(WritableComparable key,
                HCatRecord value,
                org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord,
                Text, IntWritable>.Context context)
                throws IOException, InterruptedException {
            groupname = (String) value.get(0);
            int id = (Integer) value.get(2);
            // Select and emit just the name and ID.
            context.write(new Text(groupname), new IntWritable(id));
        }
    }

    /**
     * Reducer: expects exactly one id per group name and writes a
     * two-field HCatalog record (name, id). The key is null because
     * HCatOutputFormat ignores the reducer output key.
     */
    public static class Reduce extends Reducer<Text, IntWritable,
            WritableComparable, HCatRecord> {

        @Override
        protected void reduce(Text key,
                java.lang.Iterable<IntWritable> values,
                org.apache.hadoop.mapreduce.Reducer<Text, IntWritable,
                WritableComparable, HCatRecord>.Context context)
                throws IOException, InterruptedException {
            // Only one ID is expected per group name; take the first value.
            Iterator<IntWritable> iter = values.iterator();
            int id = iter.next().get();
            HCatRecord record = new DefaultHCatRecord(2);
            record.set(0, key.toString());
            record.set(1, id);
            context.write(null, record);
        }
    }

    /**
     * Configures and submits the job.
     *
     * @param args remaining CLI args after generic options:
     *             [0] input table name, [1] output table name
     * @return 0 on success, 1 on job failure, 2 on bad usage
     */
    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        args = new GenericOptionsParser(conf, args).getRemainingArgs();

        // Guard against missing arguments instead of failing with
        // an ArrayIndexOutOfBoundsException below.
        if (args.length < 2) {
            System.err.println("Usage: UseHCat <input-table> <output-table>");
            return 2;
        }
        String inputTableName = args[0];
        String outputTableName = args[1];
        // Assume a fixed database.
        String dbName = "hadooppracticedb";

        // Job.getInstance replaces the deprecated Job(Configuration, String)
        // constructor, so @SuppressWarnings("deprecation") is no longer needed.
        Job job = Job.getInstance(conf, "UseHCat");
        // NOTE(review): setInput resolves hive.metastore.uris from the job
        // Configuration; an "IllegalArgumentException: URI ... does not have
        // a scheme" here usually means hive-site.xml was not on the client
        // classpath when the job was submitted — confirm deployment.
        HCatInputFormat.setInput(job, InputJobInfo.create(dbName,
                inputTableName, null));
        job.setJarByClass(UseHCat.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        // An HCatalog record as input.
        job.setInputFormatClass(HCatInputFormat.class);

        // Mapper emits a string as key and an integer as value.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // Ignore the key for the reducer output; emit an HCatalog record as value.
        job.setOutputKeyClass(WritableComparable.class);
        job.setOutputValueClass(DefaultHCatRecord.class);
        job.setOutputFormatClass(HCatOutputFormat.class);

        HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
                outputTableName, null));
        HCatSchema s = HCatOutputFormat.getTableSchema(job);
        System.err.println("INFO: output schema explicitly set for writing:" + s);
        HCatOutputFormat.setSchema(job, s);
        return (job.waitForCompletion(true) ? 0 : 1);
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new UseHCat(), args);
        System.exit(exitCode);
    }
}

這行越來越例外

HCatInputFormat.setInput(job, InputJobInfo.create(dbName, 
      inputTableName, null)); 

的Hadoop jar命令:

hadoop jar Hcat.jar com.otsi.hcat.UseHCat -files $HCATJAR -libjars ${LIBJARS} group groupid

我已在hive-site.xml中設置以下屬性。

hive-site.xml:

<property> 
     <name>hive.metastore.uris</name> 
     <value>thrift://localhost:9083</value> 
    </property> 

我已經在「hadooppracticedb」數據庫中建立了2個表:group 和 groupid。

請建議。

回答

0

您是否還沒有定義 hive.metastore.local 屬性?

如果您正在運行獨立MetaStore服務器,則只應設置hive.metastore.uris屬性,在這種情況下,您需要設置hive.metastore.local = false並將hive.metastore.uris設置爲Thrift URI。

請參閱本文檔的詳細信息:

https://cwiki.apache.org/confluence/display/Hive/AdminManual+MetastoreAdmin

+0

嗨,我使用的是Hive 0.13.0。hive.metastore.uris:Hive會連接到這些URI之一,向遠程Metastore發出元數據請求(逗號分隔的URI列表)。hive.metastore.local:本地或遠程Metastore(從Hive 0.10起已移除:如果 hive.metastore.uris 爲空則假定爲本地模式,否則爲遠程)。我在2節點羣集上運行此示例。 – user1217694 2014-11-03 10:49:41

+0

hive-site.xml中:hive.metastore.uris = thrift://NameNode:9083 – user1217694 2014-11-03 10:57:17