2016-06-07 113 views
0

我是Hadoop編程的新手,我已經開始通過在三節點集羣上設置Hadoop 2.7.1來學習。我曾嘗試運行Hadoop中出現的helloworld jar,它運行良好,但我在本地機器上寫了自己的驅動程序代碼,並將其捆綁到一個jar中並以此方式執行,但是它沒有出現錯誤消息。Hadoop Jar運行但沒有輸出。驅動程序,映射器和減少在namenode成功編譯

這是我的代碼,這就是我所做的。

WordCountMapper.java

package mot.com.bin.test; 

import java.io.IOException; 

import org.apache.hadoop.io.IntWritable; 
import org.apache.hadoop.io.LongWritable; 
import org.apache.hadoop.io.Text; 
import org.apache.hadoop.mapred.MapReduceBase; 
import org.apache.hadoop.mapred.Mapper; 
import org.apache.hadoop.mapred.OutputCollector; 
import org.apache.hadoop.mapred.Reporter; 



public class WordCountMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> 

{ 

     public void map(LongWritable key, Text Value, 
         OutputCollector<Text, IntWritable> opc, Reporter r) 
         throws IOException { 
       String s = Value.toString(); 
       for (String word :s.split(" ")) { 
         if(word.length() > 0) { 
           opc.collect(new Text(word), new IntWritable(1)); 
         } 
       } 

     } 


} 

WordCountReduce.java

package mot.com.bin.test; 

import java.io.IOException; 
import java.util.Iterator; 

import org.apache.hadoop.io.IntWritable; 
import org.apache.hadoop.io.Text; 
import org.apache.hadoop.mapred.MapReduceBase; 
import org.apache.hadoop.mapred.OutputCollector; 
import org.apache.hadoop.mapred.Reducer; 
import org.apache.hadoop.mapred.Reporter; 

public class WordCountReduce extends MapReduceBase implements Reducer < Text, IntWritable, Text, IntWritable>{ 

     public void reduce(Text key, Iterator<IntWritable> values, 
         OutputCollector<Text, IntWritable> opc, Reporter r) 
         throws IOException { 
       // TODO Auto-generated method stub 

       int i = 0; 
       while (values.hasNext()) { 
         IntWritable in = values.next(); 
         i+=in.get(); 
       } 
       opc.collect(key, new IntWritable (i)); 
     } 

WordCount.java

/** 
* **DRIVER** 
*/ 
package mot.com.bin.test; 

import org.apache.hadoop.conf.Configured; 
import org.apache.hadoop.fs.Path; 
import org.apache.hadoop.io.IntWritable; 
import org.apache.hadoop.mapred.FileInputFormat; 
import org.apache.hadoop.mapred.FileOutputFormat; 
import org.apache.hadoop.mapred.JobClient; 
import org.apache.hadoop.mapred.JobConf; 
import org.apache.hadoop.util.Tool; 
import org.apache.hadoop.io.Text; 
//import com.sun.jersey.core.impl.provider.entity.XMLJAXBElementProvider.Text; 

/** 
* @author rgb764 
* 
*/ 
public class WordCount extends Configured implements Tool{ 

     /** 
     * @param args 
     */ 
     public static void main(String[] args) { 
       // TODO Auto-generated method stub 

     } 

     public int run(String[] arg0) throws Exception { 

       if (arg0.length < 2) { 
         System.out.println("Need input file and output directory"); 
         return -1; 
       } 

       JobConf conf = new JobConf(); 

       FileInputFormat.setInputPaths(conf, new Path(arg0[0])); 
       FileOutputFormat.setOutputPath(conf, new Path(arg0[1])); 

       conf.setOutputKeyClass(Text.class); 
       conf.setOutputValueClass(IntWritable.class); 

       conf.setMapperClass(WordCountMapper.class); 
       conf.setReducerClass(WordCountReduce.class); 

       conf.setOutputKeyClass(Text.class); 
       conf.setOutputValueClass(IntWritable.class); 

       JobClient.runJob(conf); 
       return 0; 
     } 

} 

開始我嘗試提取它作爲Eclipse的一個罐子,在我的Hadoop運行簇。沒有錯誤,但沒有成功。然後將我的個人java文件移動到我的NameNode並編譯每個java文件,然後在那裏創建jar文件,仍然hadoop命令返回沒有結果,但沒有錯誤。請幫助我。

hadoop jar WordCout.jar mot.com.bin.test.WordCount /karthik/mytext.txt /tempo 

使用Maven提取所有相關的jar文件,我將它們添加到我的名稱節點中的類路徑中。幫助我確定我在哪裏出錯。

回答

1

國際海事組織你錯過了你的主要方法實例化工具實現(在你的情況WordCount)並運行相同的代碼。

 public static void main(String[] args) throws Exception {   
      int res = ToolRunner.run(new Configuration(), new WordCount(), args);   
      System.exit(res); 
    } 

參照this

相關問題