java - Hadoop: expected org.apache.hadoop.io.LongWritable, received org.apache.hadoop.io.Text


I am new to Hadoop and am trying to run a sample program from a book. I am facing the error

java.io.IOException: Type mismatch in key from map: expected org.apache.hadoop.io.LongWritable, recieved org.apache.hadoop.io.Text

Please help me resolve this error. Below is the code:

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class HadoopJob extends Configured implements Tool {

    public static class MapperClass extends MapReduceBase implements Mapper<Text, Text, Text, Text> {

        @Override
        public void map(Text key, Text value, OutputCollector<Text, Text> output,
                Reporter reporter) throws IOException {
            // Swap key and value before emitting
            output.collect(value, key);
        }
    }

    public static class ReducerClass extends MapReduceBase implements Reducer<Text, Text, Text, Text> {

        @Override
        public void reduce(Text key, Iterator<Text> values,
                OutputCollector<Text, Text> output, Reporter reporter) throws IOException {

            // Join all values for this key into a comma-separated string
            String csv = "";
            while (values.hasNext()) {
                if (csv.length() > 0) {
                    csv += ",";
                }
                csv += values.next().toString();
            }

            output.collect(key, new Text(csv));
        }
    }

    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(), new HadoopJob(), args);
        System.exit(res);
    }

    @Override
    public int run(String[] arg0) throws Exception {

        Configuration conf = getConf();
        JobConf job = new JobConf(conf, HadoopJob.class);

        Path in = new Path("/home/sumit/documents/samples/wordcount/input");
        Path out = new Path("/home/sumit/documents/samples/wordcount/output");

        // Delete the output directory if it already exists
        FileSystem fs = FileSystem.get(new Configuration());
        if (fs.exists(out)) {
            fs.delete(out, true);
        }

        FileInputFormat.setInputPaths(job, in);
        FileOutputFormat.setOutputPath(job, out);

        job.setJobName("Hadoop Job");
        job.setMapperClass(MapperClass.class);
        job.setReducerClass(ReducerClass.class);

        job.setInputFormat(KeyValueTextInputFormat.class);
        job.setOutputFormat(TextOutputFormat.class);
        job.setOutputValueClass(Text.class);
        job.set("key.value.separator.in.input.line", ",");

        JobClient.runJob(job);

        return 0;
    }
}

Also, please see the traces below:

13/05/18 21:04:54 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
13/05/18 21:04:54 WARN mapred.JobClient: No job jar file set.  User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
13/05/18 21:04:54 WARN snappy.LoadSnappy: Snappy native library not loaded
13/05/18 21:04:54 INFO mapred.FileInputFormat: Total input paths to process : 1
13/05/18 21:04:55 INFO mapred.JobClient: Running job: job_local_0001
13/05/18 21:04:55 INFO util.ProcessTree: setsid exited with exit code 0
13/05/18 21:04:55 INFO mapred.Task:  Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@5090d8ea
13/05/18 21:04:55 INFO mapred.MapTask: numReduceTasks: 1
13/05/18 21:04:55 INFO mapred.MapTask: io.sort.mb = 100
13/05/18 21:04:55 INFO mapred.MapTask: data buffer = 79691776/99614720
13/05/18 21:04:55 INFO mapred.MapTask: record buffer = 262144/327680
13/05/18 21:04:55 WARN mapred.LocalJobRunner: job_local_0001
java.io.IOException: Type mismatch in key from map: expected org.apache.hadoop.io.LongWritable, recieved org.apache.hadoop.io.Text
    at org.apache.hadoop.mapred.MapTask$MapOutputBuffer.collect(MapTask.java:1014)
    at org.apache.hadoop.mapred.MapTask$OldOutputCollector.collect(MapTask.java:592)
    at com.sis.hadoop.wordcount.HadoopJob$MapperClass.map(HadoopJob.java:32)
    at com.sis.hadoop.wordcount.HadoopJob$MapperClass.map(HadoopJob.java:1)
    at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:50)
    at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:436)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:372)
    at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:212)
13/05/18 21:04:56 INFO mapred.JobClient:  map 0% reduce 0%
13/05/18 21:04:56 INFO mapred.JobClient: Job complete: job_local_0001
13/05/18 21:04:56 INFO mapred.JobClient: Counters: 0
13/05/18 21:04:56 INFO mapred.JobClient: Job Failed: NA
Exception in thread "main" java.io.IOException: Job failed!
    at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1265)
    at com.sis.hadoop.wordcount.HadoopJob.run(HadoopJob.java:88)
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
    at com.sis.hadoop.wordcount.HadoopJob.main(HadoopJob.java:58)

You should explicitly define the output key class in the job configuration (as you have done for the value):

job.setOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);

The default output key class is LongWritable, which is why the framework rejects the Text keys your mapper emits.
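For reference, a minimal sketch of the run() method from the question with the fix applied; only the setOutputKeyClass line is new, everything else is unchanged from the code above:

    @Override
    public int run(String[] arg0) throws Exception {

        Configuration conf = getConf();
        JobConf job = new JobConf(conf, HadoopJob.class);

        Path in = new Path("/home/sumit/documents/samples/wordcount/input");
        Path out = new Path("/home/sumit/documents/samples/wordcount/output");

        FileSystem fs = FileSystem.get(new Configuration());
        if (fs.exists(out)) {
            fs.delete(out, true);
        }

        FileInputFormat.setInputPaths(job, in);
        FileOutputFormat.setOutputPath(job, out);

        job.setJobName("Hadoop Job");
        job.setMapperClass(MapperClass.class);
        job.setReducerClass(ReducerClass.class);

        job.setInputFormat(KeyValueTextInputFormat.class);
        job.setOutputFormat(TextOutputFormat.class);

        // Declare both output types. Without setOutputKeyClass the framework
        // assumes LongWritable keys and rejects the Text keys the mapper emits.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        job.set("key.value.separator.in.input.line", ",");

        JobClient.runJob(job);

        return 0;
    }

If your map output types ever differ from the final (reduce) output types, the old API also provides JobConf.setMapOutputKeyClass and JobConf.setMapOutputValueClass to declare them separately.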

