YARN - Error in running Spark application
I have a simple Spark application that I run on YARN against pre-built Spark 1.4.1, and I get the strange error message that follows (a sketch of the application itself comes after the trace):
Exception in thread "main" java.lang.NoClassDefFoundError: com/google/protobuf/ServiceException
    at org.apache.hadoop.ipc.ProtobufRpcEngine.<clinit>(ProtobufRpcEngine.java:69)
    at java.lang.Class.forName0(Native Method)
    at java.lang.Class.forName(Class.java:270)
    at org.apache.hadoop.conf.Configuration.getClassByNameOrNull(Configuration.java:1659)
    at org.apache.hadoop.conf.Configuration.getClassByName(Configuration.java:1624)
    at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:1718)
    at org.apache.hadoop.ipc.RPC.getProtocolEngine(RPC.java:203)
    at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:537)
    at org.apache.hadoop.hdfs.NameNodeProxies.createNNProxyWithClientProtocol(NameNodeProxies.java:328)
    at org.apache.hadoop.hdfs.NameNodeProxies.createNonHAProxy(NameNodeProxies.java:235)
    at org.apache.hadoop.hdfs.NameNodeProxies.createProxy(NameNodeProxies.java:139)
    at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:510)
    at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:453)
    at org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:136)
    at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2433)
    at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:88)
    at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2467)
    at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2449)
    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:367)
    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:166)
    at org.apache.hadoop.mapred.JobConf.getWorkingDirectory(JobConf.java:653)
    at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:389)
    at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:362)
    at org.apache.spark.SparkContext$$anonfun$hadoopFile$1$$anonfun$32.apply(SparkContext.scala:980)
    at org.apache.spark.SparkContext$$anonfun$hadoopFile$1$$anonfun$32.apply(SparkContext.scala:980)
    at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:176)
    at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$6.apply(HadoopRDD.scala:176)
    at scala.Option.map(Option.scala:145)
    at org.apache.spark.rdd.HadoopRDD.getJobConf(HadoopRDD.scala:176)
    at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:200)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
    at org.apache.spark.Partitioner$.defaultPartitioner(Partitioner.scala:65)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$reduceByKey$3.apply(PairRDDFunctions.scala:290)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$reduceByKey$3.apply(PairRDDFunctions.scala:290)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:147)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:108)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:286)
    at org.apache.spark.rdd.PairRDDFunctions.reduceByKey(PairRDDFunctions.scala:289)
    at org.apache.spark.mllib.feature.Word2Vec.learnVocab(Word2Vec.scala:149)
    at org.apache.spark.mllib.feature.Word2Vec.fit(Word2Vec.scala:267)
    at SimpleApp$.main(SimpleApp.scala:12)
    at SimpleApp.main(SimpleApp.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:665)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:170)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:193)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:112)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.ClassNotFoundException: com.google.protobuf.ServiceException
    at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
    at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
    at java.security.AccessController.doPrivileged(Native Method)
    at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
    at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
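For reference, the application is essentially the MLlib Word2Vec example. The sketch below is reconstructed from the stack trace (SimpleApp.scala:12 is the word2vec.fit call); the input path, delimiter, and configuration are placeholders, not my exact code:

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.feature.Word2Vec

object SimpleApp {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("SimpleApp")
    val sc = new SparkContext(conf)

    // sc.textFile builds a HadoopRDD, which is where the HDFS client (and its
    // protobuf dependency) gets pulled in, per the trace above.
    val sentences = sc.textFile("hdfs:///path/to/corpus.txt")
      .map(line => line.split(" ").toSeq)

    val word2vec = new Word2Vec()
    val model = word2vec.fit(sentences) // roughly the call at SimpleApp.scala:12

    sc.stop()
  }
}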
I tried adding protobuf-java-2.0.3.jar to the classpath, but it didn't help. Any idea what the problem could be?
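In case the build setup is relevant, the dependencies could be declared roughly like the sbt sketch below (this is a reconstruction, assuming an sbt build; the versions are guesses, and the protobuf-java 2.5.0 line reflects the version Hadoop 2.x is typically compiled against rather than something I have verified):

name := "simple-app"

scalaVersion := "2.10.4"

// Spark itself comes from the pre-built 1.4.1 distribution, so it is "provided".
libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-core"  % "1.4.1" % "provided",
  "org.apache.spark" %% "spark-mllib" % "1.4.1" % "provided",
  // Assumption: Hadoop 2.x is normally built against protobuf-java 2.5.0,
  // so that version is shown here instead of the 2.0.3 jar I tried.
  "com.google.protobuf" % "protobuf-java" % "2.5.0"
)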