Error 1:

ERROR Executor: Exception in task 0.0 in stage 0.0 (TID )
java.lang.NoSuchMethodError: scala.Product.$init$(Lscala/Product;)V
at Person.<init>(RDD_To_DataFrame.scala:)
at RDD_To_DataFrame$.$anonfun$main$(RDD_To_DataFrame.scala:)
at scala.collection.Iterator$$anon$.next(Iterator.scala:)
at scala.collection.Iterator$$anon$.next(Iterator.scala:)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$$$anon$.hasNext(WholeStageCodegenExec.scala:)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$.apply(SparkPlan.scala:)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$.apply(SparkPlan.scala:)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$$$anonfun$apply$.apply(RDD.scala:)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$$$anonfun$apply$.apply(RDD.scala:)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:)
at org.apache.spark.scheduler.Task.run(Task.scala:)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:)
at java.lang.Thread.run(Thread.java:)
ERROR TaskSetManager: Task in stage 0.0 failed times; aborting job
Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task in stage 0.0 failed times, most recent failure: Lost task 0.0 in stage 0.0 (TID , localhost, executor driver): java.lang.NoSuchMethodError: scala.Product.$init$(Lscala/Product;)V
at Person.<init>(RDD_To_DataFrame.scala:)
at RDD_To_DataFrame$.$anonfun$main$(RDD_To_DataFrame.scala:)
at scala.collection.Iterator$$anon$.next(Iterator.scala:)
at scala.collection.Iterator$$anon$.next(Iterator.scala:)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$$$anon$.hasNext(WholeStageCodegenExec.scala:)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$.apply(SparkPlan.scala:)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$.apply(SparkPlan.scala:)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$$$anonfun$apply$.apply(RDD.scala:)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$$$anonfun$apply$.apply(RDD.scala:)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:)
at org.apache.spark.scheduler.Task.run(Task.scala:)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:)
at java.lang.Thread.run(Thread.java:) Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$.apply(DAGScheduler.scala:)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$.apply(DAGScheduler.scala:)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$.apply(DAGScheduler.scala:)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$.apply(DAGScheduler.scala:)
at scala.Option.foreach(Option.scala:)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:)
at org.apache.spark.util.EventLoop$$anon$.run(EventLoop.scala:)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:)
at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:)
at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:)
at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:)
at org.apache.spark.sql.Dataset$$anonfun$head$.apply(Dataset.scala:)
at org.apache.spark.sql.Dataset$$anonfun$head$.apply(Dataset.scala:)
at org.apache.spark.sql.Dataset$$anonfun$.apply(Dataset.scala:)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:)
at org.apache.spark.sql.Dataset.withAction(Dataset.scala:)
at org.apache.spark.sql.Dataset.head(Dataset.scala:)
at org.apache.spark.sql.Dataset.take(Dataset.scala:)
at org.apache.spark.sql.Dataset.showString(Dataset.scala:)
at org.apache.spark.sql.Dataset.show(Dataset.scala:)
at org.apache.spark.sql.Dataset.show(Dataset.scala:)
at org.apache.spark.sql.Dataset.show(Dataset.scala:)
at RDD_To_DataFrame$.main(RDD_To_DataFrame.scala:)
at RDD_To_DataFrame.main(RDD_To_DataFrame.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:)
at java.lang.reflect.Method.invoke(Method.java:)
at com.intellij.rt.execution.application.AppMain.main(AppMain.java:)
Caused by: java.lang.NoSuchMethodError: scala.Product.$init$(Lscala/Product;)V
at Person.<init>(RDD_To_DataFrame.scala:)
at RDD_To_DataFrame$.$anonfun$main$(RDD_To_DataFrame.scala:)
at scala.collection.Iterator$$anon$.next(Iterator.scala:)
at scala.collection.Iterator$$anon$.next(Iterator.scala:)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$$$anon$.hasNext(WholeStageCodegenExec.scala:)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$.apply(SparkPlan.scala:)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$.apply(SparkPlan.scala:)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$$$anonfun$apply$.apply(RDD.scala:)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$$$anonfun$apply$.apply(RDD.scala:)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:)
at org.apache.spark.scheduler.Task.run(Task.scala:)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:)
at java.lang.Thread.run(Thread.java:)

Fix: change the Scala version configured in IDEA to 2.10.4. This is a Scala version mismatch: scala.Product.$init$ only exists in the trait encoding introduced in Scala 2.12, so a NoSuchMethodError for it means the code was compiled with Scala 2.12 but is running against the older scala-library that the Spark build ships with. The project's Scala version must match the one your Spark distribution was built for.
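If the project is built with sbt, the same alignment can be pinned in build.sbt. A minimal sketch, assuming a Spark distribution built for Scala 2.10 (the Spark version below is illustrative; use whatever your cluster runs):

val sparkVersion = "2.1.0"  // illustrative placeholder

// Keep scalaVersion in sync with the Scala version your Spark
// artifacts were built for (their _2.10 / _2.11 / _2.12 suffix).
scalaVersion := "2.10.4"

libraryDependencies ++= Seq(
  // %% appends the Scala binary suffix automatically, so the Spark
  // artifacts always agree with scalaVersion above; mixing suffixes
  // is exactly what produces this NoSuchMethodError at runtime.
  "org.apache.spark" %% "spark-core" % sparkVersion,
  "org.apache.spark" %% "spark-sql"  % sparkVersion
)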

This problem mainly occurs when a Spark program uses a case class (case classes extend scala.Product, and it is the Product.$init$ call in their generated constructor that the mismatched runtime cannot find).
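For reference, this is the shape of code that hits the error. A minimal sketch reconstructed around the names in the stack trace (Person, RDD_To_DataFrame); the input path and the Person fields are illustrative assumptions:

import org.apache.spark.sql.SparkSession

// Case classes extend scala.Product; code compiled with Scala 2.12
// calls scala.Product.$init$ in the generated constructor, which is
// missing from a 2.10/2.11 scala-library at runtime.
case class Person(name: String, age: Int)

object RDD_To_DataFrame {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("RDD_To_DataFrame")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._

    // Each map call constructs a Person -- this is where the
    // NoSuchMethodError surfaces when the Scala versions disagree.
    val df = spark.sparkContext
      .textFile("people.txt")  // illustrative path
      .map(_.split(","))
      .map(attrs => Person(attrs(0), attrs(1).trim.toInt))
      .toDF()

    df.show()
    spark.stop()
  }
}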

Error 2:

Error:(, ) No TypeTag available for (Array[String],)
val documentDF= spark.createDataFrame(Seq(

Fix: change the Scala version configured in IDEA to 2.12.3. This is again a version-alignment problem: createDataFrame(Seq(...)) asks the compiler for an implicit TypeTag for the element type, and "No TypeTag available" is typically a sign that the scala-reflect library on the classpath does not match the compiler version, so the IDE's Scala version must agree with the project's dependencies.

This problem mainly occurs when a Spark program passes a Seq to createDataFrame:

For example:

// The leading id values were lost from the original post; 0-4 below are placeholders.
val df = spark.createDataFrame(Seq(
  (0, Array("soyo", "spark", "soyo2", "soyo", "")),
  (1, Array("soyo", "hadoop", "soyo", "hadoop", "xiaozhou", "soyo2", "spark", "", "")),
  (2, Array("soyo", "spark", "soyo2", "hadoop", "soyo3", "")),
  (3, Array("soyo", "spark", "soyo20", "hadoop", "soyo2", "", "")),
  (4, Array("soyo", "", "spark", "", "spark", "spark", ""))
)).toDF("id", "words")
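
If aligning the Scala version is not possible, the TypeTag requirement can be sidestepped by declaring the schema explicitly and building the DataFrame from an RDD of Rows, since Spark then never has to reflect on the tuple type. A minimal sketch (the schema and sample values mirror the example above):

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types._

val spark = SparkSession.builder()
  .appName("TypeTagFree")
  .master("local[*]")
  .getOrCreate()

// An explicit schema replaces the compiler-derived TypeTag.
val schema = StructType(Seq(
  StructField("id", IntegerType, nullable = false),
  StructField("words", ArrayType(StringType), nullable = true)
))

val rows = spark.sparkContext.parallelize(Seq(
  Row(0, Array("soyo", "spark", "soyo2")),
  Row(1, Array("soyo", "hadoop", "spark"))
))

val df = spark.createDataFrame(rows, schema)
df.show()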
