There are quite a few ways to monitor a Spark application; for example, a Spark-on-YARN job can be monitored through the YarnClient API. This post covers a mechanism built into Spark itself: the SparkListener.

For Spark Streaming applications, the counterpart is the StreamingListener.
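For reference, here is a minimal streaming-side sketch. It is an illustration under stated assumptions, not code from this post: it assumes the org.apache.spark.streaming.scheduler.StreamingListener trait as of Spark 2.x, and the class name and log wording are mine. A Java implementation has to stub every callback of the trait:

package cn.com.kong;

import org.apache.spark.streaming.scheduler.*;

// Sketch of a streaming listener (hypothetical class name): only
// onBatchCompleted does real work; the other callbacks are required
// by the trait and left as no-ops.
public class BatchStatsListener implements StreamingListener {

    @Override
    public void onBatchCompleted(StreamingListenerBatchCompleted batchCompleted) {
        System.err.println("Batch " + batchCompleted.batchInfo().batchTime()
                + " completed, records: " + batchCompleted.batchInfo().numRecords());
    }

    @Override public void onStreamingStarted(StreamingListenerStreamingStarted streamingStarted) { }
    @Override public void onReceiverStarted(StreamingListenerReceiverStarted receiverStarted) { }
    @Override public void onReceiverError(StreamingListenerReceiverError receiverError) { }
    @Override public void onReceiverStopped(StreamingListenerReceiverStopped receiverStopped) { }
    @Override public void onBatchSubmitted(StreamingListenerBatchSubmitted batchSubmitted) { }
    @Override public void onBatchStarted(StreamingListenerBatchStarted batchStarted) { }
    @Override public void onOutputOperationStarted(StreamingListenerOutputOperationStarted outputOperationStarted) { }
    @Override public void onOutputOperationCompleted(StreamingListenerOutputOperationCompleted outputOperationCompleted) { }
}

It would be registered with streamingContext.addStreamingListener(new BatchStatsListener()). The batch-side example below instead registers a listener inline on the SparkContext: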

package cn.com.kong;

import org.apache.spark.SparkConf;
import org.apache.spark.scheduler.*;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.status.AppStatusStore;
import org.apache.spark.status.api.v1.ApplicationInfo;

public class CustomSparkListener {

    public static void main(String[] args) {
        System.setProperty("HADOOP_USER_NAME", "etluser");

        SparkConf conf = new SparkConf();
        conf.set("spark.hadoopRDD.ignoreEmptySplits", "true");
        conf.set("spark.sql.adaptive.enabled", "true");
        conf.set("spark.sql.adaptive.join.enabled", "true");
        conf.set("spark.executor.memoryOverhead", "1024");
        conf.set("spark.driver.memoryOverhead", "1024");
        conf.set("spark.kryoserializer.buffer.max", "256m");
        conf.set("spark.kryoserializer.buffer", "64m");
        conf.set("spark.executor.extraJavaOptions", "-XX:+UseG1GC -Dlog4j.configuration=log4j.properties");
        conf.set("spark.driver.extraJavaOptions", "-XX:+UseG1GC -Dlog4j.configuration=log4j.properties");
        conf.set("spark.sql.parquet.writeLegacyFormat", "true");

        SparkSession spark = SparkSession
                .builder()
                .appName("testSparkListener")
                .master("local")
                .config(conf)
                .enableHiveSupport()
                .getOrCreate();

        spark.sql("use coveroptimize");

        // The status store also exposes application-level metrics:
        // AppStatusStore appStatusStore = spark.sparkContext().statusStore();
        // ApplicationInfo applicationInfo = appStatusStore.applicationInfo();
        // applicationInfo.memoryPerExecutorMB();

        // You could define a standalone class implementing the listener
        // interface and register an instance of it; for this test the
        // listener is created inline as an anonymous class.
        spark.sparkContext().addSparkListener(new SparkListenerInterface() {
            @Override
            public void onExecutorRemoved(SparkListenerExecutorRemoved executorRemoved) { }

            /**
             * Called when a stage completes successfully or fails, with
             * information on the completed stage.
             */
            @Override
            public void onStageCompleted(SparkListenerStageCompleted stageCompleted) { }

            @Override
            public void onStageSubmitted(SparkListenerStageSubmitted stageSubmitted) { }

            @Override
            public void onTaskStart(SparkListenerTaskStart taskStart) { }

            /**
             * Called when a job ends.
             */
            @Override
            public void onJobEnd(SparkListenerJobEnd jobEnd) {
                JobResult jobResult = jobEnd.jobResult();
                System.err.println("Custom listener onJobEnd, jobResult: " + jobResult);
            }

            /**
             * Called when a job starts.
             */
            @Override
            public void onJobStart(SparkListenerJobStart jobStart) {
                System.err.println("Custom listener onJobStart, jobId: " + jobStart.jobId());
                System.err.println("Custom listener onJobStart, stages in this job: " + jobStart.stageInfos().size());
            }

            @Override
            public void onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate executorMetricsUpdate) { }

            @Override
            public void onExecutorAdded(SparkListenerExecutorAdded executorAdded) { }

            @Override
            public void onNodeUnblacklisted(SparkListenerNodeUnblacklisted nodeUnblacklisted) { }

            /**
             * Called when the application ends.
             */
            @Override
            public void onApplicationEnd(SparkListenerApplicationEnd applicationEnd) {
                System.err.println("Application ended, time: " + applicationEnd.time());
            }

            @Override
            public void onNodeBlacklisted(SparkListenerNodeBlacklisted nodeBlacklisted) { }

            @Override
            public void onUnpersistRDD(SparkListenerUnpersistRDD unpersistRDD) { }

            @Override
            public void onTaskGettingResult(SparkListenerTaskGettingResult taskGettingResult) { }

            @Override
            public void onOtherEvent(SparkListenerEvent event) { }

            @Override
            public void onEnvironmentUpdate(SparkListenerEnvironmentUpdate environmentUpdate) { }

            @Override
            public void onSpeculativeTaskSubmitted(SparkListenerSpeculativeTaskSubmitted speculativeTask) { }

            @Override
            public void onExecutorBlacklisted(SparkListenerExecutorBlacklisted executorBlacklisted) { }

            @Override
            public void onBlockManagerRemoved(SparkListenerBlockManagerRemoved blockManagerRemoved) { }

            /**
             * Called when the application starts.
             */
            @Override
            public void onApplicationStart(SparkListenerApplicationStart applicationStart) {
                System.err.println("Application started, appName: " + applicationStart.appName()
                        + ", appId: " + applicationStart.appId());
            }

            @Override
            public void onExecutorUnblacklisted(SparkListenerExecutorUnblacklisted executorUnblacklisted) { }

            @Override
            public void onBlockManagerAdded(SparkListenerBlockManagerAdded blockManagerAdded) { }

            @Override
            public void onBlockUpdated(SparkListenerBlockUpdated blockUpdated) { }

            @Override
            public void onTaskEnd(SparkListenerTaskEnd taskEnd) { }
        });

        String sql1 = "select roadid,count(1) cn from gridmappingroad group by roadid";
        spark.sql(sql1).repartition(2).write().mode(SaveMode.Overwrite)
                .saveAsTable("test_listener_table");

        spark.stop();
    }
}
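One note on the inline listener above: implementing SparkListenerInterface forces you to override every callback, which is why the anonymous class is so long. A lighter-weight variant is to extend the no-op base class org.apache.spark.scheduler.SparkListener and override only what you need. A minimal sketch (the class name and messages are my own, not from the run below):

package cn.com.kong;

import org.apache.spark.scheduler.SparkListener;
import org.apache.spark.scheduler.SparkListenerJobEnd;
import org.apache.spark.scheduler.SparkListenerJobStart;

// Extends the no-op SparkListener base class, so only the callbacks
// of interest have to be overridden.
public class JobLoggingListener extends SparkListener {

    @Override
    public void onJobStart(SparkListenerJobStart jobStart) {
        System.err.println("Job started, jobId: " + jobStart.jobId());
    }

    @Override
    public void onJobEnd(SparkListenerJobEnd jobEnd) {
        System.err.println("Job ended, result: " + jobEnd.jobResult());
    }
}

Because it has a zero-argument constructor, such a class can also be attached declaratively via conf.set("spark.extraListeners", "cn.com.kong.JobLoggingListener"), with no addSparkListener call in the job code.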

Run log (timestamps, ports, and some counts were lost when the log was captured):

// :: INFO spark.SparkContext: Running Spark version 2.3.
// :: INFO spark.SparkContext: Submitted application: testSparkListener
// :: INFO spark.SecurityManager: Changing view acls to: kongshuaiwei,etluser
// :: INFO spark.SecurityManager: Changing modify acls to: kongshuaiwei,etluser
// :: INFO spark.SecurityManager: Changing view acls groups to:
// :: INFO spark.SecurityManager: Changing modify acls groups to:
// :: INFO spark.SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(kongshuaiwei, etluser); groups with view permissions: Set(); users with modify permissions: Set(kongshuaiwei, etluser); groups with modify permissions: Set()
// :: INFO util.Utils: Successfully started service 'sparkDriver' on port .
// :: INFO spark.SparkEnv: Registering MapOutputTracker
// :: INFO spark.SparkEnv: Registering BlockManagerMaster
// :: INFO storage.BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
// :: INFO storage.BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
// :: INFO storage.DiskBlockManager: Created local directory at C:\Users\kongshuaiwei\AppData\Local\Temp\blockmgr-b8c578de--4cf3-9e8d-928159f3aecd
// :: INFO memory.MemoryStore: MemoryStore started with capacity 898.5 MB
// :: INFO spark.SparkEnv: Registering OutputCommitCoordinator
// :: INFO util.log: Logging initialized @1729ms
// :: INFO server.Server: jetty-9.3.z-SNAPSHOT
// :: INFO server.Server: Started @1788ms
// :: INFO server.AbstractConnector: Started ServerConnector@5cc5b667{HTTP/1.1,[http/1.1]}{0.0.0.0:}
// :: INFO util.Utils: Successfully started service 'SparkUI' on port .
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@410954b{/jobs,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@10b892d5{/jobs/json,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@3d3f761a{/jobs/job,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@579d011c{/jobs/job/json,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@3670f00{/stages,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@452e26d0{/stages/json,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@46ab18da{/stages/stage,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@7689ddef{/stages/stage/json,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@687a762c{/stages/pool,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@1a2e2935{/stages/pool/json,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@733c423e{/storage,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@4b629f13{/storage/json,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@70925b45{/storage/rdd,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@1b9ea3e3{/storage/rdd/json,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@aa22f1c{/environment,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@55e7a35c{/environment/json,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@37cd92d6{/executors,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@5922ae77{/executors/json,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@4263b080{/executors/threadDump,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@2af616d3{/executors/threadDump/json,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@71f67a79{/static,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@34abdee4{/,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@71a9b4c7{/api,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@21ca139c{/jobs/job/kill,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@226f885f{/stages/stage/kill,null,AVAILABLE,@Spark}
// :: INFO ui.SparkUI: Bound SparkUI to 0.0.0.0, and started at http://sl1-43087-b01.BJ.DATANGMOBILE.com:4040
// :: INFO executor.Executor: Starting executor ID driver on host localhost
// :: INFO util.Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port .
// :: INFO netty.NettyBlockTransferService: Server created on sl1--b01.BJ.DATANGMOBILE.com:
// :: INFO storage.BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
// :: INFO storage.BlockManagerMaster: Registering BlockManager BlockManagerId(driver, sl1--b01.BJ.DATANGMOBILE.com, , None)
// :: INFO storage.BlockManagerMasterEndpoint: Registering block manager sl1--b01.BJ.DATANGMOBILE.com: with 898.5 MB RAM, BlockManagerId(driver, sl1--b01.BJ.DATANGMOBILE.com, , None)
// :: INFO storage.BlockManagerMaster: Registered BlockManager BlockManagerId(driver, sl1--b01.BJ.DATANGMOBILE.com, , None)
// :: INFO storage.BlockManager: Initialized BlockManager: BlockManagerId(driver, sl1--b01.BJ.DATANGMOBILE.com, , None)
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@33a630fa{/metrics/json,null,AVAILABLE,@Spark}
// :: INFO internal.SharedState: loading hive config file: file:/D:/ideaIC/workspace/spark-project/coverOptimize/target/classes/hive-site.xml
// :: INFO internal.SharedState: spark.sql.warehouse.dir is not set, but hive.metastore.warehouse.dir is set. Setting spark.sql.warehouse.dir to the value of hive.metastore.warehouse.dir ('/user/hive/warehouse').
// :: INFO internal.SharedState: Warehouse path is '/user/hive/warehouse'.
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@2fb5fe30{/SQL,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@456be73c{/SQL/json,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@41a6d121{/SQL/execution,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@4f449e8f{/SQL/execution/json,null,AVAILABLE,@Spark}
// :: INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@27e32fe4{/static/sql,null,AVAILABLE,@Spark}
// :: INFO state.StateStoreCoordinatorRef: Registered StateStoreCoordinator endpoint
// :: INFO hive.HiveUtils: Initializing HiveMetastoreConnection version 1.2. using Spark classes.
// :: INFO hive.metastore: Trying to connect to metastore with URI thrift://worker03.xxx.xxx.cn:9083
// :: INFO hive.metastore: Connected to metastore.
// :: INFO session.SessionState: Created local directory: C:/Users/KONGSH~/AppData/Local/Temp/f35b7531-c964-4d2e-8ba5-b5ade205d12a_resources
// :: INFO session.SessionState: Created HDFS directory: /tmp/hive/etluser/f35b7531-c964-4d2e-8ba5-b5ade205d12a
// :: INFO session.SessionState: Created local directory: C:/Users/KONGSH~/AppData/Local/Temp/kongshuaiwei/f35b7531-c964-4d2e-8ba5-b5ade205d12a
// :: INFO session.SessionState: Created HDFS directory: /tmp/hive/etluser/f35b7531-c964-4d2e-8ba5-b5ade205d12a/_tmp_space.db
// :: INFO client.HiveClientImpl: Warehouse location for Hive client (version 1.2.) is /user/hive/warehouse
// :: INFO parquet.ParquetFileFormat: Using default output committer for Parquet: org.apache.parquet.hadoop.ParquetOutputCommitter
// :: INFO datasources.SQLHadoopMapReduceCommitProtocol: Using user defined output committer class org.apache.parquet.hadoop.ParquetOutputCommitter
// :: INFO datasources.SQLHadoopMapReduceCommitProtocol: Using output committer class org.apache.parquet.hadoop.ParquetOutputCommitter
// :: INFO codegen.CodeGenerator: Code generated in 194.663548 ms
// :: INFO codegen.CodeGenerator: Code generated in 42.50705 ms
// :: INFO memory.MemoryStore: Block broadcast_0 stored as values in memory (estimated size 249.3 KB, free 898.3 MB)
// :: INFO memory.MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 24.6 KB, free 898.2 MB)
// :: INFO storage.BlockManagerInfo: Added broadcast_0_piece0 in memory on sl1--b01.BJ.DATANGMOBILE.com: (size: 24.6 KB, free: 898.5 MB)
// :: INFO spark.ContextCleaner: Cleaned accumulator
// :: INFO spark.ContextCleaner: Cleaned accumulator
// :: INFO spark.SparkContext: Created broadcast from
// :: INFO spark.ContextCleaner: Cleaned accumulator
// :: INFO spark.ContextCleaner: Cleaned accumulator
// :: WARN security.UserGroupInformation: No groups available for user etluser
// :: WARN security.UserGroupInformation: No groups available for user etluser
// :: INFO mapred.FileInputFormat: Total input paths to process :
.....
// :: INFO scheduler.DAGScheduler: Registering RDD (saveAsTable at SparkTest.java:)
// :: INFO scheduler.DAGScheduler: Got map stage job (saveAsTable at SparkTest.java:) with output partitions
// :: INFO scheduler.DAGScheduler: Final stage: ShuffleMapStage (saveAsTable at SparkTest.java:)
// :: INFO scheduler.DAGScheduler: Parents of final stage: List()
// :: INFO scheduler.DAGScheduler: Missing parents: List()
Custom listener onJobStart, jobId:
Custom listener onJobStart, stages in this job:
// :: INFO scheduler.DAGScheduler: Submitting ShuffleMapStage (MapPartitionsRDD[] at saveAsTable at SparkTest.java:), which has no missing parents
// :: INFO memory.MemoryStore: Block broadcast_1 stored as values in memory (estimated size 30.1 KB, free 898.2 MB)
// :: INFO memory.MemoryStore: Block broadcast_1_piece0 stored as bytes in memory (estimated size 13.7 KB, free 898.2 MB)
// :: INFO storage.BlockManagerInfo: Added broadcast_1_piece0 in memory on sl1--b01.BJ.DATANGMOBILE.com: (size: 13.7 KB, free: 898.5 MB)
// :: INFO spark.SparkContext: Created broadcast from broadcast at DAGScheduler.scala:
...
// :: INFO scheduler.TaskSetManager: Finished task 30.0 in stage 0.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO scheduler.TaskSchedulerImpl: Removed TaskSet 0.0, whose tasks have all completed, from pool
// :: INFO scheduler.DAGScheduler: ShuffleMapStage (saveAsTable at SparkTest.java:) finished in 359.468 s
// :: INFO scheduler.DAGScheduler: looking for newly runnable stages
// :: INFO scheduler.DAGScheduler: running: Set()
// :: INFO scheduler.DAGScheduler: waiting: Set()
// :: INFO scheduler.DAGScheduler: failed: Set()
// :: INFO exchange.ExchangeCoordinator: advisoryTargetPostShuffleInputSize: , targetPostShuffleInputSize .
Custom listener onJobEnd, jobResult: JobSucceeded
// :: INFO spark.SparkContext: Starting job: saveAsTable at SparkTest.java:
// :: INFO scheduler.DAGScheduler: Registering RDD (saveAsTable at SparkTest.java:)
// :: INFO scheduler.DAGScheduler: Got job (saveAsTable at SparkTest.java:) with output partitions
// :: INFO scheduler.DAGScheduler: Final stage: ResultStage (saveAsTable at SparkTest.java:)
// :: INFO scheduler.DAGScheduler: Parents of final stage: List(ShuffleMapStage )
// :: INFO scheduler.DAGScheduler: Missing parents: List(ShuffleMapStage )
Custom listener onJobStart, jobId:
Custom listener onJobStart, stages in this job:
// :: INFO scheduler.DAGScheduler: Submitting ShuffleMapStage (MapPartitionsRDD[] at saveAsTable at SparkTest.java:), which has no missing parents
// :: INFO memory.MemoryStore: Block broadcast_2 stored as values in memory (estimated size 22.2 KB, free 898.2 MB)
// :: INFO memory.MemoryStore: Block broadcast_2_piece0 stored as bytes in memory (estimated size 10.5 KB, free 898.2 MB)
// :: INFO storage.BlockManagerInfo: Added broadcast_2_piece0 in memory on sl1--b01.BJ.DATANGMOBILE.com: (size: 10.5 KB, free: 898.5 MB)
// :: INFO spark.SparkContext: Created broadcast from broadcast at DAGScheduler.scala:
// :: INFO scheduler.DAGScheduler: Submitting missing tasks from ShuffleMapStage (MapPartitionsRDD[] at saveAsTable at SparkTest.java:) (first tasks are for partitions Vector())
// :: INFO scheduler.TaskSchedulerImpl: Adding task set 2.0 with tasks
// :: INFO scheduler.TaskSetManager: Starting task 0.0 in stage 2.0 (TID , localhost, executor driver, partition , PROCESS_LOCAL, bytes)
// :: INFO executor.Executor: Running task 0.0 in stage 2.0 (TID )
// :: INFO storage.ShuffleBlockFetcherIterator: Getting non-empty blocks out of blocks
// :: INFO storage.ShuffleBlockFetcherIterator: Started remote fetches in ms
// :: INFO executor.Executor: Finished task 0.0 in stage 2.0 (TID ). bytes result sent to driver
// :: INFO scheduler.TaskSetManager: Finished task 0.0 in stage 2.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO scheduler.TaskSchedulerImpl: Removed TaskSet 2.0, whose tasks have all completed, from pool
// :: INFO scheduler.DAGScheduler: ShuffleMapStage (saveAsTable at SparkTest.java:) finished in 0.135 s
// :: INFO scheduler.DAGScheduler: looking for newly runnable stages
// :: INFO scheduler.DAGScheduler: running: Set()
// :: INFO scheduler.DAGScheduler: waiting: Set(ResultStage )
// :: INFO scheduler.DAGScheduler: failed: Set()
// :: INFO scheduler.DAGScheduler: Submitting ResultStage (ShuffledRowRDD[] at saveAsTable at SparkTest.java:), which has no missing parents
// :: INFO memory.MemoryStore: Block broadcast_3 stored as values in memory (estimated size 148.9 KB, free 898.0 MB)
// :: INFO memory.MemoryStore: Block broadcast_3_piece0 stored as bytes in memory (estimated size 52.1 KB, free 898.0 MB)
// :: INFO storage.BlockManagerInfo: Added broadcast_3_piece0 in memory on sl1--b01.BJ.DATANGMOBILE.com: (size: 52.1 KB, free: 898.4 MB)
// :: INFO spark.SparkContext: Created broadcast from broadcast at DAGScheduler.scala:
// :: INFO scheduler.DAGScheduler: Submitting missing tasks from ResultStage (ShuffledRowRDD[] at saveAsTable at SparkTest.java:) (first tasks are for partitions Vector(, ))
// :: INFO scheduler.TaskSchedulerImpl: Adding task set 3.0 with tasks
// :: INFO scheduler.TaskSetManager: Starting task 0.0 in stage 3.0 (TID , localhost, executor driver, partition , ANY, bytes)
// :: INFO executor.Executor: Running task 0.0 in stage 3.0 (TID )
// :: INFO storage.ShuffleBlockFetcherIterator: Getting non-empty blocks out of blocks
// :: INFO storage.ShuffleBlockFetcherIterator: Started remote fetches in ms
// :: INFO datasources.SQLHadoopMapReduceCommitProtocol: Using user defined output committer class org.apache.parquet.hadoop.ParquetOutputCommitter
// :: INFO datasources.SQLHadoopMapReduceCommitProtocol: Using output committer class org.apache.parquet.hadoop.ParquetOutputCommitter
// :: INFO parquet.ParquetWriteSupport: Initialized Parquet WriteSupport with Catalyst schema:
{
"type" : "struct",
"fields" : [ {
"name" : "roadid",
"type" : "string",
"nullable" : true,
"metadata" : {
"comment" : "??id"
}
}, {
"name" : "cn",
"type" : "long",
"nullable" : false,
"metadata" : { }
} ]
}
and corresponding Parquet message type:
message spark_schema {
optional binary roadid (UTF8);
required int64 cn;
}
// :: INFO compress.CodecPool: Got brand-new compressor [.snappy]
// :: INFO output.FileOutputCommitter: Saved output of task 'attempt_20200117135657_0003_m_000000_0' to hdfs://master01.xxx.xxx.cn:8020/user/hive/warehouse/coveroptimize.db/test_listener_table/_temporary/0/task_20200117135657_0003_m_000000
// :: INFO mapred.SparkHadoopMapRedUtil: attempt_20200117135657_0003_m_000000_0: Committed
// :: INFO executor.Executor: Finished task 0.0 in stage 3.0 (TID ). bytes result sent to driver
// :: INFO scheduler.TaskSetManager: Starting task 1.0 in stage 3.0 (TID , localhost, executor driver, partition , ANY, bytes)
// :: INFO executor.Executor: Running task 1.0 in stage 3.0 (TID )
// :: INFO scheduler.TaskSetManager: Finished task 0.0 in stage 3.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO storage.ShuffleBlockFetcherIterator: Getting non-empty blocks out of blocks
// :: INFO storage.ShuffleBlockFetcherIterator: Started remote fetches in ms
// :: INFO datasources.SQLHadoopMapReduceCommitProtocol: Using user defined output committer class org.apache.parquet.hadoop.ParquetOutputCommitter
// :: INFO datasources.SQLHadoopMapReduceCommitProtocol: Using output committer class org.apache.parquet.hadoop.ParquetOutputCommitter
// :: INFO parquet.ParquetWriteSupport: Initialized Parquet WriteSupport with Catalyst schema:
{
"type" : "struct",
"fields" : [ {
"name" : "roadid",
"type" : "string",
"nullable" : true,
"metadata" : {
"comment" : "??id"
}
}, {
"name" : "cn",
"type" : "long",
"nullable" : false,
"metadata" : { }
} ]
}
and corresponding Parquet message type:
message spark_schema {
optional binary roadid (UTF8);
required int64 cn;
}
// :: INFO output.FileOutputCommitter: Saved output of task 'attempt_20200117135657_0003_m_000001_0' to hdfs://master01.xxx.xxx.cn:8020/user/hive/warehouse/coveroptimize.db/test_listener_table/_temporary/0/task_20200117135657_0003_m_000001
// :: INFO mapred.SparkHadoopMapRedUtil: attempt_20200117135657_0003_m_000001_0: Committed
// :: INFO executor.Executor: Finished task 1.0 in stage 3.0 (TID ). bytes result sent to driver
// :: INFO scheduler.TaskSetManager: Finished task 1.0 in stage 3.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO scheduler.TaskSchedulerImpl: Removed TaskSet 3.0, whose tasks have all completed, from pool
// :: INFO scheduler.DAGScheduler: ResultStage (saveAsTable at SparkTest.java:) finished in 0.466 s
Custom listener onJobEnd, jobResult: JobSucceeded
// :: INFO scheduler.DAGScheduler: Job finished: saveAsTable at SparkTest.java:, took 0.616115 s
// :: INFO datasources.FileFormatWriter: Job null committed.
// :: INFO datasources.FileFormatWriter: Finished processing stats for job null.
// :: INFO hive.HiveExternalCatalog: Persisting file based data source table `coveroptimize`.`test_listener_table` into Hive metastore in Hive compatible format.
Application ended, time:
// :: INFO server.AbstractConnector: Stopped Spark@5cc5b667{HTTP/1.1,[http/1.1]}{0.0.0.0:}
// :: INFO ui.SparkUI: Stopped Spark web UI at http://sl1-43087-b01.BJ.DATANGMOBILE.com:4040
// :: INFO spark.MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
// :: INFO memory.MemoryStore: MemoryStore cleared
// :: INFO storage.BlockManager: BlockManager stopped
// :: INFO storage.BlockManagerMaster: BlockManagerMaster stopped
// :: INFO scheduler.OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
// :: INFO spark.SparkContext: Successfully stopped SparkContext
// :: INFO util.ShutdownHookManager: Shutdown hook called
// :: INFO util.ShutdownHookManager: Deleting directory C:\Users\kongshuaiwei\AppData\Local\Temp\spark-547cf37e-2d1e-433c-a584-6c5b7365909f

Process finished with exit code
