DStreamGraph is somewhat like a stripped-down DAG scheduler: it is responsible for generating a JobSet for each batch interval, and for serializing the graph along its dependency relationships (used for checkpointing, as the writeObject/readObject methods below show). Its two most important fields are inputStreams and outputStreams. Conceptually, Spark Streaming links the live input to the processing of the stream much like a shuffle: the input streams play the role of the shuffle-map side; they are subclasses of DStream that store the data they collect into Spark memory as blocks. The output streams are a different family of DStreams that read the data back out of Spark memory, turn it into Spark Core RDDs batch by batch, and then perform the actual processing.
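To make the two roles concrete, here is a minimal sketch of a user program written against the public Spark Streaming API (the program itself is illustrative, not from the original article); the comments describe the graph wiring that happens behind the scenes:

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object GraphWiringSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("GraphWiringSketch").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Seconds(1)) // becomes the graph's batchDuration

    // Input side: socketTextStream creates a ReceiverInputDStream, which is
    // registered with the graph via addInputStream.
    val lines = ssc.socketTextStream("localhost", 9999)

    // Output side: print() is an output operation; it registers a ForEachDStream
    // with the graph via addOutputStream. Only output streams trigger jobs.
    lines.flatMap(_.split(" ")).count().print()

    ssc.start()            // the graph now generates one JobSet per batch
    ssc.awaitTermination()
  }
}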





final private[streaming] class DStreamGraph extends Serializable with Logging {

  private val inputStreams = new ArrayBuffer[InputDStream[_]]()
  private val outputStreams = new ArrayBuffer[DStream[_]]()

  var rememberDuration: Duration = null
  var checkpointInProgress = false

  var zeroTime: Time = null
  var startTime: Time = null
  var batchDuration: Duration = null

  // Register an input stream; called when an InputDStream is constructed.
  def addInputStream(inputStream: InputDStream[_]) {
    this.synchronized {
      inputStream.setGraph(this)
      inputStreams += inputStream
    }
  }

  // Register an output stream; called when an output operation
  // (print, foreachRDD, saveAs...) is applied to a DStream.
  def addOutputStream(outputStream: DStream[_]) {
    this.synchronized {
      outputStream.setGraph(this)
      outputStreams += outputStream
    }
  }

  def getInputStreams() = this.synchronized { inputStreams.toArray }

  def getOutputStreams() = this.synchronized { outputStreams.toArray }

  def getReceiverInputStreams() = this.synchronized {
    inputStreams.filter(_.isInstanceOf[ReceiverInputDStream[_]])
      .map(_.asInstanceOf[ReceiverInputDStream[_]])
      .toArray
  }

  // Jobs for a batch are produced by asking every output stream for a job
  // at the given batch time.
  def generateJobs(time: Time): Seq[Job] = {
    logDebug("Generating jobs for time " + time)
    val jobs = this.synchronized {
      outputStreams.flatMap(outputStream => outputStream.generateJob(time))
    }
    logDebug("Generated " + jobs.length + " jobs for time " + time)
    jobs
  }

  // checkpointInProgress tells the DStreams which fields to serialize while
  // the whole graph is being written into a checkpoint.
  @throws(classOf[IOException])
  private def writeObject(oos: ObjectOutputStream): Unit = Utils.tryOrIOException {
    logDebug("DStreamGraph.writeObject used")
    this.synchronized {
      checkpointInProgress = true
      logDebug("Enabled checkpoint mode")
      oos.defaultWriteObject()
      checkpointInProgress = false
      logDebug("Disabled checkpoint mode")
    }
  }

  @throws(classOf[IOException])
  private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
    logDebug("DStreamGraph.readObject used")
    this.synchronized {
      checkpointInProgress = true
      ois.defaultReadObject()
      checkpointInProgress = false
    }
  }
}
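generateJobs simply asks every output stream for a job at the given batch time. For reference, this is roughly what DStream.generateJob looks like in the Spark 1.x source (an abridged sketch from memory; ForEachDStream overrides it to run the user's output function instead of the no-op below):

// Abridged sketch of DStream.generateJob (Spark 1.x). getOrCompute returns the
// RDD generated for this batch time (computing it through the DStream lineage
// if needed); the Job just wraps a closure that hands that RDD to Spark Core.
private[streaming] def generateJob(time: Time): Option[Job] = {
  getOrCompute(time) match {
    case Some(rdd) =>
      val jobFunc = () => {
        val emptyFunc = { (iterator: Iterator[T]) => {} } // no-op; subclasses do real work
        context.sparkContext.runJob(rdd, emptyFunc)
      }
      Some(new Job(time, jobFunc))
    case None => None
  }
}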

The JobScheduler drives job execution: it uses the JobGenerator to produce the jobs for each batch and runs them on a thread pool.
/**
 * This class schedules jobs to be run on Spark. It uses the JobGenerator to generate
 * the jobs and runs them using a thread pool.
 */
private[streaming]
class JobScheduler(val ssc: StreamingContext) extends Logging {

  private val jobSets = new ConcurrentHashMap[Time, JobSet]
  private val numConcurrentJobs = ssc.conf.getInt("spark.streaming.concurrentJobs", 1)
  private val jobExecutor = Executors.newFixedThreadPool(numConcurrentJobs)
  private val jobGenerator = new JobGenerator(this)
  val clock = jobGenerator.clock
  val listenerBus = new StreamingListenerBus()

  // These two are created only when scheduler starts.
  // eventActor not being null means the scheduler has been started and not stopped
  var receiverTracker: ReceiverTracker = null
  private var eventActor: ActorRef = null

  def start(): Unit = synchronized {
    if (eventActor != null) return // scheduler has already been started

    logDebug("Starting JobScheduler")
    eventActor = ssc.env.actorSystem.actorOf(Props(new Actor {
      def receive = {
        case event: JobSchedulerEvent => processEvent(event)
      }
    }), "JobScheduler")

    listenerBus.start()
    receiverTracker = new ReceiverTracker(ssc)
    receiverTracker.start()
    jobGenerator.start()
    logInfo("Started JobScheduler")
  }

  def submitJobSet(jobSet: JobSet) {
    if (jobSet.jobs.isEmpty) {
      logInfo("No jobs added for time " + jobSet.time)
    } else {
      jobSets.put(jobSet.time, jobSet)
      jobSet.jobs.foreach(job => jobExecutor.execute(new JobHandler(job)))
      logInfo("Added jobs for time " + jobSet.time)
    }
  }

  private class JobHandler(job: Job) extends Runnable {
    def run() {
      eventActor ! JobStarted(job)
      job.run()
      eventActor ! JobCompleted(job)
    }
  }
After a job finishes, its completion is handled as follows:
  private def handleJobCompletion(job: Job) {
    job.result match {
      case Success(_) =>
        val jobSet = jobSets.get(job.time)
        jobSet.handleJobCompletion(job)
        logInfo("Finished job " + job.id + " from job set of time " + jobSet.time)
        if (jobSet.hasCompleted) {
          jobSets.remove(jobSet.time)
          jobGenerator.onBatchCompletion(jobSet.time)
          logInfo("Total delay: %.3f s for time %s (execution: %.3f s)".format(
            jobSet.totalDelay / 1000.0, jobSet.time.toString,
            jobSet.processingDelay / 1000.0
          ))
          listenerBus.post(StreamingListenerBatchCompleted(jobSet.toBatchInfo))
        }
      case Failure(e) =>
        reportError("Error running job " + job, e)
    }
  }
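One detail worth calling out: the jobExecutor pool size comes from spark.streaming.concurrentJobs, which defaults to 1, so out of the box the jobs of a JobSet run strictly one after another. The sketch below is an assumed example of applying the setting; only the config key and its default are taken from the source above, the rest is illustrative:

// Illustrative sketch: raising spark.streaming.concurrentJobs lets the JobHandler
// pool run jobs from independent output streams (or consecutive batches) in parallel.
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object ConcurrentJobsSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("ConcurrentJobsSketch")
      .setMaster("local[4]")
      .set("spark.streaming.concurrentJobs", "2") // jobExecutor pool size becomes 2

    val ssc = new StreamingContext(conf, Seconds(1))
    val lines = ssc.socketTextStream("localhost", 9999)

    // Two independent output streams => two jobs per JobSet; with the setting
    // above they can execute concurrently instead of back to back.
    lines.count().print()
    lines.map(_.length).reduce(_ + _).print()

    ssc.start()
    ssc.awaitTermination()
  }
}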








