public class MRAppMaster
extends org.apache.hadoop.service.CompositeService
| Modifier and Type | Class and Description |
|---|---|
| class | MRAppMaster.RunningAppContext |
| Modifier and Type | Field and Description |
|---|---|
| protected org.apache.hadoop.security.UserGroupInformation | currentUser |
| static String | INTERMEDIATE_DATA_ENCRYPTION_ALGO |
| protected boolean | isLastAMRetry |
| protected MRAppMetrics | metrics |
| static int | SHUTDOWN_HOOK_PRIORITY Priority of the MRAppMaster shutdown hook. |
| protected AtomicBoolean | successfullyUnregistered |
| Constructor and Description |
|---|
| MRAppMaster(org.apache.hadoop.yarn.api.records.ApplicationAttemptId applicationAttemptId, org.apache.hadoop.yarn.api.records.ContainerId containerId, String nmHost, int nmPort, int nmHttpPort, org.apache.hadoop.yarn.util.Clock clock, long appSubmitTime) |
| MRAppMaster(org.apache.hadoop.yarn.api.records.ApplicationAttemptId applicationAttemptId, org.apache.hadoop.yarn.api.records.ContainerId containerId, String nmHost, int nmPort, int nmHttpPort, long appSubmitTime) |
| Modifier and Type | Method and Description |
|---|---|
| void | cleanupStagingDir() Clean up staging directories for the job. |
| protected ClientService | createClientService(AppContext context) |
| protected org.apache.hadoop.yarn.event.EventHandler<CommitterEvent> | createCommitterEventHandler(AppContext context, org.apache.hadoop.mapreduce.OutputCommitter committer) |
| protected ContainerAllocator | createContainerAllocator(ClientService clientService, AppContext context) |
| protected ContainerLauncher | createContainerLauncher(AppContext context) |
| protected org.apache.hadoop.yarn.event.Dispatcher | createDispatcher() |
| protected Job | createJob(org.apache.hadoop.conf.Configuration conf, JobStateInternal forcedState, String diagnostic) Create and initialize (but don't start) a single job. |
| protected org.apache.hadoop.yarn.event.EventHandler<JobFinishEvent> | createJobFinishEventHandler() Create an event handler that handles the job finish event. |
| protected org.apache.hadoop.yarn.event.EventHandler<JobHistoryEvent> | createJobHistoryHandler(AppContext context) |
| protected Speculator | createSpeculator(org.apache.hadoop.conf.Configuration conf, AppContext context) |
| protected org.apache.hadoop.service.AbstractService | createStagingDirCleaningService() |
| protected TaskAttemptFinishingMonitor | createTaskAttemptFinishingMonitor(org.apache.hadoop.yarn.event.EventHandler eventHandler) |
| protected TaskAttemptListener | createTaskAttemptListener(AppContext context) |
| List<org.apache.hadoop.mapreduce.v2.api.records.AMInfo> | getAllAMInfos() |
| org.apache.hadoop.yarn.api.records.ApplicationId | getAppID() |
| org.apache.hadoop.yarn.api.records.ApplicationAttemptId | getAttemptID() |
| ClientService | getClientService() |
| org.apache.hadoop.mapreduce.OutputCommitter | getCommitter() |
| Map<org.apache.hadoop.mapreduce.v2.api.records.TaskId,org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo> | getCompletedTaskFromPreviousRun() |
| ContainerAllocator | getContainerAllocator() |
| ContainerLauncher | getContainerLauncher() |
| AppContext | getContext() |
| protected org.apache.hadoop.security.Credentials | getCredentials() |
| org.apache.hadoop.yarn.event.Dispatcher | getDispatcher() |
| protected org.apache.hadoop.fs.FileSystem | getFileSystem(org.apache.hadoop.conf.Configuration conf) Create the default file system for this job. |
| org.apache.hadoop.mapreduce.v2.api.records.JobId | getJobId() |
| protected RMHeartbeatHandler | getRMHeartbeatHandler() |
| int | getStartCount() |
| TaskAttemptListener | getTaskAttemptListener() |
| protected static void | initAndStartAppMaster(MRAppMaster appMaster, org.apache.hadoop.mapred.JobConf conf, String jobUserName) |
| protected void | initJobCredentialsAndUGI(org.apache.hadoop.conf.Configuration conf) Obtain the tokens needed by the job and put them in the UGI. |
| Boolean | isLastAMRetry() |
| boolean | isNewApiCommitter() |
| protected boolean | keepJobFiles(org.apache.hadoop.mapred.JobConf conf, String jobTempDir) |
| static void | main(String[] args) |
| void | notifyIsLastAMRetry(boolean isLastAMRetry) |
| boolean | recovered() |
| protected void | serviceInit(org.apache.hadoop.conf.Configuration conf) |
| protected void | serviceStart() |
| protected void | serviceStop() |
| void | shutDownJob() |
| protected void | startJobs() This can be overridden to instantiate multiple jobs and create a workflow. |
| void | stop() |
| protected void | sysexit() Exit call. |
Methods inherited from class org.apache.hadoop.service.CompositeService
addIfService, addService, getServices, removeService

Methods inherited from class org.apache.hadoop.service.AbstractService
close, getBlockers, getConfig, getFailureCause, getFailureState, getLifecycleHistory, getName, getServiceState, getStartTime, init, isInState, noteFailure, putBlocker, registerGlobalListener, registerServiceListener, removeBlocker, setConfig, start, toString, unregisterGlobalListener, unregisterServiceListener, waitForServiceToStop

public static final int SHUTDOWN_HOOK_PRIORITY
Priority of the MRAppMaster shutdown hook.
public static final String INTERMEDIATE_DATA_ENCRYPTION_ALGO
protected final MRAppMetrics metrics
protected org.apache.hadoop.security.UserGroupInformation currentUser
protected volatile boolean isLastAMRetry
protected AtomicBoolean successfullyUnregistered
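The SHUTDOWN_HOOK_PRIORITY field above gives the priority at which the MRAppMaster registers its shutdown hook. As a rough sketch (assuming the usual org.apache.hadoop.mapreduce.v2.app package for MRAppMaster; the hook body and the +1 offset are illustrative assumptions, not part of this API), another component could register a hook that runs before the AM's via Hadoop's ShutdownHookManager, where a higher priority runs earlier:

```java
import org.apache.hadoop.mapreduce.v2.app.MRAppMaster;
import org.apache.hadoop.util.ShutdownHookManager;

public class ShutdownHookExample {
  public static void main(String[] args) {
    // Register a hook that runs before the MRAppMaster shutdown hook by
    // giving it a strictly higher priority (higher priority runs earlier).
    ShutdownHookManager.get().addShutdownHook(
        () -> System.err.println("flushing custom state before AM shutdown"),
        MRAppMaster.SHUTDOWN_HOOK_PRIORITY + 1);
  }
}
```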
public MRAppMaster(org.apache.hadoop.yarn.api.records.ApplicationAttemptId applicationAttemptId,
org.apache.hadoop.yarn.api.records.ContainerId containerId,
String nmHost,
int nmPort,
int nmHttpPort,
long appSubmitTime)
public MRAppMaster(org.apache.hadoop.yarn.api.records.ApplicationAttemptId applicationAttemptId,
org.apache.hadoop.yarn.api.records.ContainerId containerId,
String nmHost,
int nmPort,
int nmHttpPort,
org.apache.hadoop.yarn.util.Clock clock,
long appSubmitTime)
protected TaskAttemptFinishingMonitor createTaskAttemptFinishingMonitor(org.apache.hadoop.yarn.event.EventHandler eventHandler)
protected void serviceInit(org.apache.hadoop.conf.Configuration conf)
throws Exception
Overrides: serviceInit in class org.apache.hadoop.service.CompositeService
Throws: Exception

protected org.apache.hadoop.yarn.event.Dispatcher createDispatcher()
protected boolean keepJobFiles(org.apache.hadoop.mapred.JobConf conf,
String jobTempDir)
protected org.apache.hadoop.fs.FileSystem getFileSystem(org.apache.hadoop.conf.Configuration conf)
throws IOException
Parameters: conf - the conf object
Throws: IOException

protected org.apache.hadoop.security.Credentials getCredentials()
public void cleanupStagingDir()
throws IOException
Throws: IOException

protected void sysexit()
Exit call.
public void shutDownJob()
protected org.apache.hadoop.yarn.event.EventHandler<JobFinishEvent> createJobFinishEventHandler()
protected Job createJob(org.apache.hadoop.conf.Configuration conf, JobStateInternal forcedState, String diagnostic)
Create and initialize (but don't start) a single job.
Parameters:
forcedState - a state to force the job into or null for normal operation.
diagnostic - a diagnostic message to include with the job.
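Because createJob(conf, forcedState, diagnostic) is a protected factory method, a subclass can intercept the Job the AM builds, for example to keep a reference for inspection in tests. A minimal sketch of such a hypothetical subclass (the class name and field are assumptions; import paths assumed from the hadoop-mapreduce-client-app module layout):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.app.MRAppMaster;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class InspectingAppMaster extends MRAppMaster {

  private volatile Job createdJob;

  public InspectingAppMaster(ApplicationAttemptId attemptId, ContainerId containerId,
      String nmHost, int nmPort, int nmHttpPort, long appSubmitTime) {
    super(attemptId, containerId, nmHost, nmPort, nmHttpPort, appSubmitTime);
  }

  @Override
  protected Job createJob(Configuration conf, JobStateInternal forcedState,
      String diagnostic) {
    // Let MRAppMaster create and initialize the job as usual (it is not
    // started here), then keep a reference for later inspection.
    createdJob = super.createJob(conf, forcedState, diagnostic);
    return createdJob;
  }
}
```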
protected void initJobCredentialsAndUGI(org.apache.hadoop.conf.Configuration conf)
Obtain the tokens needed by the job and put them in the UGI.
Parameters: conf -
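initJobCredentialsAndUGI(conf) obtains the tokens needed by the job and puts them in the UGI. The standalone sketch below illustrates that general pattern with the public Credentials and UserGroupInformation APIs; it is not the AM's actual implementation, and the token-file path is a hypothetical example:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class JobCredentialsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // Hypothetical location of a serialized token file for the job.
    Path tokenFile = new Path(args.length > 0 ? args[0] : "appTokens.dat");

    // Load the job's delegation tokens and secrets from the file...
    Credentials jobCredentials = Credentials.readTokenStorageFile(tokenFile, conf);

    // ...and make them available to code running as the current user (UGI).
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    ugi.addCredentials(jobCredentials);

    System.out.println("Tokens now held by UGI: " + ugi.getTokens().size());
  }
}
```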
protected org.apache.hadoop.yarn.event.EventHandler<JobHistoryEvent> createJobHistoryHandler(AppContext context)

protected org.apache.hadoop.service.AbstractService createStagingDirCleaningService()
protected Speculator createSpeculator(org.apache.hadoop.conf.Configuration conf, AppContext context)
protected TaskAttemptListener createTaskAttemptListener(AppContext context)
protected org.apache.hadoop.yarn.event.EventHandler<CommitterEvent> createCommitterEventHandler(AppContext context, org.apache.hadoop.mapreduce.OutputCommitter committer)
protected ContainerAllocator createContainerAllocator(ClientService clientService, AppContext context)
protected RMHeartbeatHandler getRMHeartbeatHandler()
protected ContainerLauncher createContainerLauncher(AppContext context)
protected ClientService createClientService(AppContext context)
public org.apache.hadoop.yarn.api.records.ApplicationId getAppID()
public org.apache.hadoop.yarn.api.records.ApplicationAttemptId getAttemptID()
public org.apache.hadoop.mapreduce.v2.api.records.JobId getJobId()
public org.apache.hadoop.mapreduce.OutputCommitter getCommitter()
public boolean isNewApiCommitter()
public int getStartCount()
public AppContext getContext()
public org.apache.hadoop.yarn.event.Dispatcher getDispatcher()
public Map<org.apache.hadoop.mapreduce.v2.api.records.TaskId,org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo> getCompletedTaskFromPreviousRun()
public List<org.apache.hadoop.mapreduce.v2.api.records.AMInfo> getAllAMInfos()
public ContainerAllocator getContainerAllocator()
public ContainerLauncher getContainerLauncher()
public TaskAttemptListener getTaskAttemptListener()
public Boolean isLastAMRetry()
protected void serviceStart()
throws Exception
Overrides: serviceStart in class org.apache.hadoop.service.CompositeService
Throws: Exception

public void stop()
Specified by: stop in interface org.apache.hadoop.service.Service
Overrides: stop in class org.apache.hadoop.service.AbstractService

public boolean recovered()
protected void startJobs()
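As the method summary notes, startJobs() can be overridden to instantiate multiple jobs and create a workflow. A minimal sketch of a hypothetical subclass that hooks this point while delegating to the default single-job behaviour (a real workflow would create and chain additional Job instances here; the class name and log line are illustrative assumptions):

```java
import org.apache.hadoop.mapreduce.v2.app.MRAppMaster;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class WorkflowAppMaster extends MRAppMaster {

  public WorkflowAppMaster(ApplicationAttemptId attemptId, ContainerId containerId,
      String nmHost, int nmPort, int nmHttpPort, long appSubmitTime) {
    super(attemptId, containerId, nmHost, nmPort, nmHttpPort, appSubmitTime);
  }

  @Override
  protected void startJobs() {
    // A full workflow implementation would create and chain additional Job
    // instances here; this sketch only adds a marker before the default
    // single-job start.
    System.out.println("Starting job(s) for application " + getAppID());
    super.startJobs();
  }
}
```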
public static void main(String[] args)
public void notifyIsLastAMRetry(boolean isLastAMRetry)
protected static void initAndStartAppMaster(MRAppMaster appMaster, org.apache.hadoop.mapred.JobConf conf, String jobUserName) throws IOException, InterruptedException
Throws: IOException, InterruptedException
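The jobUserName parameter of initAndStartAppMaster suggests that the AM's lifecycle is driven under a UGI for the job user. The sketch below shows that general doAs pattern using the inherited Service init()/start() calls; it is an illustration under that assumption, not the method's actual implementation, and the helper name is hypothetical:

```java
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.v2.app.MRAppMaster;
import org.apache.hadoop.security.UserGroupInformation;

public class AppMasterLauncherSketch {

  // Illustrative only: runs the AM's init()/start() as the given job user,
  // which is the kind of behaviour the initAndStartAppMaster signature implies.
  static void initAndStartAsUser(MRAppMaster appMaster, JobConf conf,
      String jobUserName) throws Exception {
    UserGroupInformation jobUgi = UserGroupInformation.createRemoteUser(jobUserName);
    jobUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
      appMaster.init(conf);   // inherited from AbstractService
      appMaster.start();      // inherited from AbstractService
      return null;
    });
  }
}
```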
protected void serviceStop()
throws Exception
Overrides: serviceStop in class org.apache.hadoop.service.CompositeService
Throws: Exception

public ClientService getClientService()