public class LocalJobRunner
extends java.lang.Object
Modifier and Type | Field and Description
---|---
static java.lang.String | LOCAL_MAX_MAPS
static org.apache.commons.logging.Log | LOG
static long | versionID
Constructor and Description
---
LocalJobRunner(JobConf conf)
Modifier and Type | Method and Description
---|---
void | cancelDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) Cancel a delegation token.
JobStatus[] | getAllJobs() Get all the jobs submitted.
TaskReport[] | getCleanupTaskReports(JobID id) Grab a bunch of info on the cleanup tasks that make up the job
ClusterStatus | getClusterStatus(boolean detailed) Get the current status of the cluster
org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> | getDelegationToken(org.apache.hadoop.io.Text renewer) Get a new delegation token.
java.lang.String | getFilesystemName() A MapReduce system always operates on a single filesystem.
Counters | getJobCounters(JobID id) Grab the current job counters
JobProfile | getJobProfile(JobID id) Grab a handle to a job that is already known to the JobTracker.
JobStatus[] | getJobsFromQueue(java.lang.String queue) Gets all the jobs submitted to the particular Queue
JobStatus | getJobStatus(JobID id) Grab a handle to a job that is already known to the JobTracker.
static int | getLocalMaxRunningMaps(JobContext job)
TaskReport[] | getMapTaskReports(JobID id) Grab a bunch of info on the map tasks that make up the job
JobID | getNewJobId() Allocate a name for the job.
org.apache.hadoop.ipc.ProtocolSignature | getProtocolSignature(java.lang.String protocol, long clientVersion, int clientMethodsHash)
long | getProtocolVersion(java.lang.String protocol, long clientVersion)
org.apache.hadoop.mapred.QueueAclsInfo[] | getQueueAclsForCurrentUser() Gets the Queue ACLs for current user
org.apache.hadoop.security.authorize.AccessControlList | getQueueAdmins(java.lang.String queueName) Get the administrators of the given job-queue.
JobQueueInfo | getQueueInfo(java.lang.String queue) Gets scheduling information associated with the particular Job queue
JobQueueInfo[] | getQueues() Gets set of Job Queues associated with the Job Tracker
TaskReport[] | getReduceTaskReports(JobID id) Grab a bunch of info on the reduce tasks that make up the job
TaskReport[] | getSetupTaskReports(JobID id) Grab a bunch of info on the setup tasks that make up the job
java.lang.String | getStagingAreaDir() Get a hint from the JobTracker where job-specific files are to be placed.
java.lang.String | getSystemDir() Grab the jobtracker system directory path where job-specific files are to be placed.
TaskCompletionEvent[] | getTaskCompletionEvents(JobID jobid, int fromEventId, int maxEvents) Get task completion events for the jobid, starting from fromEventId.
java.lang.String[] | getTaskDiagnostics(TaskAttemptID taskid) Returns the diagnostic information for a particular task in the given job.
JobStatus[] | jobsToComplete() Get the jobs that are not completed and not failed
void | killJob(JobID id) Kill the indicated job
boolean | killTask(TaskAttemptID taskId, boolean shouldFail) Throws UnsupportedOperationException
long | renewDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) Renew an existing delegation token
void | setJobPriority(JobID id, java.lang.String jp) Set the priority of the specified job
static void | setLocalMaxRunningMaps(JobContext job, int maxMaps) Set the max number of map tasks to run concurrently in the LocalJobRunner.
JobStatus | submitJob(JobID jobid, java.lang.String jobSubmitDir, org.apache.hadoop.security.Credentials credentials) Submit a Job for execution.
public static final org.apache.commons.logging.Log LOG
public static final java.lang.String LOCAL_MAX_MAPS
public static final long versionID
public LocalJobRunner(JobConf conf) throws java.io.IOException
Throws:
java.io.IOException
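A minimal sketch of constructing the runner directly from a JobConf and querying it, assuming the Hadoop mapred classes listed on this page are on the classpath; the class name below is hypothetical.

```java
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.LocalJobRunner;

public class LocalRunnerProbe {
  public static void main(String[] args) throws Exception {
    // Build an in-process runner from a plain JobConf.
    JobConf conf = new JobConf();
    LocalJobRunner runner = new LocalJobRunner(conf);

    // Ask the runner for a fresh job id and some basic cluster facts.
    JobID newId = runner.getNewJobId();
    ClusterStatus status = runner.getClusterStatus(true);
    System.out.println("new job id: " + newId);
    System.out.println("filesystem: " + runner.getFilesystemName());
    System.out.println("max map tasks: " + status.getMaxMapTasks());
  }
}
```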
public long getProtocolVersion(java.lang.String protocol, long clientVersion)
public org.apache.hadoop.ipc.ProtocolSignature getProtocolSignature(java.lang.String protocol, long clientVersion, int clientMethodsHash) throws java.io.IOException
Throws:
java.io.IOException
public JobID getNewJobId()
public JobStatus submitJob(JobID jobid, java.lang.String jobSubmitDir, org.apache.hadoop.security.Credentials credentials) throws java.io.IOException
Throws:
java.io.IOException
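In practice, jobs usually reach submitJob through JobClient rather than by calling it directly. A hedged sketch of running a job in-process, assuming the classic "mapred.job.tracker" = "local" setting routes submission to the LocalJobRunner (the property name may differ across Hadoop releases); the class name and input/output paths are purely illustrative.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;

public class LocalSubmitSketch {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(LocalSubmitSketch.class);
    conf.setJobName("local-sketch");
    // Assumption: "local" here selects the in-process LocalJobRunner.
    conf.set("mapred.job.tracker", "local");

    conf.setMapperClass(IdentityMapper.class);
    conf.setReducerClass(IdentityReducer.class);
    conf.setOutputKeyClass(LongWritable.class);
    conf.setOutputValueClass(Text.class);

    // Hypothetical paths, for illustration only.
    FileInputFormat.setInputPaths(conf, new Path("input"));
    FileOutputFormat.setOutputPath(conf, new Path("output"));

    // Blocks until the in-process job finishes.
    JobClient.runJob(conf);
  }
}
```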
public void killJob(JobID id)
public void setJobPriority(JobID id, java.lang.String jp) throws java.io.IOException
Parameters:
id - ID of the job
jp - Priority to be set for the job
Throws:
java.io.IOException
public boolean killTask(TaskAttemptID taskId, boolean shouldFail) throws java.io.IOException
Throws UnsupportedOperationException.
Parameters:
taskId - the id of the task to kill.
shouldFail - if true the task is failed and added to failed tasks list, otherwise it is just killed, w/o affecting job failure status.
Throws:
java.io.IOException
public JobProfile getJobProfile(JobID id)
public TaskReport[] getMapTaskReports(JobID id)
public TaskReport[] getReduceTaskReports(JobID id)
public TaskReport[] getCleanupTaskReports(JobID id)
public TaskReport[] getSetupTaskReports(JobID id)
public JobStatus getJobStatus(JobID id)
public java.lang.String getFilesystemName() throws java.io.IOException
Throws:
java.io.IOException
public ClusterStatus getClusterStatus(boolean detailed)
Parameters:
detailed - if true then report tracker names and memory usage
public JobStatus[] jobsToComplete()
public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid, int fromEventId, int maxEvents) throws java.io.IOException
Parameters:
jobid - job id
fromEventId - event id to start from.
maxEvents - the max number of events we want to look at
Throws:
java.io.IOException
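A hedged sketch of paging through completion events in batches; the helper method, the runner instance, and the job id are all assumptions introduced here for illustration.

```java
import java.io.IOException;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.LocalJobRunner;
import org.apache.hadoop.mapred.TaskCompletionEvent;

public class CompletionEventsSketch {
  // Fetch completion events in batches of 10 until none remain.
  static void printEvents(LocalJobRunner runner, JobID jobId) throws IOException {
    int from = 0;
    while (true) {
      TaskCompletionEvent[] events = runner.getTaskCompletionEvents(jobId, from, 10);
      if (events.length == 0) {
        break;
      }
      for (TaskCompletionEvent event : events) {
        System.out.println(event.getTaskAttemptId() + " -> " + event.getTaskStatus());
      }
      from += events.length;
    }
  }
}
```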
public JobStatus[] getAllJobs()
public java.lang.String[] getTaskDiagnostics(TaskAttemptID taskid) throws java.io.IOException
Parameters:
taskid - the id of the task
Throws:
java.io.IOException
public java.lang.String getSystemDir()
See Also:
JobSubmissionProtocol.getSystemDir()
public org.apache.hadoop.security.authorize.AccessControlList getQueueAdmins(java.lang.String queueName) throws java.io.IOException
Throws:
java.io.IOException
See Also:
org.apache.hadoop.mapred.JobSubmissionProtocol#getQueueAdmins()
public java.lang.String getStagingAreaDir() throws java.io.IOException
Throws:
java.io.IOException
See Also:
JobSubmissionProtocol.getStagingAreaDir()
public JobStatus[] getJobsFromQueue(java.lang.String queue) throws java.io.IOException
Parameters:
queue - Queue name
Throws:
java.io.IOException
public JobQueueInfo[] getQueues() throws java.io.IOException
Throws:
java.io.IOException
public JobQueueInfo getQueueInfo(java.lang.String queue) throws java.io.IOException
Parameters:
queue - Queue Name
Throws:
java.io.IOException
public org.apache.hadoop.mapred.QueueAclsInfo[] getQueueAclsForCurrentUser() throws java.io.IOException
Throws:
java.io.IOException
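A small, purely illustrative sketch of listing the queues the runner reports along with their scheduling information; the class, method, and the assumption that a LocalJobRunner instance is already available are all introduced here.

```java
import java.io.IOException;
import org.apache.hadoop.mapred.JobQueueInfo;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.LocalJobRunner;

public class QueueListingSketch {
  // Print each queue's name, scheduling info, and how many jobs it holds.
  static void listQueues(LocalJobRunner runner) throws IOException {
    for (JobQueueInfo queue : runner.getQueues()) {
      JobStatus[] jobs = runner.getJobsFromQueue(queue.getQueueName());
      System.out.println(queue.getQueueName()
          + " scheduling=" + queue.getSchedulingInfo()
          + " jobs=" + jobs.length);
    }
  }
}
```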
public static void setLocalMaxRunningMaps(JobContext job, int maxMaps)
Parameters:
job - the job to configure
maxMaps - the maximum number of map tasks to allow.
public static int getLocalMaxRunningMaps(JobContext job)
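A minimal sketch of raising the local map-task parallelism before running a job, assuming the JobContext parameter accepts a new-API org.apache.hadoop.mapreduce.Job (check the parameter's package in your release); the class name is hypothetical.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.LocalJobRunner;
import org.apache.hadoop.mapreduce.Job;

public class LocalMaxMapsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = new Job(conf, "local-max-maps-sketch");

    // Allow up to 4 map tasks to run concurrently in the LocalJobRunner.
    LocalJobRunner.setLocalMaxRunningMaps(job, 4);
    System.out.println("local max maps: "
        + LocalJobRunner.getLocalMaxRunningMaps(job));
  }
}
```

The LOCAL_MAX_MAPS field documented above is presumably the configuration key these helpers read and write on the job's Configuration.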
public void cancelDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) throws java.io.IOException, java.lang.InterruptedException
Parameters:
token - the token to cancel
Throws:
java.io.IOException
java.lang.InterruptedException
public org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> getDelegationToken(org.apache.hadoop.io.Text renewer) throws java.io.IOException, java.lang.InterruptedException
Parameters:
renewer - the user other than the creator (if any) that can renew the token
Throws:
java.io.IOException
java.lang.InterruptedException
public long renewDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) throws java.io.IOException, java.lang.InterruptedException
Parameters:
token - the token to renew
Throws:
java.io.IOException
java.lang.InterruptedException