public abstract class TaskStatus
extends java.lang.Object
implements org.apache.hadoop.io.Writable, java.lang.Cloneable
Modifier and Type | Class and Description
---|---
static class | TaskStatus.Phase
static class | TaskStatus.State
Constructor and Description
---
TaskStatus()
TaskStatus(TaskAttemptID taskid, float progress, int numSlots, TaskStatus.State runState, java.lang.String diagnosticInfo, java.lang.String stateString, java.lang.String taskTracker, TaskStatus.Phase phase, Counters counters)
Modifier and Type | Method and Description
---|---
java.lang.Object | clone()
Counters | getCounters() - Get the task's counters.
java.lang.String | getDiagnosticInfo()
java.util.List<TaskAttemptID> | getFetchFailedMaps() - Get the list of maps from which output-fetches failed.
long | getFinishTime() - Get the task finish time.
boolean | getIncludeAllCounters()
abstract boolean | getIsMap()
protected int | getMaxStringSize() - Testcases can override getMaxStringSize() to control the max size of strings in TaskStatus.
org.apache.hadoop.mapred.SortedRanges.Range | getNextRecordRange() - Get the next record range which is going to be processed by the task.
int | getNumSlots()
long | getOutputSize() - Returns the number of bytes of output from this map.
TaskStatus.Phase | getPhase() - Get the current phase of this task.
float | getProgress()
TaskStatus.State | getRunState()
long | getShuffleFinishTime() - Get the shuffle finish time for the task.
long | getSortFinishTime() - Get the sort finish time for the task.
long | getStartTime() - Get the start time of the task.
java.lang.String | getStateString()
TaskAttemptID | getTaskID()
java.lang.String | getTaskTracker()
void | readFields(java.io.DataInput in)
void | setCounters(Counters counters) - Set the task's counters.
void | setDiagnosticInfo(java.lang.String info)
void | setIncludeAllCounters(boolean send)
void | setNextRecordRange(org.apache.hadoop.mapred.SortedRanges.Range nextRecordRange) - Set the next record range which is going to be processed by the task.
void | setProgress(float progress)
void | setRunState(TaskStatus.State runState)
void | setStateString(java.lang.String stateString) - Set the state string of the TaskStatus.
void | setTaskTracker(java.lang.String tracker)
void | write(java.io.DataOutput out)
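The getters above give a read-only view of a task attempt. As a minimal sketch, assuming a concrete TaskStatus instance has already been obtained from the framework (the class itself is abstract), a caller might format a one-line status report; the class and method names below are illustrative, not part of the Hadoop API:

```java
import org.apache.hadoop.mapred.TaskStatus;

class TaskStatusReport {
  // Illustrative helper only; "status" is assumed to be a concrete TaskStatus
  // obtained elsewhere, e.g. as reported by a task tracker.
  static String summarize(TaskStatus status) {
    StringBuilder sb = new StringBuilder();
    sb.append(status.getTaskID())
      .append(status.getIsMap() ? " (map)" : " (reduce)")
      .append(" on ").append(status.getTaskTracker())
      .append(": ").append(status.getRunState())
      .append(", phase ").append(status.getPhase())
      // progress is assumed to be a fraction in [0, 1]
      .append(", ").append((int) (status.getProgress() * 100)).append("% complete");
    String diag = status.getDiagnosticInfo();
    if (diag != null && !diag.isEmpty()) {
      sb.append("; diagnostics: ").append(diag);
    }
    return sb.toString();
  }
}
```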
public TaskStatus()
public TaskStatus(TaskAttemptID taskid, float progress, int numSlots, TaskStatus.State runState, java.lang.String diagnosticInfo, java.lang.String stateString, java.lang.String taskTracker, TaskStatus.Phase phase, Counters counters)
protected int getMaxStringSize()
Testcases can override getMaxStringSize() to control the max size of strings in TaskStatus. Note that the TaskStatus is never exposed to clients or users (i.e. Map or Reduce tasks), and hence users cannot override this API to pass large strings in TaskStatus.
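As a sketch of that override point, a testcase might subclass TaskStatus and return a smaller cap; the class name and the 64-character limit are hypothetical, and this assumes getIsMap() is the only abstract member such a subclass must implement (some releases declare additional package-private abstract methods):

```java
import org.apache.hadoop.mapred.TaskStatus;

// Hypothetical test-only subclass; not part of the Hadoop API.
class ShortStringTaskStatus extends TaskStatus {

  // Assumed here to be the only abstract method a subclass must implement.
  @Override
  public boolean getIsMap() {
    return true;
  }

  // Shrink the cap so a testcase can control how large state and
  // diagnostic strings are allowed to grow inside TaskStatus.
  @Override
  protected int getMaxStringSize() {
    return 64;
  }
}
```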
public TaskAttemptID getTaskID()
public abstract boolean getIsMap()
public int getNumSlots()
public float getProgress()
public void setProgress(float progress)
public TaskStatus.State getRunState()
public java.lang.String getTaskTracker()
public void setTaskTracker(java.lang.String tracker)
public void setRunState(TaskStatus.State runState)
public java.lang.String getDiagnosticInfo()
public void setDiagnosticInfo(java.lang.String info)
public java.lang.String getStateString()
public void setStateString(java.lang.String stateString)
Set the state string of the TaskStatus.
public org.apache.hadoop.mapred.SortedRanges.Range getNextRecordRange()
public void setNextRecordRange(org.apache.hadoop.mapred.SortedRanges.Range nextRecordRange)
Parameters: nextRecordRange -
public long getFinishTime()
public long getShuffleFinishTime()
public long getSortFinishTime()
public long getStartTime()
public TaskStatus.Phase getPhase()
public boolean getIncludeAllCounters()
public void setIncludeAllCounters(boolean send)
public Counters getCounters()
public void setCounters(Counters counters)
Parameters: counters -
public long getOutputSize()
public java.util.List<TaskAttemptID> getFetchFailedMaps()
public java.lang.Object clone()
Overrides: clone in class java.lang.Object
public void write(java.io.DataOutput out) throws java.io.IOException
Specified by: write in interface org.apache.hadoop.io.Writable
Throws: java.io.IOException
public void readFields(java.io.DataInput in) throws java.io.IOException
Specified by: readFields in interface org.apache.hadoop.io.Writable
Throws: java.io.IOException
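Because TaskStatus implements Writable, its fields can be round-tripped through any DataOutput/DataInput pair. A minimal sketch using plain java.io streams; the helper class and roundTrip method are hypothetical, and both arguments are assumed to be concrete TaskStatus instances of the matching map/reduce type:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.mapred.TaskStatus;

class TaskStatusRoundTrip {
  // Serializes "original" via write(DataOutput) and repopulates "copy"
  // via readFields(DataInput).
  static void roundTrip(TaskStatus original, TaskStatus copy) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(buffer)) {
      original.write(out);   // write all status fields to the stream
    }
    try (DataInputStream in =
             new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()))) {
      copy.readFields(in);   // read the same fields back into "copy"
    }
  }
}
```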