public class HadoopRDD<K,V> extends RDD<scala.Tuple2<K,V>> implements Logging
An RDD that provides core functionality for reading data stored in Hadoop (e.g., files in HDFS, sources in HBase, or S3), using the older MapReduce API (org.apache.hadoop.mapred).
Note: Instantiating this class directly is not recommended; please use org.apache.spark.SparkContext.hadoopRDD() instead.
param: sc The SparkContext to associate the RDD with.
param: broadcastedConf A general Hadoop Configuration, or a subclass of it. If the enclosed variable references an instance of JobConf, then that JobConf will be used for the Hadoop job. Otherwise, a new JobConf will be created on each slave using the enclosed Configuration.
param: initLocalJobConfFuncOpt Optional closure used to initialize any JobConf that HadoopRDD creates.
param: inputFormatClass Storage format of the data to be read.
param: keyClass Class of the key associated with the inputFormatClass.
param: valueClass Class of the value associated with the inputFormatClass.
param: minPartitions Minimum number of HadoopRDD partitions (Hadoop Splits) to generate.
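Since direct construction is discouraged, here is a minimal Scala sketch of the recommended path through SparkContext.hadoopRDD(); the SparkContext setup and the input path are assumptions for illustration:

```scala
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.{FileInputFormat, JobConf, TextInputFormat}
import org.apache.spark.{SparkConf, SparkContext}

// Assumed for illustration: a local SparkContext and an HDFS input path.
val sc = new SparkContext(
  new SparkConf().setAppName("HadoopRDDExample").setMaster("local[*]"))

val jobConf = new JobConf()
FileInputFormat.setInputPaths(jobConf, "hdfs:///tmp/input")

// Preferred over `new HadoopRDD(...)`, per the note above.
val records = sc.hadoopRDD(
  jobConf,
  classOf[TextInputFormat],
  classOf[LongWritable],
  classOf[Text],
  2 /* minPartitions */)

// Text objects are reused by the record reader, so materialize with toString.
records.map { case (_, line) => line.toString }.take(5).foreach(println)
```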
| Constructor and Description |
|---|
| HadoopRDD(SparkContext sc, Broadcast<SerializableWritable<org.apache.hadoop.conf.Configuration>> broadcastedConf, scala.Option<scala.Function1<org.apache.hadoop.mapred.JobConf,scala.runtime.BoxedUnit>> initLocalJobConfFuncOpt, Class<? extends org.apache.hadoop.mapred.InputFormat<K,V>> inputFormatClass, Class<K> keyClass, Class<V> valueClass, int minPartitions) |
| HadoopRDD(SparkContext sc, org.apache.hadoop.mapred.JobConf conf, Class<? extends org.apache.hadoop.mapred.InputFormat<K,V>> inputFormatClass, Class<K> keyClass, Class<V> valueClass, int minPartitions) |
| Modifier and Type | Method and Description |
|---|---|
| static void | addLocalConfiguration(String jobTrackerId, int jobId, int splitId, int attemptId, org.apache.hadoop.mapred.JobConf conf) Add Hadoop configuration specific to a single partition and attempt. |
| void | checkpoint() Mark this RDD for checkpointing. |
| InterruptibleIterator<scala.Tuple2<K,V>> | compute(Partition theSplit, TaskContext context) :: DeveloperApi :: Implemented by subclasses to compute a given partition. |
| static Object | CONFIGURATION_INSTANTIATION_LOCK() Configuration's constructor is not threadsafe (see SPARK-1097 and HADOOP-10456). |
| static boolean | containsCachedMetadata(String key) |
| static Object | getCachedMetadata(String key) The three methods below are helpers for accessing the local map, a property of the SparkEnv of the local process. |
| org.apache.hadoop.conf.Configuration | getConf() |
| Partition[] | getPartitions() Implemented by subclasses to return the set of partitions in this RDD. |
| scala.collection.Seq<String> | getPreferredLocations(Partition split) Optionally overridden by subclasses to specify placement preferences. |
| <U> RDD<U> | mapPartitionsWithInputSplit(scala.Function2<org.apache.hadoop.mapred.InputSplit,scala.collection.Iterator<scala.Tuple2<K,V>>,scala.collection.Iterator<U>> f, boolean preservesPartitioning, scala.reflect.ClassTag<U> evidence$1) Maps over a partition, providing the InputSplit that was used as the base of the partition. |
| HadoopRDD<K,V> | persist(StorageLevel storageLevel) Set this RDD's storage level to persist its values across operations after the first time it is computed. |
| static int | RECORDS_BETWEEN_BYTES_READ_METRIC_UPDATES() Update the input bytes read metric each time this number of records has been read. |
| static scala.Option<org.apache.spark.rdd.HadoopRDD.SplitInfoReflections> | SPLIT_INFO_REFLECTIONS() |
Methods inherited from class org.apache.spark.rdd.RDD:
aggregate, cache, cartesian, checkpointData, coalesce, collect, collect, context, count, countApprox, countApproxDistinct, countApproxDistinct, countByValue, countByValueApprox, creationSite, dependencies, distinct, distinct, doubleRDDToDoubleRDDFunctions, filter, filterWith, first, flatMap, flatMapWith, fold, foreach, foreachPartition, foreachWith, getCheckpointFile, getStorageLevel, glom, groupBy, groupBy, groupBy, id, intersection, intersection, intersection, isCheckpointed, isEmpty, iterator, keyBy, map, mapPartitions, mapPartitionsWithContext, mapPartitionsWithIndex, mapPartitionsWithSplit, mapWith, max, min, name, numericRDDToDoubleRDDFunctions, partitioner, partitions, persist, pipe, pipe, pipe, preferredLocations, randomSplit, rddToAsyncRDDActions, rddToOrderedRDDFunctions, rddToPairRDDFunctions, rddToSequenceFileRDDFunctions, reduce, repartition, sample, saveAsObjectFile, saveAsTextFile, saveAsTextFile, scope, setName, sortBy, sparkContext, subtract, subtract, subtract, take, takeOrdered, takeSample, toArray, toDebugString, toJavaRDD, toLocalIterator, top, toString, treeAggregate, treeReduce, union, unpersist, zip, zipPartitions, zipPartitions, zipPartitions, zipPartitions, zipPartitions, zipPartitions, zipWithIndex, zipWithUniqueId
Methods inherited from interface org.apache.spark.Logging:
initializeIfNecessary, initializeLogging, isTraceEnabled, log_, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning
Constructor Detail

public HadoopRDD(SparkContext sc, Broadcast<SerializableWritable<org.apache.hadoop.conf.Configuration>> broadcastedConf, scala.Option<scala.Function1<org.apache.hadoop.mapred.JobConf,scala.runtime.BoxedUnit>> initLocalJobConfFuncOpt, Class<? extends org.apache.hadoop.mapred.InputFormat<K,V>> inputFormatClass, Class<K> keyClass, Class<V> valueClass, int minPartitions)
public HadoopRDD(SparkContext sc, org.apache.hadoop.mapred.JobConf conf, Class<? extends org.apache.hadoop.mapred.InputFormat<K,V>> inputFormatClass, Class<K> keyClass, Class<V> valueClass, int minPartitions)
Method Detail

public static Object CONFIGURATION_INSTANTIATION_LOCK()
public static int RECORDS_BETWEEN_BYTES_READ_METRIC_UPDATES()
public static Object getCachedMetadata(String key)
Parameters:
key - (undocumented)

public static boolean containsCachedMetadata(String key)
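A hedged sketch of these cache helpers; the key is hypothetical, since Spark keys this SparkEnv-local map internally:

```scala
import org.apache.spark.rdd.HadoopRDD

// "myMetadataKey" is a hypothetical key, used only to illustrate the call shape.
val key = "myMetadataKey"
if (HadoopRDD.containsCachedMetadata(key)) {
  val cached = HadoopRDD.getCachedMetadata(key)
  println(s"cached metadata for $key: $cached")
}
```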
public static void addLocalConfiguration(String jobTrackerId, int jobId, int splitId, int attemptId, org.apache.hadoop.mapred.JobConf conf)
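A sketch of the call shape, assuming illustrative identifier values; in Spark these are derived from the job and task actually being executed:

```scala
import java.text.SimpleDateFormat
import java.util.Date
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.rdd.HadoopRDD

// Illustrative identifiers standing in for real job/task IDs.
val jobTrackerId = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date())
val taskConf = new JobConf()
HadoopRDD.addLocalConfiguration(jobTrackerId, 0 /* jobId */, 3 /* splitId */, 0 /* attemptId */, taskConf)
// taskConf now carries the per-partition/attempt Hadoop task properties.
```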
public static scala.Option<org.apache.spark.rdd.HadoopRDD.SplitInfoReflections> SPLIT_INFO_REFLECTIONS()
public Partition[] getPartitions()
Description copied from class: RDD
Implemented by subclasses to return the set of partitions in this RDD.
public InterruptibleIterator<scala.Tuple2<K,V>> compute(Partition theSplit, TaskContext context)
Description copied from class: RDD
:: DeveloperApi ::
Implemented by subclasses to compute a given partition.
public <U> RDD<U> mapPartitionsWithInputSplit(scala.Function2<org.apache.hadoop.mapred.InputSplit,scala.collection.Iterator<scala.Tuple2<K,V>>,scala.collection.Iterator<U>> f, boolean preservesPartitioning, scala.reflect.ClassTag<U> evidence$1)
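For example, the InputSplit can be used to tag each record with its source file. A sketch, assuming the `records` RDD from the construction example above; since hadoopRDD() is declared to return RDD<scala.Tuple2<K,V>>, reaching this method requires a cast to HadoopRDD:

```scala
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.{FileSplit, InputSplit}
import org.apache.spark.rdd.HadoopRDD

// Cast is needed because SparkContext.hadoopRDD() is typed as RDD[(K, V)],
// even though the runtime class is HadoopRDD.
val hadoopRdd = records.asInstanceOf[HadoopRDD[LongWritable, Text]]

val linesWithFile = hadoopRdd.mapPartitionsWithInputSplit(
  (split: InputSplit, iter: Iterator[(LongWritable, Text)]) => {
    // File-based input formats hand each partition a FileSplit carrying its path.
    val file = split.asInstanceOf[FileSplit].getPath.toString
    iter.map { case (_, line) => (file, line.toString) }
  },
  preservesPartitioning = true)
```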
public scala.collection.Seq<String> getPreferredLocations(Partition split)
Description copied from class: RDD
Optionally overridden by subclasses to specify placement preferences.
Parameters:
split - (undocumented)

public void checkpoint()
Description copied from class: RDD
Mark this RDD for checkpointing.
Overrides:
checkpoint in class RDD<scala.Tuple2<K,V>>
public HadoopRDD<K,V> persist(StorageLevel storageLevel)
Description copied from class: RDD
Set this RDD's storage level to persist its values across operations after the first time it is computed.
public org.apache.hadoop.conf.Configuration getConf()
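A short usage sketch for persist() and getConf(), continuing the hypothetical hadoopRdd from the earlier examples:

```scala
import org.apache.spark.storage.StorageLevel

// persist() returns HadoopRDD[K, V], so the call can be chained fluently.
hadoopRdd.persist(StorageLevel.MEMORY_ONLY)

// Read back the Hadoop Configuration backing this RDD; the property name
// shown is the old-API input-path key and is illustrative.
val conf: org.apache.hadoop.conf.Configuration = hadoopRdd.getConf
println(conf.get("mapred.input.dir"))
```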