public class InsertIntoHiveTable
extends org.apache.spark.sql.execution.SparkPlan
implements org.apache.spark.sql.execution.UnaryExecNode, scala.Product, scala.Serializable
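InsertIntoHiveTable is the physical operator that Spark's planner emits for an INSERT statement targeting a Hive table; it is not meant to be constructed directly in user code. A minimal sketch of how the node surfaces in a plan, assuming a Hive-enabled session and a made-up table name `dst`:

```scala
import org.apache.spark.sql.SparkSession

// Hive support is required so the target table resolves to a MetastoreRelation.
val spark = SparkSession.builder()
  .appName("InsertIntoHiveTable demo")
  .enableHiveSupport()
  .getOrCreate()

spark.sql("CREATE TABLE IF NOT EXISTS dst (key INT, value STRING)")

// The physical plan printed here contains an InsertIntoHiveTable node.
// Note that INSERT commands execute eagerly when sql() is called.
spark.sql("INSERT OVERWRITE TABLE dst SELECT id, CAST(id AS STRING) FROM range(10)")
  .explain()
```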
Constructor and Description |
---|
InsertIntoHiveTable(org.apache.spark.sql.hive.MetastoreRelation table, scala.collection.immutable.Map<java.lang.String,scala.Option<java.lang.String>> partition, org.apache.spark.sql.execution.SparkPlan child, boolean overwrite, boolean ifNotExists) |
Modifier and Type | Method and Description |
---|---|
static scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> | allAttributes() |
static BaseType | apply(int number) |
static java.lang.String | argString() |
static java.lang.String | asCode() |
abstract static boolean | canEqual(java.lang.Object that) |
protected static PlanType | canonicalized() |
org.apache.spark.sql.execution.SparkPlan | child() |
static scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan> | children() |
protected static scala.collection.Seq<java.lang.Object> | cleanArgs() |
static <B> scala.collection.Seq<B> | collect(scala.PartialFunction<BaseType,B> pf) |
static <B> scala.Option<B> | collectFirst(scala.PartialFunction<BaseType,B> pf) |
static org.apache.spark.sql.catalyst.expressions.ExpressionSet | constraints() |
static scala.collection.immutable.Set<org.apache.spark.sql.catalyst.trees.TreeNode<?>> | containsChild() |
protected RDD<org.apache.spark.sql.catalyst.InternalRow> | doExecute() |
protected static <T> Broadcast<T> | doExecuteBroadcast() |
protected static void | doPrepare() |
abstract static boolean | equals(java.lang.Object that) |
static RDD<org.apache.spark.sql.catalyst.InternalRow> | execute() |
static <T> Broadcast<T> | executeBroadcast() |
org.apache.spark.sql.catalyst.InternalRow[] | executeCollect() |
static Row[] | executeCollectPublic() |
protected static <T> T | executeQuery(scala.Function0<T> query) |
static org.apache.spark.sql.catalyst.InternalRow[] | executeTake(int n) |
static scala.collection.Iterator<org.apache.spark.sql.catalyst.InternalRow> | executeToIterator() |
static scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> | expressions() |
static boolean | fastEquals(org.apache.spark.sql.catalyst.trees.TreeNode<?> other) |
static scala.Option<BaseType> | find(scala.Function1<BaseType,java.lang.Object> f) |
static <A> scala.collection.Seq<A> | flatMap(scala.Function1<BaseType,scala.collection.TraversableOnce<A>> f) |
static void | foreach(scala.Function1<BaseType,scala.runtime.BoxedUnit> f) |
static void | foreachUp(scala.Function1<BaseType,scala.runtime.BoxedUnit> f) |
static scala.collection.mutable.StringBuilder | generateTreeString(int depth, scala.collection.Seq<java.lang.Object> lastChildren, scala.collection.mutable.StringBuilder builder) |
org.apache.hadoop.fs.Path | getExternalTmpPath(org.apache.hadoop.fs.Path path, org.apache.hadoop.conf.Configuration hadoopConf) |
org.apache.hadoop.fs.Path | getExtTmpPathRelTo(org.apache.hadoop.fs.Path path, org.apache.hadoop.conf.Configuration hadoopConf) |
protected static BaseType | getNodeNumbered(org.apache.spark.sql.catalyst.trees.MutableInt number) |
protected static scala.collection.immutable.Set<org.apache.spark.sql.catalyst.expressions.Expression> | getRelevantConstraints(scala.collection.immutable.Set<org.apache.spark.sql.catalyst.expressions.Expression> constraints) |
static int | hashCode() |
boolean | ifNotExists() |
void | initializeLogging(boolean isInterpreter) |
protected static void | initializeLogIfNecessary(boolean isInterpreter) |
static scala.collection.Seq<PlanType> | innerChildren() |
static org.apache.spark.sql.catalyst.expressions.AttributeSet | inputSet() |
protected static boolean | isTraceEnabled() |
protected static scala.collection.immutable.List<scala.Tuple2<java.lang.String,org.json4s.JsonAST.JValue>> | jsonFields() |
org.slf4j.Logger | log_() |
protected static org.slf4j.Logger | log() |
protected static void | logDebug(scala.Function0<java.lang.String> msg) |
protected static void | logDebug(scala.Function0<java.lang.String> msg, java.lang.Throwable throwable) |
protected static void | logError(scala.Function0<java.lang.String> msg) |
protected static void | logError(scala.Function0<java.lang.String> msg, java.lang.Throwable throwable) |
protected static void | logInfo(scala.Function0<java.lang.String> msg) |
protected static void | logInfo(scala.Function0<java.lang.String> msg, java.lang.Throwable throwable) |
protected static java.lang.String | logName() |
protected static void | logTrace(scala.Function0<java.lang.String> msg) |
protected static void | logTrace(scala.Function0<java.lang.String> msg, java.lang.Throwable throwable) |
protected static void | logWarning(scala.Function0<java.lang.String> msg) |
protected static void | logWarning(scala.Function0<java.lang.String> msg, java.lang.Throwable throwable) |
static org.apache.spark.sql.execution.SparkPlan | makeCopy(java.lang.Object[] newArgs) |
static <A> scala.collection.Seq<A> | map(scala.Function1<BaseType,A> f) |
static BaseType | mapChildren(scala.Function1<BaseType,BaseType> f) |
static org.apache.spark.sql.catalyst.expressions.AttributeSet | missingInput() |
protected static org.apache.spark.sql.catalyst.expressions.MutableProjection | newMutableProjection(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> expressions, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> inputSchema, boolean useSubexprElimination) |
protected static boolean | newMutableProjection$default$3() |
protected static scala.math.Ordering<org.apache.spark.sql.catalyst.InternalRow> | newNaturalAscendingOrdering(scala.collection.Seq<DataType> dataTypes) |
protected static scala.math.Ordering<org.apache.spark.sql.catalyst.InternalRow> | newOrdering(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> order, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> inputSchema) |
protected static scala.Function1<org.apache.spark.sql.catalyst.InternalRow,java.lang.Object> | newPredicate(org.apache.spark.sql.catalyst.expressions.Expression expression, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> inputSchema) |
static java.lang.String | nodeName() |
static java.lang.String | numberedTreeString() |
static org.apache.spark.sql.catalyst.trees.Origin | origin() |
protected static scala.collection.Seq<java.lang.Object> | otherCopyArgs() |
scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> | output() |
static scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> | outputOrdering() |
static org.apache.spark.sql.catalyst.plans.physical.Partitioning | outputPartitioning() |
static org.apache.spark.sql.catalyst.expressions.AttributeSet | outputSet() |
boolean | overwrite() |
scala.collection.immutable.Map<java.lang.String,scala.Option<java.lang.String>> | partition() |
static void | prepare() |
protected static void | prepareSubqueries() |
static java.lang.String | prettyJson() |
static void | printSchema() |
static org.apache.spark.sql.catalyst.expressions.AttributeSet | producedAttributes() |
abstract static int | productArity() |
abstract static java.lang.Object | productElement(int n) |
static scala.collection.Iterator<java.lang.Object> | productIterator() |
static java.lang.String | productPrefix() |
static org.apache.spark.sql.catalyst.expressions.AttributeSet | references() |
static scala.collection.Seq<org.apache.spark.sql.catalyst.plans.physical.Distribution> | requiredChildDistribution() |
static scala.collection.Seq<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder>> | requiredChildOrdering() |
static boolean | sameResult(PlanType plan) |
static StructType | schema() |
static java.lang.String | schemaString() |
protected scala.collection.Seq<org.apache.spark.sql.catalyst.InternalRow> | sideEffectResult() Inserts all the rows in the table into Hive. |
static java.lang.String | simpleString() |
protected static SparkContext | sparkContext() |
protected static SQLContext | sqlContext() |
java.lang.String | stagingDir() |
protected static java.lang.String | statePrefix() |
protected static scala.collection.Iterator<java.lang.Object> | stringArgs() |
static boolean | subexpressionEliminationEnabled() |
static scala.collection.Seq<PlanType> | subqueries() |
org.apache.spark.sql.hive.MetastoreRelation | table() |
static java.lang.String | toJSON() |
static java.lang.String | toString() |
static BaseType | transform(scala.PartialFunction<BaseType,BaseType> rule) |
static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> | transformAllExpressions(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule) |
protected static BaseType | transformChildren(scala.PartialFunction<BaseType,BaseType> rule, scala.Function2<BaseType,scala.PartialFunction<BaseType,BaseType>,BaseType> nextOperation) |
static BaseType | transformDown(scala.PartialFunction<BaseType,BaseType> rule) |
static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> | transformExpressions(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule) |
static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> | transformExpressionsDown(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule) |
static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> | transformExpressionsUp(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule) |
static BaseType | transformUp(scala.PartialFunction<BaseType,BaseType> rule) |
protected static scala.collection.Seq<BaseType> | treeChildren() |
static java.lang.String | treeString() |
protected static scala.collection.immutable.Set<org.apache.spark.sql.catalyst.expressions.Expression> | validConstraints() |
protected static void | waitForSubqueries() |
static BaseType | withNewChildren(scala.collection.Seq<BaseType> newChildren) |
Methods inherited from class org.apache.spark.sql.execution.SparkPlan:
doExecuteBroadcast, doPrepare, execute, executeBroadcast, executeCollectPublic, executeQuery, executeTake, executeToIterator, initializeLogIfNecessary, isTraceEnabled, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning, longMetric, makeCopy, metadata, metrics, newMutableProjection, newMutableProjection$default$3, newNaturalAscendingOrdering, newOrdering, newPredicate, org$apache$spark$internal$Logging$$log__$eq, org$apache$spark$internal$Logging$$log_, org$apache$spark$sql$execution$SparkPlan$$decodeUnsafeRows, org$apache$spark$sql$execution$SparkPlan$$subqueryResults, outputOrdering, outputPartitioning, prepare, prepareSubqueries, requiredChildDistribution, requiredChildOrdering, resetMetrics, sparkContext, sqlContext, subexpressionEliminationEnabled, waitForSubqueries
Methods inherited from class org.apache.spark.sql.catalyst.plans.QueryPlan:
allAttributes, canonicalized, cleanArgs, constraints, expressions, getRelevantConstraints, innerChildren, inputSet, missingInput, org$apache$spark$sql$catalyst$plans$QueryPlan$$cleanArg$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$recursiveTransform$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$recursiveTransform$2, org$apache$spark$sql$catalyst$plans$QueryPlan$$scanNullIntolerantExpr, org$apache$spark$sql$catalyst$plans$QueryPlan$$seqToExpressions$1, outputSet, printSchema, producedAttributes, references, sameResult, schema, schemaString, simpleString, statePrefix, subqueries, transformAllExpressions, transformExpressions, transformExpressionsDown, transformExpressionsUp, validConstraints
Methods inherited from class org.apache.spark.sql.catalyst.trees.TreeNode:
apply, argString, asCode, children, collect, collectFirst, containsChild, fastEquals, find, flatMap, foreach, foreachUp, fromJSON, generateTreeString, getNodeNumbered, hashCode, jsonFields, map, mapChildren, nodeName, numberedTreeString, org$apache$spark$sql$catalyst$trees$TreeNode$$collectJsonValue$1, org$apache$spark$sql$catalyst$trees$TreeNode$$parseToJson, origin, otherCopyArgs, prettyJson, productIterator, productPrefix, stringArgs, toJSON, toString, transform, transformChildren, transformDown, transformUp, treeChildren, treeString, withNewChildren
Methods inherited from class java.lang.Object:
clone, equals, finalize, getClass, notify, notifyAll, wait, wait, wait
Methods inherited from interface org.apache.spark.sql.execution.UnaryExecNode:
children, outputPartitioning
public InsertIntoHiveTable(org.apache.spark.sql.hive.MetastoreRelation table, scala.collection.immutable.Map<java.lang.String,scala.Option<java.lang.String>> partition, org.apache.spark.sql.execution.SparkPlan child, boolean overwrite, boolean ifNotExists)
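The `partition` argument maps partition column names to optional values: a `Some` value pins a static partition, while `None` marks the column as dynamically partitioned (its value comes from the query output). `overwrite` corresponds to `INSERT OVERWRITE`, and `ifNotExists` to the `IF NOT EXISTS` clause on a static partition. A hedged sketch of the argument's shape, with hypothetical column names (the node itself is normally built by the planner, not by hand):

```scala
// Shape of the spec for INSERT ... PARTITION (ds='2016-01-01', hr):
// "ds" is static (fixed in the statement), "hr" is dynamic.
val partitionSpec: Map[String, Option[String]] = Map(
  "ds" -> Some("2016-01-01"),
  "hr" -> None
)
```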
public abstract static boolean canEqual(java.lang.Object that)
public abstract static boolean equals(java.lang.Object that)
public abstract static java.lang.Object productElement(int n)
public abstract static int productArity()
public static scala.collection.Iterator<java.lang.Object> productIterator()
public static java.lang.String productPrefix()
public static org.apache.spark.sql.catalyst.trees.Origin origin()
public static scala.collection.immutable.Set<org.apache.spark.sql.catalyst.trees.TreeNode<?>> containsChild()
public static int hashCode()
public static boolean fastEquals(org.apache.spark.sql.catalyst.trees.TreeNode<?> other)
public static scala.Option<BaseType> find(scala.Function1<BaseType,java.lang.Object> f)
public static void foreach(scala.Function1<BaseType,scala.runtime.BoxedUnit> f)
public static void foreachUp(scala.Function1<BaseType,scala.runtime.BoxedUnit> f)
public static <A> scala.collection.Seq<A> map(scala.Function1<BaseType,A> f)
public static <A> scala.collection.Seq<A> flatMap(scala.Function1<BaseType,scala.collection.TraversableOnce<A>> f)
public static <B> scala.collection.Seq<B> collect(scala.PartialFunction<BaseType,B> pf)
public static <B> scala.Option<B> collectFirst(scala.PartialFunction<BaseType,B> pf)
public static BaseType mapChildren(scala.Function1<BaseType,BaseType> f)
public static BaseType withNewChildren(scala.collection.Seq<BaseType> newChildren)
public static BaseType transform(scala.PartialFunction<BaseType,BaseType> rule)
public static BaseType transformDown(scala.PartialFunction<BaseType,BaseType> rule)
public static BaseType transformUp(scala.PartialFunction<BaseType,BaseType> rule)
protected static BaseType transformChildren(scala.PartialFunction<BaseType,BaseType> rule, scala.Function2<BaseType,scala.PartialFunction<BaseType,BaseType>,BaseType> nextOperation)
protected static scala.collection.Seq<java.lang.Object> otherCopyArgs()
public static java.lang.String nodeName()
protected static scala.collection.Iterator<java.lang.Object> stringArgs()
public static java.lang.String argString()
public static java.lang.String toString()
public static java.lang.String treeString()
public static java.lang.String numberedTreeString()
public static BaseType apply(int number)
protected static BaseType getNodeNumbered(org.apache.spark.sql.catalyst.trees.MutableInt number)
protected static scala.collection.Seq<BaseType> treeChildren()
public static scala.collection.mutable.StringBuilder generateTreeString(int depth, scala.collection.Seq<java.lang.Object> lastChildren, scala.collection.mutable.StringBuilder builder)
public static java.lang.String asCode()
public static java.lang.String toJSON()
public static java.lang.String prettyJson()
protected static scala.collection.immutable.List<scala.Tuple2<java.lang.String,org.json4s.JsonAST.JValue>> jsonFields()
protected static scala.collection.immutable.Set<org.apache.spark.sql.catalyst.expressions.Expression> getRelevantConstraints(scala.collection.immutable.Set<org.apache.spark.sql.catalyst.expressions.Expression> constraints)
public static org.apache.spark.sql.catalyst.expressions.ExpressionSet constraints()
protected static scala.collection.immutable.Set<org.apache.spark.sql.catalyst.expressions.Expression> validConstraints()
public static org.apache.spark.sql.catalyst.expressions.AttributeSet outputSet()
public static org.apache.spark.sql.catalyst.expressions.AttributeSet references()
public static org.apache.spark.sql.catalyst.expressions.AttributeSet inputSet()
public static org.apache.spark.sql.catalyst.expressions.AttributeSet producedAttributes()
public static org.apache.spark.sql.catalyst.expressions.AttributeSet missingInput()
public static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> transformExpressions(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule)
public static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> transformExpressionsDown(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule)
public static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> transformExpressionsUp(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule)
public static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> transformAllExpressions(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule)
public static final scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> expressions()
public static StructType schema()
public static java.lang.String schemaString()
public static void printSchema()
protected static java.lang.String statePrefix()
public static java.lang.String simpleString()
public static scala.collection.Seq<PlanType> subqueries()
public static scala.collection.Seq<PlanType> innerChildren()
protected static PlanType canonicalized()
public static boolean sameResult(PlanType plan)
public static scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> allAttributes()
protected static scala.collection.Seq<java.lang.Object> cleanArgs()
protected static java.lang.String logName()
protected static org.slf4j.Logger log()
protected static void logInfo(scala.Function0<java.lang.String> msg)
protected static void logDebug(scala.Function0<java.lang.String> msg)
protected static void logTrace(scala.Function0<java.lang.String> msg)
protected static void logWarning(scala.Function0<java.lang.String> msg)
protected static void logError(scala.Function0<java.lang.String> msg)
protected static void logInfo(scala.Function0<java.lang.String> msg, java.lang.Throwable throwable)
protected static void logDebug(scala.Function0<java.lang.String> msg, java.lang.Throwable throwable)
protected static void logTrace(scala.Function0<java.lang.String> msg, java.lang.Throwable throwable)
protected static void logWarning(scala.Function0<java.lang.String> msg, java.lang.Throwable throwable)
protected static void logError(scala.Function0<java.lang.String> msg, java.lang.Throwable throwable)
protected static boolean isTraceEnabled()
protected static void initializeLogIfNecessary(boolean isInterpreter)
protected static final SQLContext sqlContext()
protected static SparkContext sparkContext()
public static boolean subexpressionEliminationEnabled()
public static org.apache.spark.sql.execution.SparkPlan makeCopy(java.lang.Object[] newArgs)
public static scala.collection.Seq<org.apache.spark.sql.catalyst.plans.physical.Distribution> requiredChildDistribution()
public static scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> outputOrdering()
public static scala.collection.Seq<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder>> requiredChildOrdering()
public static final RDD<org.apache.spark.sql.catalyst.InternalRow> execute()
public static final <T> Broadcast<T> executeBroadcast()
protected static final <T> T executeQuery(scala.Function0<T> query)
protected static void prepareSubqueries()
protected static void waitForSubqueries()
public static final void prepare()
protected static void doPrepare()
protected static <T> Broadcast<T> doExecuteBroadcast()
public static scala.collection.Iterator<org.apache.spark.sql.catalyst.InternalRow> executeToIterator()
public static Row[] executeCollectPublic()
public static org.apache.spark.sql.catalyst.InternalRow[] executeTake(int n)
protected static org.apache.spark.sql.catalyst.expressions.MutableProjection newMutableProjection(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> expressions, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> inputSchema, boolean useSubexprElimination)
protected static scala.Function1<org.apache.spark.sql.catalyst.InternalRow,java.lang.Object> newPredicate(org.apache.spark.sql.catalyst.expressions.Expression expression, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> inputSchema)
protected static scala.math.Ordering<org.apache.spark.sql.catalyst.InternalRow> newOrdering(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> order, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> inputSchema)
protected static scala.math.Ordering<org.apache.spark.sql.catalyst.InternalRow> newNaturalAscendingOrdering(scala.collection.Seq<DataType> dataTypes)
protected static boolean newMutableProjection$default$3()
public static scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan> children()
public static org.apache.spark.sql.catalyst.plans.physical.Partitioning outputPartitioning()
public org.apache.spark.sql.hive.MetastoreRelation table()
public scala.collection.immutable.Map<java.lang.String,scala.Option<java.lang.String>> partition()
public org.apache.spark.sql.execution.SparkPlan child()
Specified by: child in interface org.apache.spark.sql.execution.UnaryExecNode
public boolean overwrite()
public boolean ifNotExists()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output()
Specified by: output in class org.apache.spark.sql.catalyst.plans.QueryPlan<org.apache.spark.sql.execution.SparkPlan>
public java.lang.String stagingDir()
public org.apache.hadoop.fs.Path getExternalTmpPath(org.apache.hadoop.fs.Path path, org.apache.hadoop.conf.Configuration hadoopConf)
public org.apache.hadoop.fs.Path getExtTmpPathRelTo(org.apache.hadoop.fs.Path path, org.apache.hadoop.conf.Configuration hadoopConf)
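Both helpers return a temporary staging location: output is first written off to the side and only moved into the final table or partition directory once the write succeeds. A minimal sketch of the relationship, assuming the Hive convention that stagingDir() reflects the `hive.exec.stagingdir` setting (default `.hive-staging`); the helper name below is hypothetical and this is not the class's actual implementation:

```scala
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path

// Hypothetical sketch: derive a staging directory alongside the target
// path, as Hive-compatible writers conventionally do.
def sketchStagingPath(target: Path, hadoopConf: Configuration): Path = {
  val stagingDir = hadoopConf.get("hive.exec.stagingdir", ".hive-staging")
  new Path(target, stagingDir) // e.g. /warehouse/dst/.hive-staging
}
```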
protected scala.collection.Seq<org.apache.spark.sql.catalyst.InternalRow> sideEffectResult()
Inserts all the rows in the table into Hive. Row objects are properly serialized with the org.apache.hadoop.hive.serde2.SerDe and the org.apache.hadoop.mapred.OutputFormat provided by the table definition.
Note: this is run once and then kept to avoid double insertions.
public org.apache.spark.sql.catalyst.InternalRow[] executeCollect()
Overrides: executeCollect in class org.apache.spark.sql.execution.SparkPlan
protected RDD<org.apache.spark.sql.catalyst.InternalRow> doExecute()
Specified by: doExecute in class org.apache.spark.sql.execution.SparkPlan
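Both overrides lean on sideEffectResult(): executeCollect() returns the memoized rows directly, and doExecute() exposes them as a one-partition RDD, so the insert runs at most once no matter which entry point is used. A hedged, self-contained reconstruction of that pattern (types simplified from InternalRow to String; not verbatim Spark source):

```scala
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

// Sketch of a command-style plan node: the lazy val runs its body on first
// access only, so executeCollect() and doExecute() cannot insert twice.
class RunOnceCommand(sc: SparkContext) {
  protected lazy val sideEffectResult: Seq[String] = {
    println("insert performed exactly once")
    Seq.empty // the real operator returns no result rows
  }
  def executeCollect(): Array[String] = sideEffectResult.toArray
  protected def doExecute(): RDD[String] = sc.parallelize(sideEffectResult, 1)
}
```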
public org.slf4j.Logger log_()
public void initializeLogging(boolean isInterpreter)