public class HiveScriptIOSchema
extends Object
implements scala.Product, scala.Serializable
## Constructor Summary

| Constructor and Description |
|---|
HiveScriptIOSchema(scala.collection.Seq<scala.Tuple2<String,String>> inputRowFormat,
scala.collection.Seq<scala.Tuple2<String,String>> outputRowFormat,
scala.Option<String> inputSerdeClass,
scala.Option<String> outputSerdeClass,
scala.collection.Seq<scala.Tuple2<String,String>> inputSerdeProps,
scala.collection.Seq<scala.Tuple2<String,String>> outputSerdeProps,
scala.Option<String> recordReaderClass,
scala.Option<String> recordWriterClass,
boolean schemaLess) |
## Method Summary

| Modifier and Type | Method and Description |
|---|---|
static HiveScriptIOSchema |
apply(org.apache.spark.sql.catalyst.plans.logical.ScriptInputOutputSchema input) |
abstract static boolean |
canEqual(Object that) |
abstract static boolean |
equals(Object that) |
scala.Option<scala.Tuple2<org.apache.hadoop.hive.serde2.AbstractSerDe,org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector>> |
initInputSerDe(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> input) |
scala.Option<scala.Tuple2<org.apache.hadoop.hive.serde2.AbstractSerDe,org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector>> |
initOutputSerDe(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output) |
scala.collection.Seq<scala.Tuple2<String,String>> |
inputRowFormat() |
scala.collection.immutable.Map<String,String> |
inputRowFormatMap() |
scala.Option<String> |
inputSerdeClass() |
scala.collection.Seq<scala.Tuple2<String,String>> |
inputSerdeProps() |
static DataType |
inspectorToDataType(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector inspector) |
static DataType |
javaTypeToDataType(java.lang.reflect.Type clz) |
scala.collection.Seq<scala.Tuple2<String,String>> |
outputRowFormat() |
scala.collection.immutable.Map<String,String> |
outputRowFormatMap() |
scala.Option<String> |
outputSerdeClass() |
scala.collection.Seq<scala.Tuple2<String,String>> |
outputSerdeProps() |
abstract static int |
productArity() |
abstract static Object |
productElement(int n) |
static scala.collection.Iterator<Object> |
productIterator() |
static String |
productPrefix() |
scala.Option<org.apache.hadoop.hive.ql.exec.RecordReader> |
recordReader(java.io.InputStream inputStream,
org.apache.hadoop.conf.Configuration conf) |
scala.Option<String> |
recordReaderClass() |
scala.Option<org.apache.hadoop.hive.ql.exec.RecordWriter> |
recordWriter(java.io.OutputStream outputStream,
org.apache.hadoop.conf.Configuration conf) |
scala.Option<String> |
recordWriterClass() |
boolean |
schemaLess() |
static org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector |
toInspector(DataType dataType) |
static org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector |
toInspector(org.apache.spark.sql.catalyst.expressions.Expression expr) |
static org.apache.spark.sql.hive.HiveInspectors.typeInfoConversions |
typeInfoConversions(DataType dt) |
static scala.Function1<Object,Object> |
unwrapperFor(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector objectInspector) |
static scala.Function3<Object,org.apache.spark.sql.catalyst.InternalRow,Object,scala.runtime.BoxedUnit> |
unwrapperFor(org.apache.hadoop.hive.serde2.objectinspector.StructField field) |
static Object[] |
wrap(org.apache.spark.sql.catalyst.InternalRow row,
scala.Function1<Object,Object>[] wrappers,
Object[] cache,
DataType[] dataTypes) |
static Object |
wrap(Object a,
org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector oi,
DataType dataType) |
static Object[] |
wrap(scala.collection.Seq<Object> row,
scala.Function1<Object,Object>[] wrappers,
Object[] cache,
DataType[] dataTypes) |
Methods inherited from class java.lang.Object: equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
public HiveScriptIOSchema(scala.collection.Seq<scala.Tuple2<String,String>> inputRowFormat, scala.collection.Seq<scala.Tuple2<String,String>> outputRowFormat, scala.Option<String> inputSerdeClass, scala.Option<String> outputSerdeClass, scala.collection.Seq<scala.Tuple2<String,String>> inputSerdeProps, scala.collection.Seq<scala.Tuple2<String,String>> outputSerdeProps, scala.Option<String> recordReaderClass, scala.Option<String> recordWriterClass, boolean schemaLess)
public static HiveScriptIOSchema apply(org.apache.spark.sql.catalyst.plans.logical.ScriptInputOutputSchema input)
public static DataType javaTypeToDataType(java.lang.reflect.Type clz)
public static scala.Function1<Object,Object> unwrapperFor(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector objectInspector)
public static scala.Function3<Object,org.apache.spark.sql.catalyst.InternalRow,Object,scala.runtime.BoxedUnit> unwrapperFor(org.apache.hadoop.hive.serde2.objectinspector.StructField field)
public static Object wrap(Object a, org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector oi, DataType dataType)
public static Object[] wrap(org.apache.spark.sql.catalyst.InternalRow row, scala.Function1<Object,Object>[] wrappers, Object[] cache, DataType[] dataTypes)
public static Object[] wrap(scala.collection.Seq<Object> row, scala.Function1<Object,Object>[] wrappers, Object[] cache, DataType[] dataTypes)
public static org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector toInspector(DataType dataType)
public static org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector toInspector(org.apache.spark.sql.catalyst.expressions.Expression expr)
public static DataType inspectorToDataType(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector inspector)
public static org.apache.spark.sql.hive.HiveInspectors.typeInfoConversions typeInfoConversions(DataType dt)
public abstract static boolean canEqual(Object that)
public abstract static boolean equals(Object that)
public abstract static Object productElement(int n)
public abstract static int productArity()
public static scala.collection.Iterator<Object> productIterator()
public static String productPrefix()
public scala.collection.Seq<scala.Tuple2<String,String>> inputRowFormat()
public scala.collection.Seq<scala.Tuple2<String,String>> outputRowFormat()
public scala.Option<String> inputSerdeClass()
public scala.Option<String> outputSerdeClass()
public scala.collection.Seq<scala.Tuple2<String,String>> inputSerdeProps()
public scala.collection.Seq<scala.Tuple2<String,String>> outputSerdeProps()
public scala.Option<String> recordReaderClass()
public scala.Option<String> recordWriterClass()
public boolean schemaLess()
public scala.collection.immutable.Map<String,String> inputRowFormatMap()
public scala.collection.immutable.Map<String,String> outputRowFormatMap()
public scala.Option<scala.Tuple2<org.apache.hadoop.hive.serde2.AbstractSerDe,org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector>> initInputSerDe(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> input)
public scala.Option<scala.Tuple2<org.apache.hadoop.hive.serde2.AbstractSerDe,org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector>> initOutputSerDe(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output)
public scala.Option<org.apache.hadoop.hive.ql.exec.RecordReader> recordReader(java.io.InputStream inputStream, org.apache.hadoop.conf.Configuration conf)
public scala.Option<org.apache.hadoop.hive.ql.exec.RecordWriter> recordWriter(java.io.OutputStream outputStream, org.apache.hadoop.conf.Configuration conf)