sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystConf.scala
(deleted in this PR; contents collapsed in this view)

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -34,6 +34,7 @@ import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, _}
 import org.apache.spark.sql.catalyst.rules._
 import org.apache.spark.sql.catalyst.trees.TreeNodeRef
 import org.apache.spark.sql.catalyst.util.toPrettySQL
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types._
 
 /**
@@ -42,13 +43,13 @@ import org.apache.spark.sql.types._
  * to resolve attribute references.
  */
 object SimpleAnalyzer extends Analyzer(
-    new SessionCatalog(
-      new InMemoryCatalog,
-      EmptyFunctionRegistry,
-      new SimpleCatalystConf(caseSensitiveAnalysis = true)) {
-      override def createDatabase(dbDefinition: CatalogDatabase, ignoreIfExists: Boolean) {}
-    },
-    new SimpleCatalystConf(caseSensitiveAnalysis = true))
+    new SessionCatalog(
+      new InMemoryCatalog,
+      EmptyFunctionRegistry,
+      new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true)) {
+      override def createDatabase(dbDefinition: CatalogDatabase, ignoreIfExists: Boolean) {}
+    },
+    new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))
 
 /**
  * Provides a way to keep state during the analysis, this enables us to decouple the concerns
@@ -89,11 +90,11 @@ object AnalysisContext {
  */
 class Analyzer(
     catalog: SessionCatalog,
-    conf: CatalystConf,
+    conf: SQLConf,
     maxIterations: Int)
   extends RuleExecutor[LogicalPlan] with CheckAnalysis {
 
-  def this(catalog: SessionCatalog, conf: CatalystConf) = {
+  def this(catalog: SessionCatalog, conf: SQLConf) = {
     this(catalog, conf, conf.optimizerMaxIterations)
   }
 
@@ -2331,7 +2332,7 @@ class Analyzer(
   }
 
   /**
-   * Replace [[TimeZoneAwareExpression]] without [[TimeZone]] by its copy with session local
+   * Replace [[TimeZoneAwareExpression]] without timezone id by its copy with session local
    * time zone.
    */
   object ResolveTimeZone extends Rule[LogicalPlan] {
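
Note on the pattern above: everywhere the old SimpleCatalystConf(caseSensitiveAnalysis = true) appeared, the change builds a fresh SQLConf and overrides individual entries via the copy helper used in this diff. A minimal sketch of the idiom (the wrapper object name is mine, not part of the change):

import org.apache.spark.sql.internal.SQLConf

object ConfPatternSketch extends App {
  // Start from default settings and override a single entry; copy() returns
  // a new conf, so the defaults stay untouched.
  val conf = new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true)
  assert(conf.caseSensitiveAnalysis) // backed by spark.sql.caseSensitive
}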
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveHints.scala
@@ -17,10 +17,10 @@
 
 package org.apache.spark.sql.catalyst.analysis
 
-import org.apache.spark.sql.catalyst.CatalystConf
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.catalyst.trees.CurrentOrigin
+import org.apache.spark.sql.internal.SQLConf
 
 
 /**
@@ -43,7 +43,7 @@ object ResolveHints {
    *
    * This rule must happen before common table expressions.
    */
-  class ResolveBroadcastHints(conf: CatalystConf) extends Rule[LogicalPlan] {
+  class ResolveBroadcastHints(conf: SQLConf) extends Rule[LogicalPlan] {
     private val BROADCAST_HINT_NAMES = Set("BROADCAST", "BROADCASTJOIN", "MAPJOIN")
 
     def resolver: Resolver = conf.resolver
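
conf.resolver above now comes from SQLConf; it behaves as before, picking case-sensitive or case-insensitive string matching based on spark.sql.caseSensitive. A rough sketch, assuming the usual Resolver alias (String, String) => Boolean:

import org.apache.spark.sql.catalyst.analysis.Resolver
import org.apache.spark.sql.internal.SQLConf

object ResolverSketch extends App {
  val resolver: Resolver = new SQLConf().resolver
  // Case-insensitive by default, so hint names match regardless of casing.
  println(resolver("BROADCASTJOIN", "broadcastJoin")) // true
}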
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala
@@ -19,16 +19,17 @@ package org.apache.spark.sql.catalyst.analysis
 
 import scala.util.control.NonFatal
 
-import org.apache.spark.sql.catalyst.{CatalystConf, InternalRow}
+import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.{Cast, TimeZoneAwareExpression}
 import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
 import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types.{StructField, StructType}
 
 /**
  * An analyzer rule that replaces [[UnresolvedInlineTable]] with [[LocalRelation]].
  */
-case class ResolveInlineTables(conf: CatalystConf) extends Rule[LogicalPlan] {
+case class ResolveInlineTables(conf: SQLConf) extends Rule[LogicalPlan] {
   override def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
     case table: UnresolvedInlineTable if table.expressionsResolved =>
       validateInputDimension(table)
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/SubstituteUnresolvedOrdinals.scala
@@ -17,17 +17,17 @@
 
 package org.apache.spark.sql.catalyst.analysis
 
-import org.apache.spark.sql.catalyst.CatalystConf
 import org.apache.spark.sql.catalyst.expressions.{Expression, Literal, SortOrder}
 import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan, Sort}
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.catalyst.trees.CurrentOrigin.withOrigin
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types.IntegerType
 
 /**
  * Replaces ordinal in 'order by' or 'group by' with UnresolvedOrdinal expression.
  */
-class SubstituteUnresolvedOrdinals(conf: CatalystConf) extends Rule[LogicalPlan] {
+class SubstituteUnresolvedOrdinals(conf: SQLConf) extends Rule[LogicalPlan] {
   private def isIntLiteral(e: Expression) = e match {
     case Literal(_, IntegerType) => true
     case _ => false
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/view.scala
@@ -18,10 +18,10 @@
 package org.apache.spark.sql.catalyst.analysis
 
 import org.apache.spark.sql.AnalysisException
-import org.apache.spark.sql.catalyst.CatalystConf
 import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, Cast}
 import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project, View}
 import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.internal.SQLConf
 
 /**
  * This file defines analysis rules related to views.
@@ -47,7 +47,7 @@ import org.apache.spark.sql.catalyst.rules.Rule
  * This should be only done after the batch of Resolution, because the view attributes are not
  * completely resolved during the batch of Resolution.
  */
-case class AliasViewChild(conf: CatalystConf) extends Rule[LogicalPlan] {
+case class AliasViewChild(conf: SQLConf) extends Rule[LogicalPlan] {
   override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
     case v @ View(desc, output, child) if child.resolved && output != child.output =>
       val resolver = conf.resolver
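
For context, AliasViewChild reconciles a view's stored output attributes with its child plan's output. A simplified sketch of the cast-and-alias step, not the verbatim rule body:

import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, Cast, NamedExpression}

// Cast each child column to the type recorded for the view and re-alias it
// under the recorded name, keeping the stored expression ids stable.
def aliasColumns(output: Seq[Attribute], childOutput: Seq[Attribute]): Seq[NamedExpression] =
  output.zip(childOutput).map { case (attr, col) =>
    Alias(Cast(col, attr.dataType), attr.name)(exprId = attr.exprId)
  }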
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
@@ -35,6 +35,7 @@ import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionInfo}
 import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, ParserInterface}
 import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, SubqueryAlias, View}
 import org.apache.spark.sql.catalyst.util.StringUtils
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types.{StructField, StructType}
 
 object SessionCatalog {
@@ -52,7 +53,7 @@ class SessionCatalog(
     val externalCatalog: ExternalCatalog,
     globalTempViewManager: GlobalTempViewManager,
     functionRegistry: FunctionRegistry,
-    conf: CatalystConf,
+    conf: SQLConf,
     hadoopConf: Configuration,
     parser: ParserInterface,
     functionResourceLoader: FunctionResourceLoader) extends Logging {
@@ -63,7 +64,7 @@ class SessionCatalog(
   def this(
       externalCatalog: ExternalCatalog,
       functionRegistry: FunctionRegistry,
-      conf: CatalystConf) {
+      conf: SQLConf) {
     this(
       externalCatalog,
       new GlobalTempViewManager("global_temp"),
@@ -79,7 +80,7 @@
     this(
       externalCatalog,
       new SimpleFunctionRegistry,
-      SimpleCatalystConf(caseSensitiveAnalysis = true))
+      new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))
   }
 
   /** List of temporary tables, mapping from table name to their logical plan. */
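
The test-only constructor keeps the same shape under the new type; a quick sketch of building a throwaway catalog with it (mirroring the hunk above):

import org.apache.spark.sql.catalyst.analysis.EmptyFunctionRegistry
import org.apache.spark.sql.catalyst.catalog.{InMemoryCatalog, SessionCatalog}
import org.apache.spark.sql.internal.SQLConf

object CatalogSketch extends App {
  // An in-memory, case-sensitive catalog with no registered functions,
  // the same combination SimpleAnalyzer uses.
  val catalog = new SessionCatalog(
    new InMemoryCatalog,
    EmptyFunctionRegistry,
    new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))
}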
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala
@@ -25,12 +25,13 @@ import scala.collection.mutable
 import com.google.common.base.Objects
 
 import org.apache.spark.sql.AnalysisException
-import org.apache.spark.sql.catalyst.{CatalystConf, FunctionIdentifier, InternalRow, TableIdentifier}
+import org.apache.spark.sql.catalyst.{FunctionIdentifier, InternalRow, TableIdentifier}
 import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
 import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap, Cast, Literal}
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils}
 import org.apache.spark.sql.catalyst.util.quoteIdentifier
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types.StructType
 
 
@@ -425,7 +426,7 @@ case class CatalogRelation(
   /** Only compare table identifier. */
   override lazy val cleanArgs: Seq[Any] = Seq(tableMeta.identifier)
 
-  override def computeStats(conf: CatalystConf): Statistics = {
+  override def computeStats(conf: SQLConf): Statistics = {
     // For data source tables, we will create a `LogicalRelation` and won't call this method, for
     // hive serde tables, we will always generate a statistics.
     // TODO: unify the table stats generation.
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
@@ -20,21 +20,21 @@ package org.apache.spark.sql.catalyst.optimizer
 import scala.collection.mutable
 
 import org.apache.spark.sql.AnalysisException
-import org.apache.spark.sql.catalyst.{CatalystConf, SimpleCatalystConf}
 import org.apache.spark.sql.catalyst.analysis._
 import org.apache.spark.sql.catalyst.catalog.{InMemoryCatalog, SessionCatalog}
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.expressions.aggregate._
 import org.apache.spark.sql.catalyst.plans._
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.rules._
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types._
 
 /**
  * Abstract class all optimizers should inherit of, contains the standard batches (extending
  * Optimizers can override this.
  */
-abstract class Optimizer(sessionCatalog: SessionCatalog, conf: CatalystConf)
+abstract class Optimizer(sessionCatalog: SessionCatalog, conf: SQLConf)
   extends RuleExecutor[LogicalPlan] {
 
   protected val fixedPoint = FixedPoint(conf.optimizerMaxIterations)
@@ -160,8 +160,8 @@ class SimpleTestOptimizer extends Optimizer(
   new SessionCatalog(
     new InMemoryCatalog,
     EmptyFunctionRegistry,
-    new SimpleCatalystConf(caseSensitiveAnalysis = true)),
-  new SimpleCatalystConf(caseSensitiveAnalysis = true))
+    new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true)),
+  new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))
 
 /**
  * Remove redundant aliases from a query plan. A redundant alias is an alias that does not change
@@ -270,7 +270,7 @@ object RemoveRedundantProject extends Rule[LogicalPlan] {
 /**
  * Pushes down [[LocalLimit]] beneath UNION ALL and beneath the streamed inputs of outer joins.
  */
-case class LimitPushDown(conf: CatalystConf) extends Rule[LogicalPlan] {
+case class LimitPushDown(conf: SQLConf) extends Rule[LogicalPlan] {
 
   private def stripGlobalLimitIfPresent(plan: LogicalPlan): LogicalPlan = {
     plan match {
* Note: While this optimization is applicable to all types of join, it primarily benefits Inner and
* LeftSemi joins.
*/
case class InferFiltersFromConstraints(conf: CatalystConf)
case class InferFiltersFromConstraints(conf: SQLConf)
extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = if (conf.constraintPropagationEnabled) {
inferFilters(plan)
@@ -715,7 +715,7 @@ object EliminateSorts extends Rule[LogicalPlan] {
  * 2) by substituting a dummy empty relation when the filter will always evaluate to `false`.
  * 3) by eliminating the always-true conditions given the constraints on the child's output.
  */
-case class PruneFilters(conf: CatalystConf) extends Rule[LogicalPlan] with PredicateHelper {
+case class PruneFilters(conf: SQLConf) extends Rule[LogicalPlan] with PredicateHelper {
   def apply(plan: LogicalPlan): LogicalPlan = plan transform {
     // If the filter condition always evaluate to true, remove the filter.
     case Filter(Literal(true, BooleanType), child) => child
@@ -1057,7 +1057,7 @@ object CombineLimits extends Rule[LogicalPlan] {
  * the join between R and S is not a cartesian product and therefore should be allowed.
  * The predicate R.r = S.s is not recognized as a join condition until the ReorderJoin rule.
  */
-case class CheckCartesianProducts(conf: CatalystConf)
+case class CheckCartesianProducts(conf: SQLConf)
   extends Rule[LogicalPlan] with PredicateHelper {
   /**
    * Check if a join is a cartesian product. Returns true if
@@ -1092,7 +1092,7 @@ case class CheckCartesianProducts(conf: CatalystConf)
  * This uses the same rules for increasing the precision and scale of the output as
  * [[org.apache.spark.sql.catalyst.analysis.DecimalPrecision]].
  */
-case class DecimalAggregates(conf: CatalystConf) extends Rule[LogicalPlan] {
+case class DecimalAggregates(conf: SQLConf) extends Rule[LogicalPlan] {
   import Decimal.MAX_LONG_DIGITS
 
   /** Maximum number of decimal digits representable precisely in a Double */
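
These optimizer rules now read their switches from SQLConf getters. A sketch of the two flags the rules above consult, assuming the standard entry names:

import org.apache.spark.sql.internal.SQLConf

object OptimizerFlagsSketch extends App {
  val conf = new SQLConf()
  // InferFiltersFromConstraints (and constraint-based pruning) gate on this:
  println(conf.constraintPropagationEnabled) // spark.sql.constraintPropagation.enabled
  // CheckCartesianProducts gates on this:
  println(conf.crossJoinEnabled)             // spark.sql.crossJoin.enabled
}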
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala
@@ -19,14 +19,14 @@ package org.apache.spark.sql.catalyst.optimizer
 
 import scala.collection.immutable.HashSet
 
-import org.apache.spark.sql.catalyst.CatalystConf
 import org.apache.spark.sql.catalyst.analysis._
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.expressions.aggregate._
 import org.apache.spark.sql.catalyst.expressions.Literal.{FalseLiteral, TrueLiteral}
 import org.apache.spark.sql.catalyst.plans._
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.rules._
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types._
 
 /*
@@ -115,7 +115,7 @@ object ReorderAssociativeOperator extends Rule[LogicalPlan] {
  * 2. Replaces [[In (value, seq[Literal])]] with optimized version
  *    [[InSet (value, HashSet[Literal])]] which is much faster.
  */
-case class OptimizeIn(conf: CatalystConf) extends Rule[LogicalPlan] {
+case class OptimizeIn(conf: SQLConf) extends Rule[LogicalPlan] {
   def apply(plan: LogicalPlan): LogicalPlan = plan transform {
     case q: LogicalPlan => q transformExpressionsDown {
       case expr @ In(v, list) if expr.inSetConvertible =>
@@ -346,7 +346,7 @@ object LikeSimplification extends Rule[LogicalPlan] {
  * equivalent [[Literal]] values. This rule is more specific with
  * Null value propagation from bottom to top of the expression tree.
  */
-case class NullPropagation(conf: CatalystConf) extends Rule[LogicalPlan] {
+case class NullPropagation(conf: SQLConf) extends Rule[LogicalPlan] {
   private def isNullLiteral(e: Expression): Boolean = e match {
     case Literal(null, _) => true
     case _ => false
@@ -482,7 +482,7 @@ object FoldablePropagation extends Rule[LogicalPlan] {
 /**
  * Optimizes expressions by replacing according to CodeGen configuration.
  */
-case class OptimizeCodegen(conf: CatalystConf) extends Rule[LogicalPlan] {
+case class OptimizeCodegen(conf: SQLConf) extends Rule[LogicalPlan] {
   def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
     case e: CaseWhen if canCodegen(e) => e.toCodegen()
   }
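
Independent of the conf type change, it may help to recall what OptimizeIn produces: once a resolved In over a literal list passes the conversion threshold (assumed to be spark.sql.optimizer.inSetConversionThreshold), it becomes an InSet over a materialized set, turning a linear scan into a hash lookup. A toy illustration, not the rule body:

import org.apache.spark.sql.catalyst.expressions.{In, InSet, Literal}

object OptimizeInSketch extends App {
  val in = In(Literal(1), Seq(Literal(1), Literal(2), Literal(3)))
  // Evaluate the literal list once and test membership against a Set.
  val inSet = InSet(Literal(1), in.list.map(_.asInstanceOf[Literal].value).toSet)
  println(inSet)
}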
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/joins.scala
@@ -19,7 +19,6 @@ package org.apache.spark.sql.catalyst.optimizer
 
 import scala.annotation.tailrec
 
-import org.apache.spark.sql.catalyst.CatalystConf
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.planning.{ExtractFiltersAndInnerJoins, PhysicalOperation}
 import org.apache.spark.sql.catalyst.plans._
@@ -440,7 +439,7 @@ case class ReorderJoin(conf: SQLConf) extends Rule[LogicalPlan] with PredicateHelper {
  *
  * This rule should be executed before pushing down the Filter
  */
-case class EliminateOuterJoin(conf: CatalystConf) extends Rule[LogicalPlan] with PredicateHelper {
+case class EliminateOuterJoin(conf: SQLConf) extends Rule[LogicalPlan] with PredicateHelper {
 
   /**
    * Returns whether the expression returns null or false when all inputs are nulls.