diff --git a/fe/check/checkstyle/checkstyle.xml b/fe/check/checkstyle/checkstyle.xml
index 1bd1436b667721..b941c376b71aa1 100644
--- a/fe/check/checkstyle/checkstyle.xml
+++ b/fe/check/checkstyle/checkstyle.xml
@@ -94,16 +94,19 @@ under the License.
-
+
+
+
+
@@ -131,6 +134,7 @@ under the License.
                value="CLASS_DEF, METHOD_DEF, CTOR_DEF, LITERAL_FOR, LITERAL_WHILE, STATIC_INIT, INSTANCE_INIT, ANNOTATION_DEF, ENUM_DEF, INTERFACE_DEF, RECORD_DEF, COMPACT_CTOR_DEF"/>
+
@@ -153,18 +157,27 @@ under the License.
                value="\\u00(09|0(a|A)|0(c|C)|0(d|D)|22|27|5(C|c))|\\(0(10|11|12|14|15|42|47)|134)"/>
+
-
+
+
+
-
+
+
+
-
-
+
+
+
+
+
+
@@ -244,6 +257,7 @@ under the License.
+
@@ -252,8 +266,11 @@ under the License.
+
+
+
+
-
@@ -376,11 +393,13 @@ under the License.
                value="GenericWhitespace ''{0}'' should followed by whitespace."/>
+
+
@@ -391,14 +410,19 @@ under the License.
                value="COMMA, SEMI, POST_INC, POST_DEC, DOT, LABELED_STAT, METHOD_REF"/>
+
+
+
+
-
+
+
@@ -439,6 +463,7 @@ under the License.
+
@@ -455,6 +480,7 @@ under the License.
                value="LITERAL_TRY, LITERAL_WHILE, LOR, LT, MINUS, MINUS_ASSIGN, MOD, MOD_ASSIGN, NOT_EQUAL, PLUS, PLUS_ASSIGN, QUESTION, RCURLY, SL, SLIST, SL_ASSIGN, SR, SR_ASSIGN, STAR, STAR_ASSIGN, LITERAL_ASSERT, TYPE_EXTENSION_AND"/>
+
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/Hll.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/Hll.java
--- a/fe/fe-common/src/main/java/org/apache/doris/common/io/Hll.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/Hll.java
         hashValue >>>= HLL_COLUMN_PRECISION;
         hashValue |= (1L << HLL_ZERO_COUNT_BITS);
         byte firstOneBit = (byte) (getLongTailZeroNum(hashValue) + 1);
-        registers[idx] = registers[idx] > firstOneBit ? registers[idx] : firstOneBit ;
+        registers[idx] = registers[idx] > firstOneBit ? registers[idx] : firstOneBit;
     }
 
     private void mergeRegisters(byte[] other) {
@@ -217,7 +217,7 @@ public void serialize(DataOutput output) throws IOException {
             output.writeInt(Integer.reverseBytes(nonZeroRegisterNum));
             for (int i = 0; i < HLL_REGISTERS_COUNT; i++) {
                 if (registers[i] != 0) {
-                    output.writeShort(Short.reverseBytes((short)i));
+                    output.writeShort(Short.reverseBytes((short) i));
                     output.writeByte(registers[i]);
                 }
             }
@@ -303,20 +303,20 @@ public strictfp long estimateCardinality() {
         double estimate = alpha * numStreams * numStreams * harmonicMean;
 
         if (estimate <= numStreams * 2.5 && numZeroRegisters != 0) {
-            estimate = numStreams * Math.log(((float)numStreams) / ((float)numZeroRegisters));
+            estimate = numStreams * Math.log(((float) numStreams) / ((float) numZeroRegisters));
         } else if (numStreams == 16384 && estimate < 72000) {
             double bias = 5.9119 * 1.0e-18 * (estimate * estimate * estimate * estimate)
-                    - 1.4253 * 1.0e-12 * (estimate * estimate * estimate) +
-                    1.2940 * 1.0e-7 * (estimate * estimate) -
-                    5.2921 * 1.0e-3 * estimate +
-                    83.3216;
+                    - 1.4253 * 1.0e-12 * (estimate * estimate * estimate)
+                    + 1.2940 * 1.0e-7 * (estimate * estimate)
+                    - 5.2921 * 1.0e-3 * estimate
+                    + 83.3216;
             estimate -= estimate * (bias / 100);
         }
-        return (long)(estimate + 0.5);
+        return (long) (estimate + 0.5);
     }
 
-    public int maxSerializedSize () {
+    public int maxSerializedSize() {
         switch (type) {
             case HLL_DATA_EMPTY:
             default:
@@ -335,14 +335,14 @@ private static long getLittleEndianLong(final byte[] data, final int index) {
-        return (((long) data[index ] & 0xff) ) |
-               (((long) data[index + 1] & 0xff) << 8) |
-               (((long) data[index + 2] & 0xff) << 16) |
-               (((long) data[index + 3] & 0xff) << 24) |
-               (((long) data[index + 4] & 0xff) << 32) |
-               (((long) data[index + 5] & 0xff) << 40) |
-               (((long) data[index + 6] & 0xff) << 48) |
-               (((long) data[index + 7] & 0xff) << 56);
+        return (((long) data[index ] & 0xff))
+                | (((long) data[index + 1] & 0xff) << 8)
+                | (((long) data[index + 2] & 0xff) << 16)
+                | (((long) data[index + 3] & 0xff) << 24)
+                | (((long) data[index + 4] & 0xff) << 32)
+                | (((long) data[index + 5] & 0xff) << 40)
+                | (((long) data[index + 6] & 0xff) << 48)
+                | (((long) data[index + 7] & 0xff) << 56);
     }
 
     public static long hash64(final byte[] data, final int length, final int seed) {
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/IOUtils.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/IOUtils.java
index 137315eddf7956..ffd2330bd30be3 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/io/IOUtils.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/IOUtils.java
@@ -100,6 +100,7 @@ public static long copyBytes(InputStream in, OutputStream out,
                 try {
                     Thread.sleep(sleepTime);
                 } catch (InterruptedException ie) {
+                    // CHECKSTYLE IGNORE THIS LINE
                 }
             }
         }
@@ -250,6 +251,7 @@ public static void closeSocket(Socket sock) {
             try {
                 sock.close();
             } catch (IOException ignored) {
+                // CHECKSTYLE IGNORE THIS LINE
             }
         }
     }
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/LimitOutputStream.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/LimitOutputStream.java
index 648f2e49eeddb2..e71af2dbbd0f1f 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/io/LimitOutputStream.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/LimitOutputStream.java
@@ -72,39 +72,35 @@ public void flush() throws IOException {
     public void write(byte[] b, int off, int len) throws IOException {
         long sleepTime = 0;
         long curTime = 0;
-        try {
-            if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
-                throw new IndexOutOfBoundsException();
-            } else if (len == 0) {
-                return;
-            }
+        if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
+            throw new IndexOutOfBoundsException();
+        } else if (len == 0) {
+            return;
+        }
 
-            if (speed > 0 && !bstart) {
-                startTime = System.currentTimeMillis();
-                bstart = true;
-            }
-            long resetTime = System.currentTimeMillis();
-            if (resetTime - startTime > 1000) {
-                bytesWriteTotal = 0;
-                startTime = resetTime;
-            }
-            out.write(b, off, len);
-            if (len >= 0) {
-                bytesWriteTotal += len;
-                if (speed > 0) {
-                    curTime = System.currentTimeMillis();
-                    sleepTime = bytesWriteTotal / speed * 1000
-                            - (curTime - startTime);
-                    if (sleepTime > 0) {
-                        try {
-                            Thread.sleep(sleepTime);
-                        } catch (InterruptedException ie) {
-                            LOG.warn("Thread sleep is interrupted");
-                        }
+        if (speed > 0 && !bstart) {
+            startTime = System.currentTimeMillis();
+            bstart = true;
+        }
+        long resetTime = System.currentTimeMillis();
+        if (resetTime - startTime > 1000) {
+            bytesWriteTotal = 0;
+            startTime = resetTime;
+        }
+        out.write(b, off, len);
+        if (len >= 0) {
+            bytesWriteTotal += len;
+            if (speed > 0) {
+                curTime = System.currentTimeMillis();
+                sleepTime = bytesWriteTotal / speed * 1000 - (curTime - startTime);
+                if (sleepTime > 0) {
+                    try {
+                        Thread.sleep(sleepTime);
+                    } catch (InterruptedException ie) {
+                        LOG.warn("Thread sleep is interrupted");
                     }
                 }
             }
-        } finally {
         }
     }
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/Text.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/Text.java
index a8977e5ed7f9d0..7331195d326060 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/io/Text.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/Text.java
@@ -470,65 +470,65 @@ public static void validateUTF8(byte[] utf8, int start, int len)
             int aByte = ((int) utf8[count] & 0xFF);
 
             switch (state) { // CHECKSTYLE IGNORE THIS LINE: missing switch default
-            case LEAD_BYTE:
-                leadByte = aByte;
-                length = bytesFromUTF8[aByte];
+                case LEAD_BYTE:
+                    leadByte = aByte;
+                    length = bytesFromUTF8[aByte];
+
+                    switch (length) {
+                        case 0: // check for ASCII
+                            if (leadByte > 0x7F) {
+                                throw new MalformedInputException(count);
+                            }
+                            break;
+                        case 1:
+                            if (leadByte < 0xC2 || leadByte > 0xDF) {
+                                throw new MalformedInputException(count);
+                            }
+                            state = TRAIL_BYTE_1;
+                            break;
+                        case 2:
+                            if (leadByte < 0xE0 || leadByte > 0xEF) {
+                                throw new MalformedInputException(count);
+                            }
+                            state = TRAIL_BYTE_1;
+                            break;
+                        case 3:
+                            if (leadByte < 0xF0 || leadByte > 0xF4) {
+                                throw new MalformedInputException(count);
+                            }
+                            state = TRAIL_BYTE_1;
+                            break;
+                        default:
+                            // too long! Longest valid UTF-8 is 4 bytes (lead + three)
+                            // or if < 0 we got a trail byte in the lead byte position
+                            throw new MalformedInputException(count);
+                    } // switch (length)
+                    break;
 
-                switch (length) {
-                case 0: // check for ASCII
-                    if (leadByte > 0x7F) {
+                case TRAIL_BYTE_1:
+                    if (leadByte == 0xF0 && aByte < 0x90) {
                         throw new MalformedInputException(count);
                     }
-                    break;
-                case 1:
-                    if (leadByte < 0xC2 || leadByte > 0xDF) {
+                    if (leadByte == 0xF4 && aByte > 0x8F) {
                         throw new MalformedInputException(count);
                     }
-                    state = TRAIL_BYTE_1;
-                    break;
-                case 2:
-                    if (leadByte < 0xE0 || leadByte > 0xEF) {
+                    if (leadByte == 0xE0 && aByte < 0xA0) {
                         throw new MalformedInputException(count);
                     }
-                    state = TRAIL_BYTE_1;
-                    break;
-                case 3:
-                    if (leadByte < 0xF0 || leadByte > 0xF4) {
+                    if (leadByte == 0xED && aByte > 0x9F) {
+                        throw new MalformedInputException(count);
+                    }
+                    // falls through to regular trail-byte test!!
+                case TRAIL_BYTE:
+                    if (aByte < 0x80 || aByte > 0xBF) {
                         throw new MalformedInputException(count);
                     }
-                    state = TRAIL_BYTE_1;
+                    if (--length == 0) {
+                        state = LEAD_BYTE;
+                    } else {
+                        state = TRAIL_BYTE;
+                    }
                     break;
-                default:
-                    // too long! Longest valid UTF-8 is 4 bytes (lead + three)
-                    // or if < 0 we got a trail byte in the lead byte position
-                    throw new MalformedInputException(count);
-                } // switch (length)
-                break;
-
-            case TRAIL_BYTE_1:
-                if (leadByte == 0xF0 && aByte < 0x90) {
-                    throw new MalformedInputException(count);
-                }
-                if (leadByte == 0xF4 && aByte > 0x8F) {
-                    throw new MalformedInputException(count);
-                }
-                if (leadByte == 0xE0 && aByte < 0xA0) {
-                    throw new MalformedInputException(count);
-                }
-                if (leadByte == 0xED && aByte > 0x9F) {
-                    throw new MalformedInputException(count);
-                }
-                // falls through to regular trail-byte test!!
-            case TRAIL_BYTE:
-                if (aByte < 0x80 || aByte > 0xBF) {
-                    throw new MalformedInputException(count);
-                }
-                if (--length == 0) {
-                    state = LEAD_BYTE;
-                } else {
-                    state = TRAIL_BYTE;
-                }
-                break;
             } // switch (state)
             count++;
         }
@@ -577,29 +577,29 @@ public static int bytesToCodePoint(ByteBuffer bytes) {
         int ch = 0;
 
         switch (extraBytesToRead) { // CHECKSTYLE IGNORE THIS LINE: missing switch default
-        case 5:
-            ch += (bytes.get() & 0xFF);
-            ch <<= 6; /* remember, illegal UTF-8 */
-            // CHECKSTYLE IGNORE THIS LINE: fall through
-        case 4:
-            ch += (bytes.get() & 0xFF);
-            ch <<= 6; /* remember, illegal UTF-8 */
-            // CHECKSTYLE IGNORE THIS LINE: fall through
-        case 3:
-            ch += (bytes.get() & 0xFF);
-            ch <<= 6;
-            // CHECKSTYLE IGNORE THIS LINE: fall through
-        case 2:
-            ch += (bytes.get() & 0xFF);
-            ch <<= 6;
-            // CHECKSTYLE IGNORE THIS LINE: fall through
-        case 1:
-            ch += (bytes.get() & 0xFF);
-            ch <<= 6;
-            // CHECKSTYLE IGNORE THIS LINE: fall through
-        case 0:
-            ch += (bytes.get() & 0xFF);
-            // CHECKSTYLE IGNORE THIS LINE: fall through, missing switch default
+            case 5:
+                ch += (bytes.get() & 0xFF);
+                ch <<= 6; /* remember, illegal UTF-8 */
+                // CHECKSTYLE IGNORE THIS LINE: fall through
+            case 4:
+                ch += (bytes.get() & 0xFF);
+                ch <<= 6; /* remember, illegal UTF-8 */
+                // CHECKSTYLE IGNORE THIS LINE: fall through
+            case 3:
+                ch += (bytes.get() & 0xFF);
+                ch <<= 6;
+                // CHECKSTYLE IGNORE THIS LINE: fall through
+            case 2:
+                ch += (bytes.get() & 0xFF);
+                ch <<= 6;
+                // CHECKSTYLE IGNORE THIS LINE: fall through
+            case 1:
+                ch += (bytes.get() & 0xFF);
+                ch <<= 6;
+                // CHECKSTYLE IGNORE THIS LINE: fall through
+            case 0:
+                ch += (bytes.get() & 0xFF);
+                // CHECKSTYLE IGNORE THIS LINE: fall through, missing switch default
         }
 
         ch -= offsetsFromUTF8[extraBytesToRead];
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/AutoType.java b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/AutoType.java
index 3b30b04c85b179..eeebc76e7e5816 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/AutoType.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/AutoType.java
@@ -31,11 +31,11 @@ public static boolean isWrapperOfPrimitiveType(Class type) {
     }
 
     public static Class getPrimitiveType(Class wrapperType) {
-        return (Class)WRAPPER_TO_PRIMITIVE.get(wrapperType);
+        return WRAPPER_TO_PRIMITIVE.get(wrapperType);
     }
 
     public static Class getWrapperType(Class primitiveType) {
-        return (Class)PRIMITIVE_TO_WRAPPER.get(primitiveType);
+        return PRIMITIVE_TO_WRAPPER.get(primitiveType);
     }
 
     static {
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ConstructorReflection.java b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ConstructorReflection.java
index e270078739fc1f..1636554fbc8e11 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ConstructorReflection.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ConstructorReflection.java
@@ -36,9 +36,9 @@ public static <T> T invoke(Constructor constructor, Object... initArgs) {
         } catch (InvocationTargetException e) {
             Throwable cause = e.getCause();
             if (cause instanceof Error) {
-                throw (Error)cause;
+                throw (Error) cause;
             } else if (cause instanceof RuntimeException) {
-                throw (RuntimeException)cause;
+                throw (RuntimeException) cause;
             } else {
                 throw new IllegalStateException("Should never get here", cause);
             }
@@ -62,7 +62,7 @@ public static <T> T newInstance(Class aClass, Object... nonNullArgs
      * invoke the constructor with no parameters of {@aClass Class}.
      */
    private static <T> T newInstance(Class aClass) {
-        return (T) newInstance((Class)aClass, ParameterReflection.NO_PARAMETERS);
+        return (T) newInstance((Class) aClass, ParameterReflection.NO_PARAMETERS);
    }
 
    /**
@@ -124,12 +124,12 @@ private static Constructor findCompatibleConstructor(Class theClass, C
         Constructor[] declaredConstructors = theClass.getDeclaredConstructors();
         Constructor[] declaredConstructorsArray = declaredConstructors;
 
-        for(Constructor declaredConstructor : declaredConstructorsArray) {
+        for (Constructor declaredConstructor : declaredConstructorsArray) {
             Class[] declaredParamTypes = declaredConstructor.getParameterTypes();
             int gap = declaredParamTypes.length - argTypes.length;
-            if (gap == 0 && (ParameterReflection.matchesParameterTypes(declaredParamTypes, argTypes) ||
-                    ParameterReflection.acceptsArgumentTypes(declaredParamTypes, argTypes) ) &&
-                    (found == null || ParameterReflection.hasMoreSpecificTypes(declaredParamTypes, foundParameters))) {
+            if (gap == 0 && (ParameterReflection.matchesParameterTypes(declaredParamTypes, argTypes)
+                    || ParameterReflection.acceptsArgumentTypes(declaredParamTypes, argTypes))
+                    && (found == null || ParameterReflection.hasMoreSpecificTypes(declaredParamTypes, foundParameters))) {
                 found = (Constructor) declaredConstructor;
                 foundParameters = declaredParamTypes;
             }
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/Deencapsulation.java b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/Deencapsulation.java
index 4aa1e988afec51..5fb33717e00222 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/Deencapsulation.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/Deencapsulation.java
@@ -21,11 +21,11 @@ public static <T> T getField(Object objectWithField, Class fieldType) {
     }
 
     public static <T> T getField(Class classWithStaticField, String fieldName) {
-        return FieldReflection.getField(classWithStaticField, fieldName, (Object)null);
+        return FieldReflection.getField(classWithStaticField, fieldName, null);
     }
 
     public static <T> T getField(Class classWithStaticField, Class fieldType) {
-        return FieldReflection.getField(classWithStaticField, fieldType, (Object)null);
+        return FieldReflection.getField(classWithStaticField, fieldType, null);
     }
 
     public static void setField(Object objectWithField, String fieldName, Object fieldValue) {
@@ -33,15 +33,15 @@ public static void setField(Object objectWithField, String fieldName, Object fie
     }
 
     public static void setField(Object objectWithField, Object fieldValue) {
-        FieldReflection.setField(objectWithField.getClass(), objectWithField, (String)null, fieldValue);
+        FieldReflection.setField(objectWithField.getClass(), objectWithField, null, fieldValue);
     }
 
     public static void setField(Class classWithStaticField, String fieldName, Object fieldValue) {
-        FieldReflection.setField(classWithStaticField, (Object)null, fieldName, fieldValue);
+        FieldReflection.setField(classWithStaticField, null, fieldName, fieldValue);
     }
 
     public static void setField(Class classWithStaticField, Object fieldValue) {
-        FieldReflection.setField(classWithStaticField, (Object)null, (String)null, fieldValue);
+        FieldReflection.setField(classWithStaticField, null, null, fieldValue);
     }
 
     public static <T> T invoke(Object objectWithMethod, String methodName, Object... nonNullArgs) {
@@ -50,7 +50,7 @@ public static <T> T invoke(Object objectWithMethod, String methodName, Object...
     }
 
     public static <T> T invoke(Class classWithStaticMethod, String methodName, Object... nonNullArgs) {
-        return MethodReflection.invoke(classWithStaticMethod, (Object)null, methodName, nonNullArgs);
+        return MethodReflection.invoke(classWithStaticMethod, null, methodName, nonNullArgs);
     }
 
     public static <T> T newInstance(Class classToInstantiate, Object... nonNullArgs) {
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/FieldReflection.java b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/FieldReflection.java
index e7aaae6daf6646..1974d4f53b4f43 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/FieldReflection.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/FieldReflection.java
@@ -140,7 +140,7 @@ private static Field getDeclaredFieldInSingleClass(Class theClass, Type desir
         Field found = null;
         Field[] fields = theClass.getDeclaredFields();
 
-        for(Field field : fields) {
+        for (Field field : fields) {
             if (!field.isSynthetic()) {
                 Type fieldType = field.getGenericType();
                 if (instanceField != Modifier.isStatic(field.getModifiers()) && isCompatibleFieldType(fieldType, desiredType, forAssignment)) {
@@ -178,9 +178,10 @@ private static boolean isCompatibleFieldType(Type fieldType, Type desiredType, b
     }
 
     private static String errorMessageForMoreThanOneFieldFound(Type desiredFieldType, boolean instanceField, boolean forAssignment, Field firstField, Field secondField) {
-        return "More than one " + (instanceField ? "instance" : "static") + " field " + (forAssignment ? "to" : "from") + " which a value of type " +
-                getTypeName(desiredFieldType) + (forAssignment ? " can be assigned" : " can be read") + " exists in " +
-                secondField.getDeclaringClass() + ": " + firstField.getName() + ", " + secondField.getName();
+        return "More than one " + (instanceField ? "instance" : "static") + " field " + (forAssignment ? "to" : "from")
+                + " which a value of type "
+                + getTypeName(desiredFieldType) + (forAssignment ? " can be assigned" : " can be read") + " exists in "
+                + secondField.getDeclaringClass() + ": " + firstField.getName() + ", " + secondField.getName();
     }
 
     private static String getTypeName(Type type) {
@@ -207,7 +208,7 @@ private static <T> T getFieldValue(Field field, Object targetObject) {
         makeAccessible(field);
 
         try {
-            return (T)field.get(targetObject);
+            return (T) field.get(targetObject);
         } catch (IllegalAccessException e) {
             throw new RuntimeException(e);
         }
@@ -254,19 +255,19 @@ private static void setStaticFinalField(Field field, Object value) throws Illega
      */
     public static Class getClassType(Type declaredType) {
-        while(!(declaredType instanceof Class)) {
+        while (!(declaredType instanceof Class)) {
             if (declaredType instanceof ParameterizedType) {
-                return (Class)((ParameterizedType)declaredType).getRawType();
+                return (Class) ((ParameterizedType) declaredType).getRawType();
             }
 
             if (!(declaredType instanceof TypeVariable)) {
                 throw new IllegalArgumentException("Type of unexpected kind: " + declaredType);
             }
 
-            declaredType = ((TypeVariable)declaredType).getBounds()[0];
+            declaredType = ((TypeVariable) declaredType).getBounds()[0];
         }
 
-        return (Class)declaredType;
+        return (Class) declaredType;
     }
 
     // ensure that field is accessible
@@ -278,9 +279,9 @@ public static void makeAccessible(AccessibleObject classMember) {
 
     // return true if the two types are same type.
     private static boolean isSameType(Class firstType, Class secondType) {
-        return firstType == secondType ||
-                firstType.isPrimitive() && firstType == AutoType.getPrimitiveType(secondType) ||
-                secondType.isPrimitive() && firstType == AutoType.getPrimitiveType(secondType);
+        return firstType == secondType
+                || firstType.isPrimitive() && firstType == AutoType.getPrimitiveType(secondType)
+                || secondType.isPrimitive() && firstType == AutoType.getPrimitiveType(secondType);
     }
 }
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/MethodReflection.java b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/MethodReflection.java
index f81e957a063470..47bd5e5f2ad6c4 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/MethodReflection.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/MethodReflection.java
@@ -48,11 +48,11 @@ public static <T> T invoke(Object targetInstance, Method method, Object... metho
         } catch (InvocationTargetException e) {
             Throwable cause = e.getCause();
             if (cause instanceof Error) {
-                throw (Error)cause;
+                throw (Error) cause;
             } else if (cause instanceof RuntimeException) {
-                throw (RuntimeException)cause;
+                throw (RuntimeException) cause;
             } else {
-                ThrowOfCheckedException.doThrow((Exception)cause);
+                ThrowOfCheckedException.doThrow((Exception) cause);
                 return null;
             }
         }
@@ -103,13 +103,14 @@ private static Method findCompatibleMethodInClass(Class theClass, String meth
         Class[] foundParamTypes = null;
         Method[] methods = theClass.getDeclaredMethods();
 
-        for(Method declaredMethod : methods) {
+        for (Method declaredMethod : methods) {
             if (declaredMethod.getName().equals(methodName)) {
                 Class[] declaredParamTypes = declaredMethod.getParameterTypes();
                 int gap = declaredParamTypes.length - argTypes.length;
-                if (gap == 0 && (ParameterReflection.matchesParameterTypes(declaredParamTypes, argTypes) ||
-                        ParameterReflection.acceptsArgumentTypes(declaredParamTypes, argTypes) ) &&
-                        (foundParamTypes == null || ParameterReflection.hasMoreSpecificTypes(declaredParamTypes, foundParamTypes))) {
+                if (gap == 0 && (ParameterReflection.matchesParameterTypes(declaredParamTypes, argTypes)
+                        || ParameterReflection.acceptsArgumentTypes(declaredParamTypes, argTypes))
+                        && (foundParamTypes == null
+                        || ParameterReflection.hasMoreSpecificTypes(declaredParamTypes, foundParamTypes))) {
                     found = declaredMethod;
                     foundParamTypes = declaredParamTypes;
                 }
@@ -129,7 +130,7 @@ private static Method findCompatibleMethodIfAvailable(Class theClass, String
         }
 
         Method methodFound = null;
-        while(true) {
+        while (true) {
             Method compatibleMethod = findCompatibleMethodInClass(theClass, methodName, argTypes);
             if (compatibleMethod != null && (methodFound == null || ParameterReflection.hasMoreSpecificTypes(compatibleMethod.getParameterTypes(), methodFound.getParameterTypes()))) {
                 methodFound = compatibleMethod;
@@ -155,8 +156,8 @@ public static void makeAccessible(AccessibleObject classMember) {
 
     // return true if the two types are same type.
     private static boolean isSameType(Class firstType, Class secondType) {
-        return firstType == secondType ||
-                firstType.isPrimitive() && firstType == AutoType.getPrimitiveType(secondType) ||
-                secondType.isPrimitive() && firstType == AutoType.getPrimitiveType(secondType);
+        return firstType == secondType
+                || firstType.isPrimitive() && firstType == AutoType.getPrimitiveType(secondType)
+                || secondType.isPrimitive() && firstType == AutoType.getPrimitiveType(secondType);
     }
 }
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ParameterReflection.java b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ParameterReflection.java
index f97195305aa964..9c47ffe5d84574 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ParameterReflection.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ParameterReflection.java
@@ -26,7 +26,7 @@ static boolean matchesParameterTypes(Class[] declaredTypes, Class[] specif
         if (declaredTypes == null || specifiedTypes == null) {
             throw new IllegalArgumentException();
         }
-        for(int i = 0; i < declaredTypes.length; ++i) {
+        for (int i = 0; i < declaredTypes.length; ++i) {
             Class declaredType = declaredTypes[i];
             Class specifiedType = specifiedTypes[i];
             if (!isSameType(declaredType, specifiedType)) {
@@ -44,7 +44,7 @@ static boolean acceptsArgumentTypes(Class[] paramTypes, Class[] argTypes)
         if (paramTypes == null || argTypes == null) {
             throw new IllegalArgumentException();
         }
-        for(int i = 0; i < paramTypes.length; ++i) {
+        for (int i = 0; i < paramTypes.length; ++i) {
             Class parType = paramTypes[i];
             Class argType = argTypes[i];
             if (!isSameType(parType, argType) && !parType.isAssignableFrom(argType)) {
@@ -67,7 +67,7 @@ static Class[] getArgumentTypesFromArgumentValues(Object... args) {
         } else {
             Class[] argTypes = new Class[args.length];
 
-            for(int i = 0; i < args.length; ++i) {
+            for (int i = 0; i < args.length; ++i) {
                 argTypes[i] = getArgumentTypeFromArgumentValue(i, args);
             }
 
@@ -102,7 +102,7 @@ static boolean hasMoreSpecificTypes(Class[] currentTypes, Class[] previous
         if (currentTypes == null || previousTypes == null) {
             throw new IllegalArgumentException();
         }
-        for(int i = 0; i < currentTypes.length; ++i) {
+        for (int i = 0; i < currentTypes.length; ++i) {
             Class current = wrappedIfPrimitive(currentTypes[i]);
             Class previous = wrappedIfPrimitive(previousTypes[i]);
             if (current != previous && previous.isAssignableFrom(current)) {
@@ -160,9 +160,9 @@ private static Class wrappedIfPrimitive(Class parameterType) {
 
     // return true if the two types are same type.
     private static boolean isSameType(Class firstType, Class secondType) {
-        return firstType == secondType ||
-                firstType.isPrimitive() && firstType == AutoType.getPrimitiveType(secondType) ||
-                secondType.isPrimitive() && secondType == AutoType.getPrimitiveType(firstType);
+        return firstType == secondType
+                || firstType.isPrimitive() && firstType == AutoType.getPrimitiveType(secondType)
+                || secondType.isPrimitive() && secondType == AutoType.getPrimitiveType(firstType);
     }
 }
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertiesSet.java b/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertiesSet.java
index 0eb32c6ad7d68b..5a0fd62a5116e3 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertiesSet.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertiesSet.java
@@ -44,8 +44,8 @@ private PropertiesSet(T schemaGroup, Map properties) {
 
     @SuppressWarnings("unchecked")
     public <U> U get(PropertySchema prop) throws NoSuchElementException {
-        return properties.containsKey(prop.getName()) ?
-                (U) properties.get(prop.getName()) : prop.getDefaultValue().get();
+        return properties.containsKey(prop.getName())
+                ? (U) properties.get(prop.getName()) : prop.getDefaultValue().get();
     }
 
     public List getModifiedSchemas() {
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertySchema.java b/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertySchema.java
index af61642ac11c00..b68d2072bfdbe3 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertySchema.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertySchema.java
@@ -31,7 +31,7 @@
 import java.util.Date;
 import java.util.Optional;
 
-@SuppressWarnings({"unchecked","rawtypes"})
+@SuppressWarnings({"unchecked", "rawtypes"})
 public abstract class PropertySchema {
     private final String name;
     private final boolean required;
@@ -190,8 +190,8 @@ public static final class BooleanProperty extends ComparableProperty {
         @Override
         public Boolean read(String rawVal) {
-            if (rawVal == null ||
-                    (!rawVal.equalsIgnoreCase("true") && !rawVal.equalsIgnoreCase("false"))) {
+            if (rawVal == null || (!rawVal.equalsIgnoreCase("true")
+                    && !rawVal.equalsIgnoreCase("false"))) {
                 throw new IllegalArgumentException(String.format("Invalid boolean : %s, use true or false", rawVal));
             }
diff --git a/fe/fe-common/src/test/java/org/apache/doris/common/io/BitmapValueTest.java b/fe/fe-common/src/test/java/org/apache/doris/common/io/BitmapValueTest.java
index 8c802da3ee0f99..785574981dbb5c 100644
--- a/fe/fe-common/src/test/java/org/apache/doris/common/io/BitmapValueTest.java
+++ b/fe/fe-common/src/test/java/org/apache/doris/common/io/BitmapValueTest.java
@@ -117,7 +117,7 @@ public void testBitmapValueAnd() {
 
         // empty and bitmap
         BitmapValue bitmapValue3 = new BitmapValue();
-        BitmapValue bitmapValue3Dot1 =new BitmapValue();
+        BitmapValue bitmapValue3Dot1 = new BitmapValue();
         bitmapValue3Dot1.add(1);
         bitmapValue3Dot1.add(2);
         bitmapValue3.and(bitmapValue3Dot1);
@@ -267,7 +267,7 @@ public void testBitmapValueOr() {
         BitmapValue bitmapValue7 = new BitmapValue();
         bitmapValue7.add(1);
         bitmapValue7.add(2);
-        BitmapValue bitmapValue7Dot1 =new BitmapValue();
+        BitmapValue bitmapValue7Dot1 = new BitmapValue();
         bitmapValue7.or(bitmapValue7Dot1);
         Assert.assertTrue(bitmapValue7.getBitmapType() == BitmapValue.BITMAP_VALUE);
 
@@ -275,7 +275,7 @@ public void testBitmapValueOr() {
         BitmapValue bitmapValue8 = new BitmapValue();
         bitmapValue8.add(1);
         bitmapValue8.add(2);
-        BitmapValue bitmapValue8Dot1 =new BitmapValue();
+        BitmapValue bitmapValue8Dot1 = new BitmapValue();
         bitmapValue8Dot1.add(1);
         bitmapValue8.or(bitmapValue8Dot1);
         Assert.assertTrue(bitmapValue8.getBitmapType() == BitmapValue.BITMAP_VALUE);
 
@@ -284,7 +284,7 @@ public void testBitmapValueOr() {
         BitmapValue bitmapValue9 = new BitmapValue();
         bitmapValue9.add(1);
         bitmapValue9.add(2);
-        BitmapValue bitmapValue9Dot1 =new BitmapValue();
+        BitmapValue bitmapValue9Dot1 = new BitmapValue();
         bitmapValue9.or(bitmapValue9Dot1);
         Assert.assertTrue(bitmapValue9.getBitmapType() == BitmapValue.BITMAP_VALUE);
     }
@@ -459,7 +459,7 @@
 
     @Test
-    public void testBitmapOrDeepCopy(){
+    public void testBitmapOrDeepCopy() {
         // this test is added for issue #6452
         // baseIndex bitmap type == Roaring64Map
         BitmapValue baseIndex1 = new BitmapValue();
diff --git a/fe/fe-common/src/test/java/org/apache/doris/common/io/HllTest.java b/fe/fe-common/src/test/java/org/apache/doris/common/io/HllTest.java
index 78ba33b6929f8c..fabc7c1f8da70f 100644
--- a/fe/fe-common/src/test/java/org/apache/doris/common/io/HllTest.java
+++ b/fe/fe-common/src/test/java/org/apache/doris/common/io/HllTest.java
@@ -76,8 +76,8 @@ public void hllBasicTest() throws IOException {
         }
         Assert.assertTrue(sparseHll.getType() == Hll.HLL_DATA_FULL);
         // 2% error rate
-        Assert.assertTrue(sparseHll.estimateCardinality() > Hll.HLL_SPARSE_THRESHOLD * (1 - 0.02) &&
-                sparseHll.estimateCardinality() < Hll.HLL_SPARSE_THRESHOLD * (1 + 0.02));
+        Assert.assertTrue(sparseHll.estimateCardinality() > Hll.HLL_SPARSE_THRESHOLD * (1 - 0.02)
+                && sparseHll.estimateCardinality() < Hll.HLL_SPARSE_THRESHOLD * (1 + 0.02));
 
         ByteArrayOutputStream sparseOutputStream = new ByteArrayOutputStream();
         DataOutput sparseOutput = new DataOutputStream(sparseOutputStream);
@@ -97,8 +97,8 @@ public void hllBasicTest() throws IOException {
         Assert.assertTrue(fullHll.getType() == Hll.HLL_DATA_FULL);
         // the result 32748 is consistent with C++ 's implementation
         Assert.assertTrue(fullHll.estimateCardinality() == 32748);
-        Assert.assertTrue(fullHll.estimateCardinality() > Short.MAX_VALUE * (1 - 0.02) &&
-                fullHll.estimateCardinality() < Short.MAX_VALUE * (1 + 0.02));
+        Assert.assertTrue(fullHll.estimateCardinality() > Short.MAX_VALUE * (1 - 0.02)
+                && fullHll.estimateCardinality() < Short.MAX_VALUE * (1 + 0.02));
 
         ByteArrayOutputStream fullHllOutputStream = new ByteArrayOutputStream();
         DataOutput fullHllOutput = new DataOutputStream(fullHllOutputStream);
@@ -116,18 +116,18 @@ public void hllBasicTest() throws IOException {
     @Test
     public void testCompareEstimateValueWithBe() throws IOException {
         //empty
-        {
+        { // CHECKSTYLE IGNORE THIS LINE
            Hll hll = new Hll();
            long estimateValue = hll.estimateCardinality();
            byte[] serializedByte = serializeHll(hll);
            hll = deserializeHll(serializedByte);
            Assert.assertTrue(estimateValue == hll.estimateCardinality());
-        }
+        } // CHECKSTYLE IGNORE THIS LINE
 
        // explicit [0. 100)
        Hll explicitHll = new Hll();
-        {
+        { // CHECKSTYLE IGNORE THIS LINE
            for (int i = 0; i < 100; i++) {
                explicitHll.updateWithHash(i);
            }
@@ -144,11 +144,11 @@ public void testCompareEstimateValueWithBe() throws IOException {
            explicitHll.merge(otherHll);
            // compare with C++ version result
            Assert.assertTrue(explicitHll.estimateCardinality() == 100);
-        }
+        } // CHECKSTYLE IGNORE THIS LINE
 
        // sparse [1024, 2048)
        Hll sparseHll = new Hll();
-        {
+        { // CHECKSTYLE IGNORE THIS LINE
            for (int i = 0; i < 1024; i++) {
                sparseHll.updateWithHash(i + 1024);
            }
@@ -174,11 +174,11 @@ public void testCompareEstimateValueWithBe() throws IOException {
            Assert.assertTrue(cardinality > 1000 && cardinality < 1045);
            // compare with C++ version result
            Assert.assertTrue(cardinality == 1023);
-        }
+        } // CHECKSTYLE IGNORE THIS LINE
 
        // full [64 * 1024, 128 * 1024)
        Hll fullHll = new Hll();
-        {
+        { // CHECKSTYLE IGNORE THIS LINE
            for (int i = 0; i < 64 * 1024; i++) {
                fullHll.updateWithHash(64 * 1024 + i);
            }
@@ -195,16 +195,16 @@ public void testCompareEstimateValueWithBe() throws IOException {
 
            // compare with C++ version result
            Assert.assertTrue(preValue == 66112);
-        }
+        } // CHECKSTYLE IGNORE THIS LINE
 
        // merge explicit to empty_hll
-        {
+        { // CHECKSTYLE IGNORE THIS LINE
            Hll newExplicit = new Hll();
            newExplicit.merge(explicitHll);
            Assert.assertTrue(newExplicit.estimateCardinality() == 100);
 
            // merge another explicit
-            {
+            { // CHECKSTYLE IGNORE THIS LINE
                Hll otherHll = new Hll();
                for (int i = 100; i < 200; i++) {
                    otherHll.updateWithHash(i);
                }
@@ -214,18 +214,18 @@ public void testCompareEstimateValueWithBe() throws IOException {
                Assert.assertTrue(otherHll.estimateCardinality() > 190);
                // compare with C++ version result
                Assert.assertTrue(otherHll.estimateCardinality() == 201);
-            }
+            } // CHECKSTYLE IGNORE THIS LINE
 
            // merge full
-            {
+            { // CHECKSTYLE IGNORE THIS LINE
                newExplicit.merge(fullHll);
                Assert.assertTrue(newExplicit.estimateCardinality() > fullHll.estimateCardinality());
                // compare with C++ version result
                Assert.assertTrue(newExplicit.estimateCardinality() == 66250);
-            }
-        }
+            } // CHECKSTYLE IGNORE THIS LINE
+        } // CHECKSTYLE IGNORE THIS LINE
 
        // merge sparse into empty
-        {
+        { // CHECKSTYLE IGNORE THIS LINE
            Hll newSparseHll = new Hll();
            newSparseHll.merge(sparseHll);
            Assert.assertTrue(sparseHll.estimateCardinality() == newSparseHll.estimateCardinality());
@@ -243,7 +243,7 @@ public void testCompareEstimateValueWithBe() throws IOException {
            Assert.assertTrue(newSparseHll.estimateCardinality() > fullHll.estimateCardinality());
            // compare with C++ version result
            Assert.assertTrue(newSparseHll.estimateCardinality() == 67316);
-        }
+        } // CHECKSTYLE IGNORE THIS LINE
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/PaloFe.java b/fe/fe-core/src/main/java/org/apache/doris/PaloFe.java
index 612c97893740ac..32ebb5d6917a48 100755
--- a/fe/fe-core/src/main/java/org/apache/doris/PaloFe.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/PaloFe.java
@@ -145,7 +145,7 @@ public static void start(String dorisHomeDir, String pidDir, String[] args) {
             httpServer.setWorkers(Config.jetty_server_workers);
             httpServer.setMaxThreads(Config.jetty_threadPool_maxThreads);
             httpServer.setMinThreads(Config.jetty_threadPool_minThreads);
-            httpServer.setMaxHttpHeaderSize (Config.jetty_server_max_http_header_size);
+            httpServer.setMaxHttpHeaderSize(Config.jetty_server_max_http_header_size);
             httpServer.start();
 
             qeService.start();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterJobV2.java
index 34fc631f2ee9f2..cb58707c1a1977 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterJobV2.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterJobV2.java
@@ -45,8 +45,7 @@ public abstract class AlterJobV2 implements Writable {
     public enum JobState {
         PENDING, // Job is created
-        WAITING_TXN, // New replicas are created and Shadow catalog object is visible for incoming txns,
-                     // waiting for previous txns to be finished
+        WAITING_TXN, // New replicas are created and Shadow catalog object is visible for incoming txns, waiting for previous txns to be finished
         RUNNING, // alter tasks are sent to BE, and waiting for them finished.
         FINISHED, // job is done
         CANCELLED; // job is cancelled(failed or be cancelled by user)
@@ -159,17 +158,17 @@ public synchronized void run() {
 
         try {
             switch (jobState) {
-            case PENDING:
-                runPendingJob();
-                break;
-            case WAITING_TXN:
-                runWaitingTxnJob();
-                break;
-            case RUNNING:
-                runRunningJob();
-                break;
-            default:
-                break;
+                case PENDING:
+                    runPendingJob();
+                    break;
+                case WAITING_TXN:
+                    runWaitingTxnJob();
+                    break;
+                case RUNNING:
+                    runRunningJob();
+                    break;
+                default:
+                    break;
             }
         } catch (AlterCancelException e) {
             cancelImpl(e.getMessage());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java
index 4d7eb8f5a5ba6e..2c8e3dc550aef1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java
@@ -246,8 +246,8 @@ public void processBatchAddRollup(List alterClauses, Database db, O
             Map properties = addRollupClause.getProperties();
             if (properties == null || !properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_FORMAT)
                     || !properties.get(PropertyAnalyzer.PROPERTIES_STORAGE_FORMAT).equalsIgnoreCase("v2")) {
-                throw new DdlException("Table[" + olapTable.getName() + "] can not " +
-                        "add segment v2 rollup index without setting storage format to v2.");
+                throw new DdlException("Table[" + olapTable.getName() + "] can not "
+                        + "add segment v2 rollup index without setting storage format to v2.");
             }
             rollupIndexName = NEW_STORAGE_FORMAT_INDEX_NAME_PREFIX + olapTable.getName();
             changeStorageFormat = true;
@@ -461,8 +461,8 @@ private List checkAndPrepareMaterializedView(CreateMaterializedViewStmt
             }
             if (baseAggregationType != mvAggregationType) {
                 throw new DdlException(
-                        "The aggregation type of column[" + mvColumnName + "] must be same as the aggregate " +
-                        "type of base column in aggregate table");
+                        "The aggregation type of column[" + mvColumnName + "] must be same as the aggregate "
+                                + "type of base column in aggregate table");
             }
             if (baseAggregationType != null && baseAggregationType.isReplaceFamily() && olapTable
                     .getKeysNum() != numOfKeys) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
index 89b40db88b6f74..46c65acf211e09 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
@@ -255,8 +255,8 @@ private void processDropColumn(DropColumnClause alterClause, OlapTable olapTable
             for (Column column : baseSchema) {
                 if (column.isKey() && column.getName().equalsIgnoreCase(dropColName)) {
                     isKey = true;
-                } else if (AggregateType.REPLACE == column.getAggregationType() ||
-                        AggregateType.REPLACE_IF_NOT_NULL == column.getAggregationType()) {
+                } else if (AggregateType.REPLACE == column.getAggregationType()
+                        || AggregateType.REPLACE_IF_NOT_NULL == column.getAggregationType()) {
                     hasReplaceColumn = true;
                 }
             }
@@ -273,8 +273,8 @@ private void processDropColumn(DropColumnClause alterClause, OlapTable olapTable
             for (Column column : targetIndexSchema) {
                 if (column.isKey() && column.getName().equalsIgnoreCase(dropColName)) {
                     isKey = true;
-                } else if (AggregateType.REPLACE == column.getAggregationType() ||
-                        AggregateType.REPLACE_IF_NOT_NULL == column.getAggregationType()) {
+                } else if (AggregateType.REPLACE == column.getAggregationType()
+                        || AggregateType.REPLACE_IF_NOT_NULL == column.getAggregationType()) {
                     hasReplaceColumn = true;
                 }
             }
@@ -360,7 +360,6 @@ private void processModifyColumn(ModifyColumnClause alterClause, Table externalT
         String newColName = modColumn.getName();
         boolean hasColPos = (columnPos != null && !columnPos.isFirst());
         boolean found = false;
-        boolean typeChanged = false;
         int modColIndex = -1;
         int lastColIndex = -1;
@@ -369,9 +368,6 @@ private void processModifyColumn(ModifyColumnClause alterClause, Table externalT
             if (col.getName().equalsIgnoreCase(newColName)) {
                 modColIndex = i;
                 found = true;
-                if (!col.equals(modColumn)) {
-                    typeChanged = true;
-                }
             }
             if (hasColPos) {
                 if (col.getName().equalsIgnoreCase(columnPos.getLastCol())) {
@@ -781,8 +777,8 @@ private void addColumnInternal(OlapTable olapTable, Column newColumn, ColumnPosi
             throw new DdlException("Can not assign aggregation method on column in Duplicate data model table: " + newColName);
         }
         if (!newColumn.isKey()) {
-            if (targetIndexId != -1L &&
-                    olapTable.getIndexMetaByIndexId(targetIndexId).getKeysType() == KeysType.AGG_KEYS) {
+            if (targetIndexId != -1L
+                    && olapTable.getIndexMetaByIndexId(targetIndexId).getKeysType() == KeysType.AGG_KEYS) {
                 throw new DdlException("Please add non-key column on base table directly");
             }
             newColumn.setAggregationType(AggregateType.NONE, true);
@@ -1402,8 +1398,8 @@ protected void runAfterCatalogReady() {
     private void runAlterJobV2() {
         runnableSchemaChangeJobV2.values().forEach(
             alterJobsV2 -> {
-                if (!alterJobsV2.isDone() && !activeSchemaChangeJobsV2.containsKey(alterJobsV2.getJobId()) &&
-                        activeSchemaChangeJobsV2.size() < MAX_ACTIVE_SCHEMA_CHANGE_JOB_V2_SIZE) {
+                if (!alterJobsV2.isDone() && !activeSchemaChangeJobsV2.containsKey(alterJobsV2.getJobId())
+                        && activeSchemaChangeJobsV2.size() < MAX_ACTIVE_SCHEMA_CHANGE_JOB_V2_SIZE) {
                     if (FeConstants.runningUnitTest) {
                         alterJobsV2.run();
                     } else {
@@ -1494,9 +1490,10 @@ public void process(List alterClauses, String clusterName, Database
                 DynamicPartitionUtil.checkInputDynamicPartitionProperties(properties, olapTable.getPartitionInfo());
             } catch (DdlException e) {
                 // This table is not a dynamic partition table and didn't supply all dynamic partition properties
-                throw new DdlException("Table " + db.getFullName() + "." +
-                        olapTable.getName() + " is not a dynamic partition table. Use command `HELP ALTER TABLE` " +
-                        "to see how to change a normal table to a dynamic partition table.");
+                throw new DdlException("Table " + db.getFullName() + "."
+                        + olapTable.getName() + " is not a dynamic partition table."
+ + " Use command `HELP ALTER TABLE` " + + "to see how to change a normal table to a dynamic partition table."); } } Catalog.getCurrentCatalog().modifyTableDynamicPartition(db, olapTable, properties); @@ -1662,8 +1659,8 @@ public void updatePartitionsInMemoryMeta(Database db, try { updatePartitionInMemoryMeta(db, olapTable.getName(), partitionName, isInMemory); } catch (Exception e) { - String errMsg = "Failed to update partition[" + partitionName + "]'s 'in_memory' property. " + - "The reason is [" + e.getMessage() + "]"; + String errMsg = "Failed to update partition[" + partitionName + "]'s 'in_memory' property. " + + "The reason is [" + e.getMessage() + "]"; throw new DdlException(errMsg); } } @@ -1765,8 +1762,8 @@ public void cancel(CancelStmt stmt) throws DdlException { OlapTable olapTable = db.getOlapTableOrDdlException(tableName); olapTable.writeLockOrDdlException(); try { - if (olapTable.getState() != OlapTableState.SCHEMA_CHANGE && - olapTable.getState() != OlapTableState.WAITING_STABLE) { + if (olapTable.getState() != OlapTableState.SCHEMA_CHANGE + && olapTable.getState() != OlapTableState.WAITING_STABLE) { throw new DdlException("Table[" + tableName + "] is not under SCHEMA_CHANGE."); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java index 605df4cb96adba..2d55523d7567d1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java @@ -273,7 +273,7 @@ protected void runPendingJob() throws AlterCancelException { AgentTaskQueue.addBatchTask(batchTask); AgentTaskExecutor.submit(batchTask); long timeout = Math.min(Config.tablet_create_timeout_second * 1000L * totalReplicaNum, - Config.max_create_table_timeout_second * 1000L); + Config.max_create_table_timeout_second * 1000L); boolean ok = false; try { ok = countDownLatch.await(timeout, TimeUnit.MILLISECONDS); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AddColumnClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AddColumnClause.java index f92ee18ba0b24c..03afe36ed5144d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AddColumnClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AddColumnClause.java @@ -42,7 +42,9 @@ public class AddColumnClause extends AlterTableClause { // set in analyze private Column column; - public Column getColumn() { return column; } + public Column getColumn() { + return column; + } public ColumnPosition getColPos() { return colPos; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java index 988dff558439de..d2ea7443e90a59 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java @@ -77,7 +77,9 @@ public enum AggPhase { SECOND, SECOND_MERGE; - public boolean isMerge() { return this == FIRST_MERGE || this == SECOND_MERGE; } + public boolean isMerge() { + return this == FIRST_MERGE || this == SECOND_MERGE; + } }; // created by createMergeAggInfo() @@ -153,8 +155,13 @@ private AggregateInfo(AggregateInfo other) { (other.partitionExprs != null) ? 
Expr.cloneList(other.partitionExprs) : null; } - public List getPartitionExprs() { return partitionExprs; } - public void setPartitionExprs(List exprs) { partitionExprs = exprs; } + public List getPartitionExprs() { + return partitionExprs; + } + + public void setPartitionExprs(List exprs) { + partitionExprs = exprs; + } /** * Creates complete AggregateInfo for groupingExprs and aggExprs, including @@ -189,9 +196,8 @@ static public AggregateInfo create( // 1: if aggExprs don't have distinct or have multi distinct , create aggregate info for // one stage aggregation. // 2: if aggExprs have one distinct , create aggregate info for two stage aggregation - boolean isMultiDistinct = result.estimateIfContainsMultiDistinct(distinctAggExprs); - if (distinctAggExprs.isEmpty() - || isMultiDistinct) { + boolean isMultiDistinct = estimateIfContainsMultiDistinct(distinctAggExprs); + if (distinctAggExprs.isEmpty() || isMultiDistinct) { // It is used to map new aggr expr to old expr to help create an external // reference to the aggregation node tuple result.setIsMultiDistinct(isMultiDistinct); @@ -225,7 +231,7 @@ static public AggregateInfo create( * @return */ public static boolean estimateIfContainsMultiDistinct(List distinctAggExprs) - throws AnalysisException { + throws AnalysisException { if (distinctAggExprs == null || distinctAggExprs.size() <= 0) { return false; @@ -330,8 +336,8 @@ private void createDistinctAggInfo( public ArrayList getMaterializedAggregateExprs() { ArrayList result = Lists.newArrayList(); - for (Integer i: materializedSlots) { - result.add(aggregateExprs.get(i)); + for (Integer i : materializedSlots) { + result.add(aggregateExprs.get(i)); } return result; } @@ -340,18 +346,30 @@ public AggregateInfo getMergeAggInfo() { return mergeAggInfo; } - public boolean isMerge() { return aggPhase.isMerge(); } - public boolean isDistinctAgg() { return secondPhaseDistinctAggInfo != null; } - public ExprSubstitutionMap getIntermediateSmap() { return intermediateTupleSmap; } - public ExprSubstitutionMap getOutputSmap() { return outputTupleSmap; } + public boolean isMerge() { + return aggPhase.isMerge(); + } + + public boolean isDistinctAgg() { + return secondPhaseDistinctAggInfo != null; + } + + public ExprSubstitutionMap getIntermediateSmap() { + return intermediateTupleSmap; + } + + public ExprSubstitutionMap getOutputSmap() { + return outputTupleSmap; + } + public ExprSubstitutionMap getOutputToIntermediateSmap() { return outputToIntermediateTupleSmap; } public boolean hasAggregateExprs() { - return !aggregateExprs.isEmpty() || - (secondPhaseDistinctAggInfo != null && - !secondPhaseDistinctAggInfo.getAggregateExprs().isEmpty()); + return !aggregateExprs.isEmpty() + || (secondPhaseDistinctAggInfo != null + && !secondPhaseDistinctAggInfo.getAggregateExprs().isEmpty()); } public void setIsMultiDistinct(boolean value) { @@ -419,10 +437,9 @@ public void substitute(ExprSubstitutionMap smap, Analyzer analyzer) { // The smap in this case should not substitute the aggs themselves, only // their subexpressions. 
-        List substitutedAggs =
-                Expr.substituteList(aggregateExprs, smap, analyzer, false);
+        List substitutedAggs = Expr.substituteList(aggregateExprs, smap, analyzer, false);
         aggregateExprs.clear();
-        for (Expr substitutedAgg: substitutedAggs) {
+        for (Expr substitutedAgg : substitutedAggs) {
             aggregateExprs.add((FunctionCallExpr) substitutedAgg);
         }
 
@@ -461,8 +478,8 @@ private void createMergeAggInfo(Analyzer analyzer) {
             List paramExprs = new ArrayList<>();
             // TODO(zhannngchen), change intermediate argument to a list, and remove this
             // ad-hoc logic
-            if (inputExpr.fn.functionName().equals("max_by") ||
-                    inputExpr.fn.functionName().equals("min_by")) {
+            if (inputExpr.fn.functionName().equals("max_by")
+                    || inputExpr.fn.functionName().equals("min_by")) {
                 paramExprs.addAll(inputExpr.getFnParams().exprs());
             } else {
                 paramExprs.add(new SlotRef(inputDesc.getSlots().get(i + getGroupingExprs().size())));
             }
@@ -541,7 +558,6 @@ private void createSecondPhaseAggInfo(
         // construct agg exprs for original DISTINCT aggregate functions
         // (these aren't part of this.aggExprs)
         ArrayList secondPhaseAggExprs = Lists.newArrayList();
-        int distinctExprPos = 0;
         for (FunctionCallExpr inputExpr : distinctAggExprs) {
             Preconditions.checkState(inputExpr.isAggregateFunction());
             FunctionCallExpr aggExpr = null;
@@ -638,8 +654,8 @@ private void createSecondPhaseAggSMap(
         int numOrigGroupingExprs = inputAggInfo.getGroupingExprs().size() - numDistinctParams;
         Preconditions.checkState(
-                slotDescs.size() == numOrigGroupingExprs + distinctAggExprs.size() +
-                        inputAggInfo.getAggregateExprs().size());
+                slotDescs.size() == numOrigGroupingExprs + distinctAggExprs.size()
+                        + inputAggInfo.getAggregateExprs().size());
 
         // original grouping exprs -> first m slots
         for (int i = 0; i < numOrigGroupingExprs; ++i, ++slotIdx) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfoBase.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfoBase.java
index 7fa14b758d0e6b..b44dc45589f594 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfoBase.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfoBase.java
@@ -129,8 +129,8 @@ private TupleDescriptor createTupleDesc(Analyzer analyzer, boolean isOutputTuple
         int aggregateExprStartIndex = groupingExprs.size();
         // if agg is grouping set, so we should set all groupingExpr unless last groupingExpr
         // must set be be nullable
-        boolean isGroupingSet = !groupingExprs.isEmpty() &&
-                groupingExprs.get(groupingExprs.size() - 1) instanceof VirtualSlotRef;
+        boolean isGroupingSet = !groupingExprs.isEmpty()
+                && groupingExprs.get(groupingExprs.size() - 1) instanceof VirtualSlotRef;
 
         for (int i = 0; i < exprs.size(); ++i) {
             Expr expr = exprs.get(i);
@@ -150,7 +150,7 @@ private TupleDescriptor createTupleDesc(Analyzer analyzer, boolean isOutputTuple
                 }
             } else {
                 Preconditions.checkArgument(expr instanceof FunctionCallExpr);
-                FunctionCallExpr aggExpr = (FunctionCallExpr)expr;
+                FunctionCallExpr aggExpr = (FunctionCallExpr) expr;
                 if (aggExpr.isMergeAggFn()) {
                     slotDesc.setLabel(aggExpr.getChild(0).toSql());
                     slotDesc.setSourceExpr(aggExpr.getChild(0));
@@ -159,13 +159,13 @@ private TupleDescriptor createTupleDesc(Analyzer analyzer, boolean isOutputTuple
                     slotDesc.setSourceExpr(aggExpr);
                 }
 
-                if (isOutputTuple && aggExpr.getFn().getNullableMode().equals(Function.NullableMode.DEPEND_ON_ARGUMENT) &&
-                        groupingExprs.size() == 0) {
+                if (isOutputTuple && aggExpr.getFn().getNullableMode().equals(Function.NullableMode.DEPEND_ON_ARGUMENT)
+                        && groupingExprs.size() == 0) {
                     slotDesc.setIsNullable(true);
                 }
 
                 if (!isOutputTuple) {
-                    Type intermediateType = ((AggregateFunction)aggExpr.fn).getIntermediateType();
+                    Type intermediateType = ((AggregateFunction) aggExpr.fn).getIntermediateType();
                     if (intermediateType != null) {
                         // Use the output type as intermediate if the function has a wildcard decimal.
                         if (!intermediateType.isWildcardDecimal()) {
@@ -193,12 +193,30 @@ private TupleDescriptor createTupleDesc(Analyzer analyzer, boolean isOutputTuple
 
     public abstract void materializeRequiredSlots(Analyzer analyzer, ExprSubstitutionMap smap);
 
-    public ArrayList getGroupingExprs() { return groupingExprs; }
-    public ArrayList getAggregateExprs() { return aggregateExprs; }
-    public TupleDescriptor getOutputTupleDesc() { return outputTupleDesc; }
-    public TupleDescriptor getIntermediateTupleDesc() { return intermediateTupleDesc; }
-    public TupleId getIntermediateTupleId() { return intermediateTupleDesc.getId(); }
-    public TupleId getOutputTupleId() { return outputTupleDesc.getId(); }
+    public ArrayList getGroupingExprs() {
+        return groupingExprs;
+    }
+
+    public ArrayList getAggregateExprs() {
+        return aggregateExprs;
+    }
+
+    public TupleDescriptor getOutputTupleDesc() {
+        return outputTupleDesc;
+    }
+
+    public TupleDescriptor getIntermediateTupleDesc() {
+        return intermediateTupleDesc;
+    }
+
+    public TupleId getIntermediateTupleId() {
+        return intermediateTupleDesc.getId();
+    }
+
+    public TupleId getOutputTupleId() {
+        return outputTupleDesc.getId();
+    }
+
     public boolean requiresIntermediateTuple() {
         Preconditions.checkNotNull(intermediateTupleDesc);
         Preconditions.checkNotNull(outputTupleDesc);
@@ -211,7 +229,7 @@ public boolean requiresIntermediateTuple() {
      * its output type.
      */
     public static boolean requiresIntermediateTuple(List aggExprs) {
-        for (Expr aggExpr: aggExprs) {
+        for (Expr aggExpr : aggExprs) {
             Type intermediateType = ((AggregateFunction) aggExpr.fn).getIntermediateType();
             if (intermediateType != null) {
                 return true;
@@ -225,7 +243,7 @@ public static boolean requiresIntermediateTuple(List aggExpr
      * is depend on argument
     */
    public static boolean requiresIntermediateTuple(List aggExprs, boolean noGrouping) {
-        for (Expr aggExpr: aggExprs) {
+        for (Expr aggExpr : aggExprs) {
             Type intermediateType = ((AggregateFunction) aggExpr.fn).getIntermediateType();
             if (intermediateType != null) {
                 return true;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColumnStatsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColumnStatsStmt.java
index 17d631a1a422bf..b5627c256543a9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColumnStatsStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColumnStatsStmt.java
@@ -62,7 +62,7 @@ public void analyze(Analyzer analyzer) throws UserException {
         tableName.analyze(analyzer);
         // check properties
         Optional optional = properties.keySet().stream().map(StatsType::fromString)
-            .filter(statsType -> !CONFIGURABLE_PROPERTIES_SET.contains(statsType)).findFirst();
+                .filter(statsType -> !CONFIGURABLE_PROPERTIES_SET.contains(statsType)).findFirst();
         if (optional.isPresent()) {
             throw new AnalysisException(optional.get() + " is invalid statistic");
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseQuotaStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseQuotaStmt.java
index a45ff28f3d6a04..daacdadb91b986 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseQuotaStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseQuotaStmt.java @@ -80,6 +80,8 @@ public void analyze(Analyzer analyzer) throws UserException { @Override public String toSql() { - return "ALTER DATABASE " + dbName + " SET " + (quotaType == QuotaType.DATA ? "DATA" : "REPLICA") +" QUOTA " + quotaValue; + return "ALTER DATABASE " + dbName + " SET " + + (quotaType == QuotaType.DATA ? "DATA" : "REPLICA") + + " QUOTA " + quotaValue; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStatsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStatsStmt.java index 6d8ea6027ea47e..42661b7bdda716 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStatsStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStatsStmt.java @@ -56,7 +56,7 @@ public void analyze(Analyzer analyzer) throws UserException { tableName.analyze(analyzer); // check properties Optional optional = properties.keySet().stream().map(StatsType::fromString) - .filter(statsType -> !CONFIGURABLE_PROPERTIES_SET.contains(statsType)).findFirst(); + .filter(statsType -> !CONFIGURABLE_PROPERTIES_SET.contains(statsType)).findFirst(); if (optional.isPresent()) { throw new AnalysisException(optional.get() + " is invalid statistic"); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStmt.java index 6f7cd7f257e7ba..1085197b122d37 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStmt.java @@ -152,17 +152,17 @@ public void rewriteAlterClause(OlapTable table) throws UserException { public void checkExternalTableOperationAllow(Table table) throws UserException { List clauses = new ArrayList<>(); for (AlterClause alterClause : ops) { - if (alterClause instanceof TableRenameClause || - alterClause instanceof AddColumnClause || - alterClause instanceof AddColumnsClause || - alterClause instanceof DropColumnClause || - alterClause instanceof ModifyColumnClause || - alterClause instanceof ReorderColumnsClause || - alterClause instanceof ModifyEngineClause) { + if (alterClause instanceof TableRenameClause + || alterClause instanceof AddColumnClause + || alterClause instanceof AddColumnsClause + || alterClause instanceof DropColumnClause + || alterClause instanceof ModifyColumnClause + || alterClause instanceof ReorderColumnsClause + || alterClause instanceof ModifyEngineClause) { clauses.add(alterClause); } else { - throw new AnalysisException(table.getType().toString() + " [" + table.getName() + "] " + - "do not support " + alterClause.getOpType().toString() + " clause now"); + throw new AnalysisException(table.getType().toString() + " [" + table.getName() + "] " + + "do not support " + alterClause.getOpType().toString() + " clause now"); } } ops = clauses; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterViewStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterViewStmt.java index 91ed2c65c5edbe..124b7cc850b524 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterViewStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterViewStmt.java @@ -80,7 +80,7 @@ public String toSql() { sb.append(tableName.toSql()).append("\n"); if (cols != null) { sb.append("(\n"); - for (int i = 0 ; i < cols.size(); i++) { + for (int i = 0; i < cols.size(); i++) { if (i != 0) { sb.append(",\n"); } diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticExpr.java index bb3cf7730fc3a5..55169443e460ce 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticExpr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticExpr.java @@ -154,7 +154,7 @@ public boolean equals(Object obj) { return false; } - AnalyticExpr o = (AnalyticExpr)obj; + AnalyticExpr o = (AnalyticExpr) obj; if (!fnCall.equals(o.getFnCall())) { return false; @@ -177,7 +177,9 @@ public boolean equals(Object obj) { * Analytic exprs cannot be constant. */ @Override - protected boolean isConstantImpl() { return false; } + protected boolean isConstantImpl() { + return false; + } @Override public Expr clone() { @@ -245,32 +247,11 @@ static private boolean isHllAggFn(Function fn) { return fn.functionName().equalsIgnoreCase(HLL_UNION_AGG); } - /** - * Rewrite the following analytic functions: - * percent_rank(), cume_dist() and ntile() - * - * Returns a new Expr if the analytic expr is rewritten, returns null if it's not one - * that we want to equal. - */ - public static Expr rewrite(AnalyticExpr analyticExpr) { - Function fn = analyticExpr.getFnCall().getFn(); - // TODO(zc) - // if (AnalyticExpr.isPercentRankFn(fn)) { - // return createPercentRank(analyticExpr); - // } else if (AnalyticExpr.isCumeDistFn(fn)) { - // return createCumeDist(analyticExpr); - // } else if (AnalyticExpr.isNtileFn(fn)) { - // return createNtile(analyticExpr); - // } - return null; - } - /** * Checks that the value expr of an offset boundary of a RANGE window is compatible * with orderingExprs (and that there's only a single ordering expr). */ - private void checkRangeOffsetBoundaryExpr(AnalyticWindow.Boundary boundary) - throws AnalysisException { + private void checkRangeOffsetBoundaryExpr(AnalyticWindow.Boundary boundary) throws AnalysisException { Preconditions.checkState(boundary.getType().isOffset()); if (orderByElements.size() > 1) { @@ -332,12 +313,12 @@ void checkDefaultValue(Analyzer analyzer) throws AnalysisException { out = true; } } else { - return ; + return; } if (out) { throw new AnalysisException("Column type=" - + getFnCall().getChildren().get(0).getType() + ", value is out of range ") ; + + getFnCall().getChildren().get(0).getType() + ", value is out of range "); } } @@ -365,10 +346,10 @@ void checkOffset(Analyzer analyzer) throws AnalysisException { double value = 0; if (offset instanceof IntLiteral) { - IntLiteral intl = (IntLiteral)offset; + IntLiteral intl = (IntLiteral) offset; value = intl.getDoubleValue(); } else if (offset instanceof LargeIntLiteral) { - LargeIntLiteral intl = (LargeIntLiteral)offset; + LargeIntLiteral intl = (LargeIntLiteral) offset; value = intl.getDoubleValue(); } @@ -490,8 +471,8 @@ public void analyzeImpl(Analyzer analyzer) throws AnalysisException { if (!VectorizedUtil.isVectorized()) { // min/max is not currently supported on sliding windows (i.e. start bound is not // unbounded). 
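// (clarifying note, not part of the patch; the SQL shapes are illustrative) The guard
// rewrapped just below rejects, in non-vectorized mode, sliding-window MIN/MAX such as
//   min(v) OVER (ORDER BY k ROWS BETWEEN 2 PRECEDING AND CURRENT ROW)
// while still accepting
//   min(v) OVER (ORDER BY k ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW),
// i.e. MIN/MAX analytic functions require an UNBOUNDED PRECEDING start bound.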
- if (window != null && isMinMax(fn) && - window.getLeftBoundary().getType() != BoundaryType.UNBOUNDED_PRECEDING) { + if (window != null && isMinMax(fn) + && window.getLeftBoundary().getType() != BoundaryType.UNBOUNDED_PRECEDING) { throw new AnalysisException( "'" + getFnCall().toSql() + "' is only supported with an " + "UNBOUNDED PRECEDING start bound."); @@ -585,10 +566,10 @@ private void standardize(Analyzer analyzer) throws AnalysisException { try { getFnCall().uncheckedCastChild(getFnCall().getChildren().get(0).getType(), 2); } catch (Exception e) { - LOG.warn("" , e); + LOG.warn("", e); throw new AnalysisException("Convert type error in offset fn(default value); old_type=" + getFnCall().getChildren().get(2).getType() + " new_type=" - + getFnCall().getChildren().get(0).getType()) ; + + getFnCall().getChildren().get(0).getType()); } if (getFnCall().getChildren().get(2) instanceof CastExpr) { @@ -602,7 +583,7 @@ private void standardize(Analyzer analyzer) throws AnalysisException { try { getFnCall().uncheckedCastChild(Type.BIGINT, 1); } catch (Exception e) { - LOG.warn("" , e); + LOG.warn("", e); throw new AnalysisException("Convert type error in offset fn(default offset); type=" + getFnCall().getChildren().get(1).getType()); } @@ -646,26 +627,24 @@ private void standardize(Analyzer analyzer) throws AnalysisException { } else { //TODO: Now we don't want to first_value to rewrite in vectorized mode; //if have to rewrite in future, could exec this rule; - if(!VectorizedUtil.isVectorized()) { - List paramExprs = Expr.cloneList(getFnCall().getParams().exprs()); - - if (window.getRightBoundary().getType() == BoundaryType.PRECEDING) { - // The number of rows preceding for the end bound determines the number of - // rows at the beginning of each partition that should have a NULL value. - paramExprs.add(window.getRightBoundary().getExpr()); - } else { - // -1 indicates that no NULL values are inserted even though we set the end - // bound to the start bound (which is PRECEDING) below; this is different from - // the default behavior of windows with an end bound PRECEDING. - paramExprs.add(new IntLiteral(-1, Type.BIGINT)); - } + if (!VectorizedUtil.isVectorized()) { + List paramExprs = Expr.cloneList(getFnCall().getParams().exprs()); + + if (window.getRightBoundary().getType() == BoundaryType.PRECEDING) { + // The number of rows preceding for the end bound determines the number of + // rows at the beginning of each partition that should have a NULL value. + paramExprs.add(window.getRightBoundary().getExpr()); + } else { + // -1 indicates that no NULL values are inserted even though we set the end + // bound to the start bound (which is PRECEDING) below; this is different from + // the default behavior of windows with an end bound PRECEDING. 
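// (worked example added for clarity, not part of the patch) For
//   first_value(v) OVER (ORDER BY k ROWS BETWEEN UNBOUNDED PRECEDING AND 2 PRECEDING)
// the right boundary is PRECEDING, so its offset 2 is appended as the extra parameter
// and the first two rows of each partition produce NULL; when -1 is appended instead,
// no leading rows are NULLed even though the end bound is moved onto the old start
// bound below, and the call becomes FIRST_VALUE_REWRITE over the widened window.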
+ paramExprs.add(new IntLiteral(-1, Type.BIGINT)); + } - window = new AnalyticWindow(window.getType(), - new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null), - window.getLeftBoundary()); - fnCall = new FunctionCallExpr("FIRST_VALUE_REWRITE", - new FunctionParams(paramExprs)); - // fnCall_.setIsInternalFnCall(true); + window = new AnalyticWindow(window.getType(), + new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null), + window.getLeftBoundary()); + fnCall = new FunctionCallExpr("FIRST_VALUE_REWRITE", new FunctionParams(paramExprs)); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticInfo.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticInfo.java index 0b3cf4f1bc6b7e..8b88f4c4fc29f2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticInfo.java @@ -53,7 +53,7 @@ private AnalyticInfo(ArrayList analyticExprs) { super(new ArrayList(), new ArrayList()); this.analyticExprs = Expr.cloneList(analyticExprs); // Extract the analytic function calls for each analytic expr. - for (Expr analyticExpr: analyticExprs) { + for (Expr analyticExpr : analyticExprs) { aggregateExprs.add(((AnalyticExpr) analyticExpr).getFnCall()); } analyticTupleSmap = new ExprSubstitutionMap(); @@ -71,16 +71,23 @@ private AnalyticInfo(AnalyticInfo other) { commonPartitionExprs = Expr.cloneList(other.commonPartitionExprs); } - public ArrayList getAnalyticExprs() { return analyticExprs; } - public ExprSubstitutionMap getSmap() { return analyticTupleSmap; } - public List getCommonPartitionExprs() { return commonPartitionExprs; } + public ArrayList getAnalyticExprs() { + return analyticExprs; + } + + public ExprSubstitutionMap getSmap() { + return analyticTupleSmap; + } + + public List getCommonPartitionExprs() { + return commonPartitionExprs; + } /** * Creates complete AnalyticInfo for analyticExprs, including tuple descriptors and * smaps. */ - static public AnalyticInfo create( - ArrayList analyticExprs, Analyzer analyzer) { + static public AnalyticInfo create(ArrayList analyticExprs, Analyzer analyzer) { Preconditions.checkState(analyticExprs != null && !analyticExprs.isEmpty()); Expr.removeDuplicates(analyticExprs); AnalyticInfo result = new AnalyticInfo(analyticExprs); @@ -113,7 +120,7 @@ static public AnalyticInfo create( */ private List computeCommonPartitionExprs() { List result = Lists.newArrayList(); - for (Expr analyticExpr: analyticExprs) { + for (Expr analyticExpr : analyticExprs) { Preconditions.checkState(analyticExpr.isAnalyzed()); List partitionExprs = ((AnalyticExpr) analyticExpr).getPartitionExprs(); if (partitionExprs == null) { @@ -159,23 +166,20 @@ public void checkConsistency() { // Check materialized slots. int numMaterializedSlots = 0; - for (SlotDescriptor slotDesc: slots) { + for (SlotDescriptor slotDesc : slots) { if (slotDesc.isMaterialized()) { ++numMaterializedSlots; } } - Preconditions.checkState(numMaterializedSlots == - materializedSlots.size()); + Preconditions.checkState(numMaterializedSlots == materializedSlots.size()); // Check that analytic expr return types match the slot descriptors. 
int slotIdx = 0; - for (int i = 0; i < analyticExprs.size(); ++i) { - Expr analyticExpr = analyticExprs.get(i); + for (Expr analyticExpr : analyticExprs) { Type slotType = slots.get(slotIdx).getType(); Preconditions.checkState(analyticExpr.getType().equals(slotType), - String.format("Analytic expr %s returns type %s but its analytic tuple " + - "slot has type %s", analyticExpr.toSql(), - analyticExpr.getType().toString(), slotType.toString())); + String.format("Analytic expr %s returns type %s but its analytic tuple " + "slot has type %s", + analyticExpr.toSql(), analyticExpr.getType().toString(), slotType.toString())); ++slotIdx; } } @@ -191,8 +195,12 @@ public String debugString() { } @Override - protected String tupleDebugName() { return "analytic-tuple"; } + protected String tupleDebugName() { + return "analytic-tuple"; + } @Override - public AnalyticInfo clone() { return new AnalyticInfo(this); } + public AnalyticInfo clone() { + return new AnalyticInfo(this); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticWindow.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticWindow.java index 53265089b4be6c..b6bc06d3bbd615 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticWindow.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticWindow.java @@ -206,7 +206,7 @@ public boolean equals(Object obj) { return false; } - Boundary o = (Boundary)obj; + Boundary o = (Boundary) obj; boolean exprEqual = (expr == null) == (o.expr == null); if (exprEqual && expr != null) { @@ -360,9 +360,8 @@ public boolean equals(Object obj) { return false; } - AnalyticWindow o = (AnalyticWindow)obj; - boolean rightBoundaryEqual = - (rightBoundary == null) == (o.rightBoundary == null); + AnalyticWindow o = (AnalyticWindow) obj; + boolean rightBoundaryEqual = (rightBoundary == null) == (o.rightBoundary == null); if (rightBoundaryEqual && rightBoundary != null) { rightBoundaryEqual = rightBoundary.equals(o.rightBoundary); @@ -382,7 +381,7 @@ public AnalyticWindow clone() { * Semantic analysis for expr of a PRECEDING/FOLLOWING clause. */ private void checkOffsetExpr(Analyzer analyzer, Boundary boundary) - throws AnalysisException { + throws AnalysisException { Preconditions.checkState(boundary.getType().isOffset()); Expr e = boundary.getExpr(); Preconditions.checkNotNull(e); @@ -392,9 +391,6 @@ private void checkOffsetExpr(Analyzer analyzer, Boundary boundary) if (e.isConstant() && e.getType().isNumericType()) { try { val = Expr.getConstFromExpr(e); -// val = TColumnValueUtil.getNumericVal( -// FeSupport.EvalConstExpr(e, analyzer.getQueryGlobals())); - if (val <= 0) { isPos = false; } @@ -428,7 +424,7 @@ private void checkOffsetExpr(Analyzer analyzer, Boundary boundary) * Check that b1 <= b2. 
*/ private void checkOffsetBoundaries(Analyzer analyzer, Boundary b1, Boundary b2) - throws AnalysisException { + throws AnalysisException { Preconditions.checkState(b1.getType().isOffset()); Preconditions.checkState(b2.getType().isOffset()); Expr e1 = b1.getExpr(); @@ -439,8 +435,6 @@ private void checkOffsetBoundaries(Analyzer analyzer, Boundary b1, Boundary b2) e2 != null && e2.isConstant() && e2.getType().isNumericType()); try { -// TColumnValue val1 = FeSupport.EvalConstExpr(e1, analyzer.getQueryGlobals()); -// TColumnValue val2 = FeSupport.EvalConstExpr(e2, analyzer.getQueryGlobals()); double left = Expr.getConstFromExpr(e1); double right = Expr.getConstFromExpr(e2); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java index a5a18d0d3ced83..cebd197fd57b22 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java @@ -167,19 +167,50 @@ public void setIsSubquery() { isSubquery = true; globalState.containsSubquery = true; } - public boolean setHasPlanHints() { return globalState.hasPlanHints = true; } - public boolean hasPlanHints() { return globalState.hasPlanHints; } - public void setIsWithClause() { isWithClause = true; } - public boolean isWithClause() { return isWithClause; } - public void setUDFAllowed(boolean val) { this.isUDFAllowed = val; } - public boolean isUDFAllowed() { return this.isUDFAllowed; } - public void setTimezone(String timezone) { this.timezone = timezone; } - public String getTimezone() { return timezone; } + public boolean setHasPlanHints() { + return globalState.hasPlanHints = true; + } + + public boolean hasPlanHints() { + return globalState.hasPlanHints; + } + + public void setIsWithClause() { + isWithClause = true; + } + + public boolean isWithClause() { + return isWithClause; + } + + public void setUDFAllowed(boolean val) { + this.isUDFAllowed = val; + } - public void putAssignedRuntimeFilter(RuntimeFilter rf) { assignedRuntimeFilters.add(rf); } - public List getAssignedRuntimeFilter() { return assignedRuntimeFilters; } - public void clearAssignedRuntimeFilters() { assignedRuntimeFilters.clear(); } + public boolean isUDFAllowed() { + return this.isUDFAllowed; + } + + public void setTimezone(String timezone) { + this.timezone = timezone; + } + + public String getTimezone() { + return timezone; + } + + public void putAssignedRuntimeFilter(RuntimeFilter rf) { + assignedRuntimeFilters.add(rf); + } + + public List getAssignedRuntimeFilter() { + return assignedRuntimeFilters; + } + + public void clearAssignedRuntimeFilters() { + assignedRuntimeFilters.clear(); + } public long getAutoBroadcastJoinThreshold() { return globalState.autoBroadcastJoinThreshold; @@ -428,8 +459,13 @@ public static Analyzer createWithNewGlobalState(Analyzer parentAnalyzer) { return new Analyzer(parentAnalyzer, globalState); } - public void setIsExplain() { globalState.isExplain = true; } - public boolean isExplain() { return globalState.isExplain; } + public void setIsExplain() { + globalState.isExplain = true; + } + + public boolean isExplain() { + return globalState.isExplain; + } public int incrementCallDepth() { return ++callDepth; @@ -454,10 +490,10 @@ public void registerLocalView(View view) throws AnalysisException { List viewLabels = view.getColLabels(); List queryStmtLabels = view.getQueryStmt().getColLabels(); if (viewLabels.size() > queryStmtLabels.size()) { - throw new 
AnalysisException("WITH-clause view '" + view.getName() + - "' returns " + queryStmtLabels.size() + " columns, but " + - viewLabels.size() + " labels were specified. The number of column " + - "labels must be smaller or equal to the number of returned columns."); + throw new AnalysisException("WITH-clause view '" + view.getName() + + "' returns " + queryStmtLabels.size() + " columns, but " + + viewLabels.size() + " labels were specified. The number of column " + + "labels must be smaller or equal to the number of returned columns."); } } if (localViews.put(view.getName(), view) != null) { @@ -627,7 +663,7 @@ public TableRef resolveTableRef(TableRef tableRef) throws AnalysisException { partition -> partition.getState() == PartitionState.RESTORE ).collect(Collectors.toList()).isEmpty(); - if(!isNotRestoring){ + if (!isNotRestoring) { // if doing restore with partitions, the status check push down to OlapScanNode::computePartitionInfo to // support query that partitions is not restoring. } else { @@ -791,7 +827,7 @@ private TupleDescriptor resolveColumnRef(TableName tblName, String colName) thro //result = desc; if (!colName.equalsIgnoreCase(Column.DELETE_SIGN) && !isVisible(desc.getId())) { ErrorReport.reportAnalysisException(ErrorCode.ERR_ILLEGAL_COLUMN_REFERENCE_ERROR, - Joiner.on(".").join(tblName.getTbl(),colName)); + Joiner.on(".").join(tblName.getTbl(), colName)); } Column col = desc.getTable().getColumn(colName); if (col != null) { @@ -862,7 +898,7 @@ public void registerFullOuterJoinedConjunct(Expr e) { !globalState.fullOuterJoinedConjuncts.containsKey(e.getId())); List tids = Lists.newArrayList(); e.getIds(tids, null); - for (TupleId tid: tids) { + for (TupleId tid : tids) { if (!globalState.fullOuterJoinedTupleIds.containsKey(tid)) { continue; } @@ -871,8 +907,7 @@ public void registerFullOuterJoinedConjunct(Expr e) { break; } if (LOG.isDebugEnabled()) { - LOG.debug("registerFullOuterJoinedConjunct: " + - globalState.fullOuterJoinedConjuncts.toString()); + LOG.debug("registerFullOuterJoinedConjunct: " + globalState.fullOuterJoinedConjuncts); } } @@ -881,12 +916,11 @@ public void registerFullOuterJoinedConjunct(Expr e) { * rhsRef. 
*/ public void registerFullOuterJoinedTids(List tids, TableRef rhsRef) { - for (TupleId tid: tids) { + for (TupleId tid : tids) { globalState.fullOuterJoinedTupleIds.put(tid, rhsRef); } if (LOG.isTraceEnabled()) { - LOG.trace("registerFullOuterJoinedTids: " + - globalState.fullOuterJoinedTupleIds.toString()); + LOG.trace("registerFullOuterJoinedTids: " + globalState.fullOuterJoinedTupleIds); } } @@ -895,12 +929,11 @@ public void registerFullOuterJoinedTids(List tids, TableRef rhsRef) { * All tuple of outer join should be null in slot desc */ public void registerOuterJoinedTids(List tids, TableRef rhsRef) { - for (TupleId tid: tids) { + for (TupleId tid : tids) { globalState.outerJoinedTupleIds.put(tid, rhsRef); } if (LOG.isDebugEnabled()) { - LOG.debug("registerOuterJoinedTids: " + - globalState.outerJoinedTupleIds.toString()); + LOG.debug("registerOuterJoinedTids: " + globalState.outerJoinedTupleIds); } } @@ -983,7 +1016,7 @@ public void registerOnSlotToLiteralExpr(Expr expr) { globalState.onSlotToLiteralExpr.add(expr); } - public void registerOnSlotToLiteralDeDuplication(Pair pair) { + public void registerOnSlotToLiteralDeDuplication(Pair pair) { globalState.onSlotToLiteralDeDuplication.add(pair); } @@ -1046,7 +1079,7 @@ public void registerConjuncts(Expr e, boolean fromHavingClause) throws AnalysisE // Register all conjuncts and handle constant conjuncts with ids public void registerConjuncts(Expr e, boolean fromHavingClause, List ids) throws AnalysisException { - for (Expr conjunct: e.getConjuncts()) { + for (Expr conjunct : e.getConjuncts()) { registerConjunct(conjunct); if (ids != null) { for (TupleId id : ids) { @@ -1263,10 +1296,9 @@ && canEvalPredicate(tupleIds, e)) { public List getAllUnassignedConjuncts(List tupleIds) { List result = Lists.newArrayList(); for (Expr e : globalState.conjuncts.values()) { - if (!e.isAuxExpr() - && e.isBoundByTupleIds(tupleIds) - && !globalState.assignedConjuncts.contains(e.getId()) - && !globalState.ojClauseByConjunct.containsKey(e.getId())) { + if (!e.isAuxExpr() && e.isBoundByTupleIds(tupleIds) + && !globalState.assignedConjuncts.contains(e.getId()) + && !globalState.ojClauseByConjunct.containsKey(e.getId())) { result.add(e); } } @@ -1384,7 +1416,7 @@ public boolean isVisible(TupleId tid) { } public boolean containsOuterJoinedTid(List tids) { - for (TupleId tid: tids) { + for (TupleId tid : tids) { if (isOuterJoined(tid)) { return true; } @@ -1427,7 +1459,7 @@ public List getAllConjuncts(TupleId id) { public List getRemainConjuncts(List tupleIds) { Set remainConjunctIds = Sets.newHashSet(); for (TupleId tupleId : tupleIds) { - if (tuplePredicates.get(tupleId) !=null) { + if (tuplePredicates.get(tupleId) != null) { remainConjunctIds.addAll(tuplePredicates.get(tupleId)); } } @@ -1448,7 +1480,7 @@ public List getOnSlotEqSlotExpr() { return new ArrayList<>(globalState.onSlotEqSlotExpr); } - public Set> getOnSlotEqSlotDeDuplication() { + public Set> getOnSlotEqSlotDeDuplication() { return Sets.newHashSet(globalState.onSlotEqSlotDeDuplication); } @@ -1498,9 +1530,14 @@ public void setVisibleSemiJoinedTuple(TupleId tid) { * Return true if this analyzer has no ancestors. (i.e. false for the analyzer created * for inline views/ union operands, etc.) 
*/ - public boolean isRootAnalyzer() { return ancestors.isEmpty(); } + public boolean isRootAnalyzer() { + return ancestors.isEmpty(); + } + + public boolean hasAncestors() { + return !ancestors.isEmpty(); + } - public boolean hasAncestors() { return !ancestors.isEmpty(); } public Analyzer getParentAnalyzer() { return hasAncestors() ? ancestors.get(0) : null; } @@ -1510,10 +1547,17 @@ public Analyzer getParentAnalyzer() { * to return an empty result set, e.g., due to a limit 0 or a constant predicate * that evaluates to false. */ - public boolean hasEmptyResultSet() { return hasEmptyResultSet; } - public void setHasEmptyResultSet() { hasEmptyResultSet = true; } + public boolean hasEmptyResultSet() { + return hasEmptyResultSet; + } + + public void setHasEmptyResultSet() { + hasEmptyResultSet = true; + } - public boolean hasEmptySpjResultSet() { return hasEmptySpjResultSet; } + public boolean hasEmptySpjResultSet() { + return hasEmptySpjResultSet; + } public void setHasLimitOffsetClause(boolean hasLimitOffset) { this.hasLimitOffsetClause = hasLimitOffset; @@ -1538,7 +1582,7 @@ public void registerOnClauseConjuncts(List conjuncts, TableRef rhsRef) globalState.conjunctsByOjClause.put(rhsRef.getId(), ojConjuncts); } } - for (Expr conjunct: conjuncts) { + for (Expr conjunct : conjuncts) { conjunct.setIsOnClauseConjunct(true); registerConjunct(conjunct); if (rhsRef.getJoinOp().isOuterJoin()) { @@ -1631,7 +1675,7 @@ public TableRef getOjRef(Expr e) { return globalState.ojClauseByConjunct.get(e.getId()); } - /** + /** * Returns false if 'e' originates from an outer-join On-clause and it is incorrect to * evaluate 'e' at a node materializing 'tids'. Returns true otherwise. */ @@ -1655,12 +1699,12 @@ public List getEqJoinConjuncts(List lhsTblRefIds, // Contains all equi-join conjuncts that have one child fully bound by one of the // rhs table ref ids (the other child is not bound by that rhs table ref id). 
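// (illustrative note, not part of the patch; table and column names are invented)
// For t1 JOIN t2 ON t1.id = t2.id AND t1.x < 5, only t1.id = t2.id is registered in
// eqJoinConjuncts under t2's tuple id. The loops below collect such conjunct ids for
// every rhs tuple id, skipping duplicates, then drop any conjunct that the
// outer/anti-join rules forbid evaluating at this join.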
List conjunctIds = Lists.newArrayList(); - for (TupleId rhsId: rhsTblRefIds) { + for (TupleId rhsId : rhsTblRefIds) { List cids = globalState.eqJoinConjuncts.get(rhsId); if (cids == null) { continue; } - for (ExprId eid: cids) { + for (ExprId eid : cids) { if (!conjunctIds.contains(eid)) { conjunctIds.add(eid); } @@ -1679,12 +1723,12 @@ public List getEqJoinConjuncts(List lhsTblRefIds, List nodeTblRefIds = Lists.newArrayList(lhsTblRefIds); nodeTblRefIds.addAll(rhsTblRefIds); List result = Lists.newArrayList(); - for (ExprId conjunctId: conjunctIds) { + for (ExprId conjunctId : conjunctIds) { Expr e = globalState.conjuncts.get(conjunctId); Preconditions.checkState(e != null); - if (!canEvalFullOuterJoinedConjunct(e, nodeTblRefIds) || - !canEvalAntiJoinedConjunct(e, nodeTblRefIds) || - !canEvalOuterJoinedConjunct(e, nodeTblRefIds)) { + if (!canEvalFullOuterJoinedConjunct(e, nodeTblRefIds) + || !canEvalAntiJoinedConjunct(e, nodeTblRefIds) + || !canEvalOuterJoinedConjunct(e, nodeTblRefIds)) { continue; } @@ -2065,7 +2109,7 @@ public boolean canEvalPredicate(List tupleIds, Expr e) { } } - for (TupleId tid: tids) { + for (TupleId tid : tids) { TableRef rhsRef = getLastOjClause(tid); // this is not outer-joined; ignore if (rhsRef == null) { @@ -2145,13 +2189,13 @@ public boolean evalByJoin(Expr e) { public void materializeSlots(List exprs) { List slotIds = Lists.newArrayList(); - for (Expr e: exprs) { + for (Expr e : exprs) { Preconditions.checkState(e.isAnalyzed); e.getIds(null, slotIds); } - for (TupleDescriptor tupleDesc: this.getDescTbl().getTupleDescs()) { - for (SlotDescriptor slotDesc: tupleDesc.getSlots()) { + for (TupleDescriptor tupleDesc : this.getDescTbl().getTupleDescs()) { + for (SlotDescriptor slotDesc : tupleDesc.getSlots()) { if (slotIds.contains(slotDesc.getId())) { slotDesc.setIsMaterialized(true); } @@ -2159,7 +2203,9 @@ public void materializeSlots(List exprs) { } } - public Map getLocalViews() { return localViews; } + public Map getLocalViews() { + return localViews; + } public boolean isOuterJoined(TupleId tid) { return globalState.outerJoinedTupleIds.containsKey(tid); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ArrayLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ArrayLiteral.java index 7e06d1b7aae62e..d9024de8072ad2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ArrayLiteral.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ArrayLiteral.java @@ -132,7 +132,7 @@ public Expr uncheckedCastTo(Type targetType) throws AnalysisException { ArrayLiteral literal = new ArrayLiteral(this); for (int i = 0; i < children.size(); ++ i) { Expr child = children.get(i); - literal.children.set(i, child.uncheckedCastTo(((ArrayType)targetType).getItemType())); + literal.children.set(i, child.uncheckedCastTo(((ArrayType) targetType).getItemType())); } literal.setType(targetType); return literal; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BetweenPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BetweenPredicate.java index 5dc2e4c5a9fb01..84fe3112374bb4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BetweenPredicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BetweenPredicate.java @@ -49,13 +49,6 @@ protected BetweenPredicate(BetweenPredicate other) { isNotBetween = other.isNotBetween; } -// @Override -// public Expr reset() { -// super.reset(); -// originalChildren = Expr.resetList(originalChildren); -// return this; -// } - @Override public Expr clone() { 
return new BetweenPredicate(this); @@ -68,10 +61,10 @@ public boolean isNotBetween() { @Override public void analyzeImpl(Analyzer analyzer) throws AnalysisException { super.analyzeImpl(analyzer); - if (children.get(0) instanceof Subquery && - (children.get(1) instanceof Subquery || children.get(2) instanceof Subquery)) { - throw new AnalysisException("Comparison between subqueries is not " + - "supported in a BETWEEN predicate: " + toSql()); + if (children.get(0) instanceof Subquery + && (children.get(1) instanceof Subquery || children.get(2) instanceof Subquery)) { + throw new AnalysisException("Comparison between subqueries is not " + + "supported in a BETWEEN predicate: " + toSql()); } // if children has subquery, it will be written and reanalyzed in the future. if (children.get(0) instanceof Subquery @@ -82,10 +75,10 @@ public void analyzeImpl(Analyzer analyzer) throws AnalysisException { analyzer.castAllToCompatibleType(children); } - @Override - public boolean isVectorized() { - return false; - } + @Override + public boolean isVectorized() { + return false; + } @Override protected void toThrift(TExprNode msg) { @@ -96,19 +89,21 @@ protected void toThrift(TExprNode msg) { @Override public String toSqlImpl() { String notStr = (isNotBetween) ? "NOT " : ""; - return children.get(0).toSql() + " " + notStr + "BETWEEN " + - children.get(1).toSql() + " AND " + children.get(2).toSql(); + return children.get(0).toSql() + " " + notStr + "BETWEEN " + + children.get(1).toSql() + " AND " + children.get(2).toSql(); } @Override public String toDigestImpl() { String notStr = (isNotBetween) ? "NOT " : ""; - return children.get(0).toDigest() + " " + notStr + "BETWEEN " + - children.get(1).toDigest() + " AND " + children.get(2).toDigest(); + return children.get(0).toDigest() + " " + notStr + "BETWEEN " + + children.get(1).toDigest() + " AND " + children.get(2).toDigest(); } @Override - public Expr clone(ExprSubstitutionMap sMap) { return new BetweenPredicate(this); } + public Expr clone(ExprSubstitutionMap sMap) { + return new BetweenPredicate(this); + } @Override public boolean equals(Object o) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java index 271c8d22603146..12d6aa7b24f2f6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java @@ -134,11 +134,17 @@ public Operator converse() { } } - public boolean isEquivalence() { return this == EQ || this == EQ_FOR_NULL; }; + public boolean isEquivalence() { + return this == EQ || this == EQ_FOR_NULL; + } - public boolean isUnNullSafeEquivalence() { return this == EQ; }; + public boolean isUnNullSafeEquivalence() { + return this == EQ; + } - public boolean isUnequivalence() { return this == NE; } + public boolean isUnequivalence() { + return this == NE; + } } private Operator op; @@ -163,15 +169,20 @@ public BinaryPredicate(Operator op, Expr e1, Expr e2) { protected BinaryPredicate(BinaryPredicate other) { super(other); op = other.op; - slotIsleft= other.slotIsleft; + slotIsleft = other.slotIsleft; isInferred = other.isInferred; } - public boolean isInferred() { return isInferred; } - public void setIsInferred() { isInferred = true; } + public boolean isInferred() { + return isInferred; + } + + public void setIsInferred() { + isInferred = true; + } public static void initBuiltins(FunctionSet functionSet) { - for (Type t: 
Type.getSupportedTypes()) { + for (Type t : Type.getSupportedTypes()) { if (t.isNull()) { continue; // NULL is handled through type promotion. } @@ -201,7 +212,9 @@ public Operator getOp() { return op; } - public void setOp(Operator op) { this.op = op; } + public void setOp(Operator op) { + this.op = op; + } @Override public Expr negate() { @@ -339,17 +352,17 @@ private Type getCmpType() throws AnalysisException { // When int column compares with string, Mysql will convert string to int. // So it is also compatible with Mysql. - if (t1 == PrimitiveType.BIGINT && (t2 == PrimitiveType.VARCHAR || t2 ==PrimitiveType.STRING)) { + if (t1 == PrimitiveType.BIGINT && (t2 == PrimitiveType.VARCHAR || t2 == PrimitiveType.STRING)) { Expr rightChild = getChild(1); Long parsedLong = Type.tryParseToLong(rightChild); - if(parsedLong != null) { + if (parsedLong != null) { return Type.BIGINT; } } - if ((t1 == PrimitiveType.VARCHAR || t1 ==PrimitiveType.STRING) && t2 == PrimitiveType.BIGINT) { + if ((t1 == PrimitiveType.VARCHAR || t1 == PrimitiveType.STRING) && t2 == PrimitiveType.BIGINT) { Expr leftChild = getChild(0); Long parsedLong = Type.tryParseToLong(leftChild); - if(parsedLong != null) { + if (parsedLong != null) { return Type.BIGINT; } } @@ -404,8 +417,8 @@ public void analyzeImpl(Analyzer analyzer) throws AnalysisException { // determine selectivity Reference slotRefRef = new Reference(); - if (op == Operator.EQ && isSingleColumnPredicate(slotRefRef, - null) && slotRefRef.getRef().getNumDistinctValues() > 0) { + if (op == Operator.EQ && isSingleColumnPredicate(slotRefRef, null) + && slotRefRef.getRef().getNumDistinctValues() > 0) { Preconditions.checkState(slotRefRef.getRef() != null); selectivity = 1.0 / slotRefRef.getRef().getNumDistinctValues(); selectivity = Math.max(0, Math.min(1, selectivity)); @@ -604,11 +617,11 @@ public Expr getResultValue() throws AnalysisException { recursiveResetChildrenResult(); final Expr leftChildValue = getChild(0); final Expr rightChildValue = getChild(1); - if(!(leftChildValue instanceof LiteralExpr) + if (!(leftChildValue instanceof LiteralExpr) || !(rightChildValue instanceof LiteralExpr)) { return this; } - return compareLiteral((LiteralExpr)leftChildValue, (LiteralExpr)rightChildValue); + return compareLiteral((LiteralExpr) leftChildValue, (LiteralExpr) rightChildValue); } private Expr compareLiteral(LiteralExpr first, LiteralExpr second) throws AnalysisException { @@ -621,13 +634,13 @@ private Expr compareLiteral(LiteralExpr first, LiteralExpr second) throws Analys return new BoolLiteral(false); } } else { - if (isFirstNull || isSecondNull){ + if (isFirstNull || isSecondNull) { return new NullLiteral(); } } final int compareResult = first.compareLiteral(second); - switch(op) { + switch (op) { case EQ: case EQ_FOR_NULL: return new BoolLiteral(compareResult == 0); @@ -649,7 +662,7 @@ private Expr compareLiteral(LiteralExpr first, LiteralExpr second) throws Analys @Override public void setSelectivity() { - switch(op) { + switch (op) { case EQ: case EQ_FOR_NULL: { Reference slotRefRef = new Reference(); @@ -661,7 +674,8 @@ public void setSelectivity() { } } break; - } default: { + } + default: { // Reference hive selectivity = 1.0 / 3.0; break; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BuiltinAggregateFunction.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BuiltinAggregateFunction.java index ff66fb29e4078f..3ccb85f0ae3f9f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BuiltinAggregateFunction.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/analysis/BuiltinAggregateFunction.java @@ -20,7 +20,6 @@ import org.apache.doris.catalog.Function; import org.apache.doris.catalog.ScalarType; import org.apache.doris.catalog.Type; -import org.apache.doris.common.AnalysisException; import org.apache.doris.thrift.TAggregateFunction; import org.apache.doris.thrift.TAggregationOp; import org.apache.doris.thrift.TFunction; @@ -50,10 +49,8 @@ public boolean isReqIntermediateTuple() { } public BuiltinAggregateFunction(Operator op, ArrayList argTypes, - Type retType, org.apache.doris.catalog.Type intermediateType, boolean isAnalyticFn) - throws AnalysisException { - super(FunctionName.createBuiltinName(op.toString()), argTypes, - retType, false); + Type retType, org.apache.doris.catalog.Type intermediateType, boolean isAnalyticFn) { + super(FunctionName.createBuiltinName(op.toString()), argTypes, retType, false); Preconditions.checkState(intermediateType != null); Preconditions.checkState(op != null); // may be no need to analyze @@ -128,8 +125,8 @@ public enum Operator { // The intermediate type for this function if it is constant regardless of // input type. Set to null if it can only be determined during analysis. private final org.apache.doris.catalog.Type intermediateType; - private Operator(String description, TAggregationOp thriftOp, - org.apache.doris.catalog.Type intermediateType) { + Operator(String description, TAggregationOp thriftOp, + org.apache.doris.catalog.Type intermediateType) { this.description = description; this.thriftOp = thriftOp; this.intermediateType = intermediateType; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CancelAlterTableStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CancelAlterTableStmt.java index d49c68fd1e85ca..8f88a8580ab9d1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CancelAlterTableStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CancelAlterTableStmt.java @@ -88,8 +88,8 @@ public String toSql() { stringBuilder.append("CANCEL ALTER " + this.alterType); stringBuilder.append(" FROM " + dbTableName.toSql()); if (!CollectionUtils.isEmpty(alterJobIdList)) { - stringBuilder.append(" (") - .append(String.join(",",alterJobIdList.stream().map(String::valueOf).collect(Collectors.toList()))); + stringBuilder.append(" (").append(String.join(",", alterJobIdList.stream() + .map(String::valueOf).collect(Collectors.toList()))); stringBuilder.append(")"); } return stringBuilder.toString(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java index d2e9691bf2a04c..10cd2d065c5a50 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java @@ -65,11 +65,11 @@ public class CastExpr extends Expr { static { TYPE_NULLABLE_MODE = Maps.newHashMap(); - for (ScalarType fromType: Type.getSupportedTypes()) { + for (ScalarType fromType : Type.getSupportedTypes()) { if (fromType.isNull()) { continue; } - for (ScalarType toType: Type.getSupportedTypes()) { + for (ScalarType toType : Type.getSupportedTypes()) { if (fromType.isNull()) { continue; } @@ -138,9 +138,8 @@ public TypeDef getTargetTypeDef() { private static boolean disableRegisterCastingFunction(Type fromType, Type toType) { // Disable casting from boolean to decimal or datetime or date - if (fromType.isBoolean() && - (toType.equals(Type.DECIMALV2) || - toType.equals(Type.DATETIME) || 
toType.equals(Type.DATE))) { + if (fromType.isBoolean() + && (toType.equals(Type.DECIMALV2) || toType.equals(Type.DATETIME) || toType.equals(Type.DATE))) { return true; } @@ -173,7 +172,7 @@ public static void initBuiltins(FunctionSet functionSet) { + typeName; functionSet.addBuiltinBothScalaAndVectorized(ScalarFunction.createBuiltin(getFnName(toType), toType, TYPE_NULLABLE_MODE.get(new Pair<>(fromType, toType)), - Lists.newArrayList(fromType), false , + Lists.newArrayList(fromType), false, beSymbol, null, null, true)); } } @@ -186,11 +185,11 @@ public Expr clone() { @Override public String toSqlImpl() { - boolean isVerbose = ConnectContext.get() != null && - ConnectContext.get().getExecutor() != null && - ConnectContext.get().getExecutor().getParsedStmt() != null && - ConnectContext.get().getExecutor().getParsedStmt().getExplainOptions() != null && - ConnectContext.get().getExecutor().getParsedStmt().getExplainOptions().isVerbose(); + boolean isVerbose = ConnectContext.get() != null + && ConnectContext.get().getExecutor() != null + && ConnectContext.get().getExecutor().getParsedStmt() != null + && ConnectContext.get().getExecutor().getParsedStmt().getExplainOptions() != null + && ConnectContext.get().getExecutor().getParsedStmt().getExplainOptions().isVerbose(); if (isImplicit && !isVerbose) { return getChild(0).toSql(); } @@ -207,11 +206,11 @@ public String toSqlImpl() { @Override public String toDigestImpl() { - boolean isVerbose = ConnectContext.get() != null && - ConnectContext.get().getExecutor() != null && - ConnectContext.get().getExecutor().getParsedStmt() != null && - ConnectContext.get().getExecutor().getParsedStmt().getExplainOptions() != null && - ConnectContext.get().getExecutor().getParsedStmt().getExplainOptions().isVerbose(); + boolean isVerbose = ConnectContext.get() != null + && ConnectContext.get().getExecutor() != null + && ConnectContext.get().getExecutor().getParsedStmt() != null + && ConnectContext.get().getExecutor().getParsedStmt().getExplainOptions() != null + && ConnectContext.get().getExecutor().getParsedStmt().getExplainOptions().isVerbose(); if (isImplicit && !isVerbose) { return getChild(0).toDigest(); } @@ -278,10 +277,10 @@ public void analyze() throws AnalysisException { fn = Catalog.getCurrentCatalog().getFunction( searchDesc, Function.CompareMode.IS_IDENTICAL); } - } else if (type.isArrayType()){ + } else if (type.isArrayType()) { fn = ScalarFunction.createBuiltin(getFnName(Type.ARRAY), type, Function.NullableMode.ALWAYS_NULLABLE, - Lists.newArrayList(Type.VARCHAR), false , + Lists.newArrayList(Type.VARCHAR), false, "doris::CastFunctions::cast_to_array_val", null, null, true); } @@ -334,8 +333,8 @@ public boolean equals(Object obj) { public Expr ignoreImplicitCast() { if (isImplicit) { // we don't expect to see to consecutive implicit casts - Preconditions.checkState( - !(getChild(0) instanceof CastExpr) || !((CastExpr) getChild(0)).isImplicit()); + Preconditions.checkState(!(getChild(0) instanceof CastExpr) + || !((CastExpr) getChild(0)).isImplicit()); return getChild(0); } else { return this; @@ -361,7 +360,7 @@ public Expr getResultValue() throws AnalysisException { } Expr targetExpr; try { - targetExpr = castTo((LiteralExpr)value); + targetExpr = castTo((LiteralExpr) value); } catch (AnalysisException ae) { targetExpr = this; } catch (NumberFormatException nfe) { @@ -493,7 +492,7 @@ private int getDigital(String desc, List parameters, List inputPar if (index != -1) { Expr expr = inputParamsExprs.get(index); if (expr.getType().isIntegerType()) { - 
return ((Long)((IntLiteral) expr).getRealValue()).intValue(); + return ((Long) ((IntLiteral) expr).getRealValue()).intValue(); } } return -1; @@ -501,8 +500,8 @@ private int getDigital(String desc, List parameters, List inputPar @Override public boolean isNullable() { - return children.get(0).isNullable() || - (children.get(0).getType().isStringType() && !getType().isStringType()) || - (!children.get(0).getType().isDateType() && getType().isDateType()); + return children.get(0).isNullable() + || (children.get(0).getType().isStringType() && !getType().isStringType()) + || (!children.get(0).getType().isDateType() && getType().isDateType()); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java index 86a818ff8e159f..d3831f2de639f7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java @@ -131,15 +131,41 @@ public static ColumnDef newSequenceColumnDef(Type type, AggregateType aggregateT "sequence column hidden column", false); } - public boolean isAllowNull() { return isAllowNull; } - public String getDefaultValue() { return defaultValue.value; } - public String getName() { return name; } - public AggregateType getAggregateType() { return aggregateType; } - public void setAggregateType(AggregateType aggregateType) { this.aggregateType = aggregateType; } - public boolean isKey() { return isKey; } - public void setIsKey(boolean isKey) { this.isKey = isKey; } - public TypeDef getTypeDef() { return typeDef; } - public Type getType() { return typeDef.getType(); } + public boolean isAllowNull() { + return isAllowNull; + } + + public String getDefaultValue() { + return defaultValue.value; + } + + public String getName() { + return name; + } + + public AggregateType getAggregateType() { + return aggregateType; + } + + public void setAggregateType(AggregateType aggregateType) { + this.aggregateType = aggregateType; + } + + public boolean isKey() { + return isKey; + } + + public void setIsKey(boolean isKey) { + this.isKey = isKey; + } + + public TypeDef getTypeDef() { + return typeDef; + } + + public Type getType() { + return typeDef.getType(); + } public String getComment() { return comment; @@ -168,9 +194,9 @@ public void analyze(boolean isOlap) throws AnalysisException { Type type = typeDef.getType(); - if(!Config.enable_quantile_state_type && type.isQuantileStateType()) { - throw new AnalysisException("quantile_state is disabled" + - "Set config 'enable_quantile_state_type' = 'true' to enable this column type."); + if (!Config.enable_quantile_state_type && type.isQuantileStateType()) { + throw new AnalysisException("quantile_state is disabled" + + "Set config 'enable_quantile_state_type' = 'true' to enable this column type."); } // disable Bitmap Hll type in keys, values without aggregate function. 
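A clarifying aside on the validateDefaultValue hunk that follows (the helper below is hypothetical and named only for illustration): the literal constructors validate by side effect, throwing AnalysisException on malformed input, which is why the patch deletes the never-read local variables and keeps the bare constructor calls. A minimal sketch of the idiom:

    // Hypothetical condensation of the pattern in ColumnDef.validateDefaultValue:
    // construct the literal purely for its validating side effect, discard the instance.
    private static void checkDefaultParses(String defaultValue, Type type) throws AnalysisException {
        new IntLiteral(defaultValue, type); // throws AnalysisException on invalid input
    }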
@@ -270,10 +296,10 @@ public static void validateDefaultValue(Type type, String defaultValue) throws A case SMALLINT: case INT: case BIGINT: - IntLiteral intLiteral = new IntLiteral(defaultValue, type); + new IntLiteral(defaultValue, type); break; case LARGEINT: - LargeIntLiteral largeIntLiteral = new LargeIntLiteral(defaultValue); + new LargeIntLiteral(defaultValue); break; case FLOAT: FloatLiteral floatLiteral = new FloatLiteral(defaultValue); @@ -282,7 +308,7 @@ public static void validateDefaultValue(Type type, String defaultValue) throws A } break; case DOUBLE: - FloatLiteral doubleLiteral = new FloatLiteral(defaultValue); + new FloatLiteral(defaultValue); break; case DECIMALV2: DecimalLiteral decimalLiteral = new DecimalLiteral(defaultValue); @@ -290,7 +316,7 @@ public static void validateDefaultValue(Type type, String defaultValue) throws A break; case DATE: case DATETIME: - DateLiteral dateLiteral = new DateLiteral(defaultValue, type); + new DateLiteral(defaultValue, type); break; case CHAR: case VARCHAR: @@ -309,7 +335,7 @@ public static void validateDefaultValue(Type type, String defaultValue) throws A case STRUCT: break; case BOOLEAN: - BoolLiteral boolLiteral = new BoolLiteral(defaultValue); + new BoolLiteral(defaultValue); break; default: throw new AnalysisException("Unsupported type: " + type); @@ -346,5 +372,7 @@ public Column toColumn() { } @Override - public String toString() { return toSql(); } + public String toString() { + return toSql(); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CompoundPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CompoundPredicate.java index 87b0dfd8830521..d2175117a01527 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CompoundPredicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CompoundPredicate.java @@ -57,8 +57,7 @@ public CompoundPredicate(Operator op, Expr e1, Expr e2) { this.op = op; Preconditions.checkNotNull(e1); children.add(e1); - Preconditions.checkArgument( - op == Operator.NOT && e2 == null || op != Operator.NOT && e2 != null); + Preconditions.checkArgument(op == Operator.NOT && e2 == null || op != Operator.NOT && e2 != null); if (e2 != null) { children.add(e2); } @@ -121,14 +120,13 @@ public void analyzeImpl(Analyzer analyzer) throws AnalysisException { for (Expr e : children) { if (!e.getType().equals(Type.BOOLEAN) && !e.getType().isNull()) { throw new AnalysisException(String.format( - "Operand '%s' part of predicate " + "'%s' should return type 'BOOLEAN' but " + - "returns type '%s'.", + "Operand '%s' part of predicate " + "'%s' should return type 'BOOLEAN' but " + + "returns type '%s'.", e.toSql(), toSql(), e.getType())); } } - if (getChild(0).selectivity == -1 || children.size() == 2 && getChild( - 1).selectivity == -1) { + if (getChild(0).selectivity == -1 || children.size() == 2 && getChild(1).selectivity == -1) { // give up if we're missing an input selectivity = -1; return; @@ -205,37 +203,36 @@ public static Expr createConjunction(Expr lhs, Expr rhs) { */ public static Expr createConjunctivePredicate(List conjuncts) { Expr conjunctivePred = null; - for (Expr expr: conjuncts) { - if (conjunctivePred == null) { - conjunctivePred = expr; - continue; - } - conjunctivePred = new CompoundPredicate(CompoundPredicate.Operator.AND, - expr, conjunctivePred); + for (Expr expr : conjuncts) { + if (conjunctivePred == null) { + conjunctivePred = expr; + continue; + } + conjunctivePred = new CompoundPredicate(CompoundPredicate.Operator.AND, expr, 
conjunctivePred); } return conjunctivePred; } - @Override + @Override public Expr getResultValue() throws AnalysisException { recursiveResetChildrenResult(); boolean compoundResult = false; if (op == Operator.NOT) { final Expr childValue = getChild(0); - if(!(childValue instanceof BoolLiteral)) { + if (!(childValue instanceof BoolLiteral)) { return this; } - final BoolLiteral boolChild = (BoolLiteral)childValue; + final BoolLiteral boolChild = (BoolLiteral) childValue; compoundResult = !boolChild.getValue(); } else { final Expr leftChildValue = getChild(0); final Expr rightChildValue = getChild(1); - if(!(leftChildValue instanceof BoolLiteral) + if (!(leftChildValue instanceof BoolLiteral) || !(rightChildValue instanceof BoolLiteral)) { return this; } - final BoolLiteral leftBoolValue = (BoolLiteral)leftChildValue; - final BoolLiteral rightBoolValue = (BoolLiteral)rightChildValue; + final BoolLiteral leftBoolValue = (BoolLiteral) leftChildValue; + final BoolLiteral rightBoolValue = (BoolLiteral) rightChildValue; switch (op) { case AND: compoundResult = leftBoolValue.getValue() && rightBoolValue.getValue(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateEncryptKeyStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateEncryptKeyStmt.java index ad694ad8caa062..626cef29b623a8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateEncryptKeyStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateEncryptKeyStmt.java @@ -40,7 +40,7 @@ * for example: * CREATE ENCRYPTKEY test.key1 AS "beijing"; */ -public class CreateEncryptKeyStmt extends DdlStmt{ +public class CreateEncryptKeyStmt extends DdlStmt { private final EncryptKeyName encryptKeyName; private final String keyString; private EncryptKey encryptKey; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateRoutineLoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateRoutineLoadStmt.java index 45a0a7ff42dcb8..eff6a8bb7c3fe1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateRoutineLoadStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateRoutineLoadStmt.java @@ -97,7 +97,7 @@ public class CreateRoutineLoadStmt extends DdlStmt { public static final String MAX_BATCH_SIZE_PROPERTY = "max_batch_size"; public static final String EXEC_MEM_LIMIT_PROPERTY = "exec_mem_limit"; - public static final String FORMAT = "format";// the value is csv or json, default is csv + public static final String FORMAT = "format"; // the value is csv or json, default is csv public static final String STRIP_OUTER_ARRAY = "strip_outer_array"; public static final String JSONPATHS = "jsonpaths"; public static final String JSONROOT = "json_root"; @@ -338,7 +338,7 @@ public void checkDBTable(Analyzer analyzer) throws AnalysisException { throw new AnalysisException("load by MERGE or DELETE is only supported in unique tables."); } if (mergeType != LoadTask.MergeType.APPEND - && !(table.getType() == Table.TableType.OLAP && ((OlapTable) table).hasDeleteSign()) ) { + && !(table.getType() == Table.TableType.OLAP && ((OlapTable) table).hasDeleteSign())) { throw new AnalysisException("load by MERGE or DELETE need to upgrade table to support batch delete."); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableLikeStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableLikeStmt.java index 70223c87cc27d6..b65aaa4d28d209 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableLikeStmt.java 
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableLikeStmt.java @@ -51,7 +51,7 @@ public CreateTableLikeStmt(boolean ifNotExists, TableName tableName, TableName e this.ifNotExists = ifNotExists; this.tableName = tableName; this.existedTableName = existedTableName; - if (!CollectionUtils.isEmpty(rollupNames) && withAllRollup){ + if (!CollectionUtils.isEmpty(rollupNames) && withAllRollup) { throw new DdlException("Either all or part of the rollup can be copied, not both"); } this.rollupNames = rollupNames; @@ -108,10 +108,10 @@ public void analyze(Analyzer analyzer) throws UserException { public String toSql() { StringBuilder sb = new StringBuilder(); sb.append("CREATE TABLE ").append(tableName.toSql()).append(" LIKE ").append(existedTableName.toSql()); - if (withAllRollup && CollectionUtils.isEmpty(rollupNames)){ + if (withAllRollup && CollectionUtils.isEmpty(rollupNames)) { sb.append(" WITH ROLLUP"); } - if (!withAllRollup && !CollectionUtils.isEmpty(rollupNames)){ + if (!withAllRollup && !CollectionUtils.isEmpty(rollupNames)) { sb.append(" WITH ROLLUP (").append(Joiner.on(",").join(rollupNames)).append(")"); } return sb.toString(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java index 5c2db118bb755c..44de0d27f95580 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java @@ -183,9 +183,13 @@ public CreateTableStmt(boolean ifNotExists, this.comment = Strings.nullToEmpty(comment); } - public void addColumnDef(ColumnDef columnDef) { columnDefs.add(columnDef); } + public void addColumnDef(ColumnDef columnDef) { + columnDefs.add(columnDef); + } - public void setIfNotExists(boolean ifNotExists) { this.ifNotExists = ifNotExists; } + public void setIfNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + } public boolean isSetIfNotExists() { return ifNotExists; @@ -380,8 +384,8 @@ public void analyze(Analyzer analyzer) throws UserException { throw new AnalysisException("Array column can't support aggregation " + columnDef.getAggregateType()); } if (columnDef.isKey()) { - throw new AnalysisException("Array can only be used in the non-key column of" + - " the duplicate table at present."); + throw new AnalysisException("Array can only be used in the non-key column of" + + " the duplicate table at present."); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DataSortInfo.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DataSortInfo.java index 783ccde0ec7bc0..4393dc162cf1bc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DataSortInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DataSortInfo.java @@ -54,7 +54,7 @@ public DataSortInfo(Map properties) { } } - public DataSortInfo (TSortType sortType, int colNum) { + public DataSortInfo(TSortType sortType, int colNum) { this.sortType = sortType; this.colNum = colNum; } @@ -97,8 +97,8 @@ public boolean equals(DataSortInfo dataSortInfo) { } public String toSql() { - String res = ",\n\"" + DATA_SORT_TYPE + "\" = \"" + this.sortType + "\"" + - ",\n\"" + DATA_SORT_COL_NUM + "\" = \"" + this.colNum + "\""; + String res = ",\n\"" + DATA_SORT_TYPE + "\" = \"" + this.sortType + "\"" + + ",\n\"" + DATA_SORT_COL_NUM + "\" = \"" + this.colNum + "\""; return res; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java 
b/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java index 75fcb05f2a117d..a59952f55edd38 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java @@ -1054,7 +1054,7 @@ private boolean checkRange() { || microsecond > MAX_MICROSECOND; } private boolean checkDate() { - if (month != 0 && day > DAYS_IN_MONTH[((int) month)]){ + if (month != 0 && day > DAYS_IN_MONTH[((int) month)]) { if (month == 2 && day == 29 && Year.isLeap(year)) { return false; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DecimalLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DecimalLiteral.java index 07abe74bed66c4..b2b1278d5d607c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DecimalLiteral.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DecimalLiteral.java @@ -193,7 +193,6 @@ public double getDoubleValue() { protected void toThrift(TExprNode msg) { // TODO(hujie01) deal with loss information msg.node_type = TExprNodeType.DECIMAL_LITERAL; - BigDecimal v = new BigDecimal(value.toBigInteger()); msg.decimal_literal = new TDecimalLiteral(value.toPlainString()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DescriptorTable.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DescriptorTable.java index e2f933291f9a0a..526c6e4806941c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DescriptorTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DescriptorTable.java @@ -82,7 +82,7 @@ public TupleDescriptor copyTupleDescriptor(TupleId srcId, String debugName) { tupleDescs.put(d.getId(), d); // create copies of slots TupleDescriptor src = tupleDescs.get(srcId); - for (SlotDescriptor slot: src.getSlots()) { + for (SlotDescriptor slot : src.getSlots()) { copySlotDescriptor(d, slot); } d.computeStatAndMemLayout(); @@ -119,7 +119,7 @@ public void addReferencedTable(Table table) { * Marks all slots in list as materialized. */ public void markSlotsMaterialized(List ids) { - for (SlotId id: ids) { + for (SlotId id : ids) { getSlotDesc(id).setIsMaterialized(true); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DropFunctionStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DropFunctionStmt.java index da90e1c0f4f8ae..4e88751ede41a6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DropFunctionStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DropFunctionStmt.java @@ -37,8 +37,13 @@ public DropFunctionStmt(FunctionName functionName, FunctionArgsDef argsDef) { this.argsDef = argsDef; } - public FunctionName getFunctionName() { return functionName; } - public FunctionSearchDesc getFunction() { return function; } + public FunctionName getFunctionName() { + return functionName; + } + + public FunctionSearchDesc getFunction() { + return function; + } @Override public void analyze(Analyzer analyzer) throws UserException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExistsPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExistsPredicate.java index bdb2fbee0622c3..b061b19e2f47a9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExistsPredicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExistsPredicate.java @@ -27,11 +27,12 @@ * Class representing a [NOT] EXISTS predicate. 
*/ public class ExistsPredicate extends Predicate { - private static final Logger LOG = LoggerFactory.getLogger( - ExistsPredicate.class); + private static final Logger LOG = LoggerFactory.getLogger(ExistsPredicate.class); private boolean notExists = false; - public boolean isNotExists() { return notExists; } + public boolean isNotExists() { + return notExists; + } public ExistsPredicate(Subquery subquery, boolean notExists) { Preconditions.checkNotNull(subquery); @@ -56,7 +57,9 @@ protected void toThrift(TExprNode msg) { } @Override - public Expr clone() { return new ExistsPredicate(this); } + public Expr clone() { + return new ExistsPredicate(this); + } @Override public String toSqlImpl() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExportStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExportStmt.java index a77d151168da1f..6df62c42f4cd8e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExportStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExportStmt.java @@ -72,7 +72,7 @@ public class ExportStmt extends StatementBase { private Map properties = Maps.newHashMap(); private String columnSeparator; private String lineDelimiter; - private String columns ; + private String columns; private TableRef tableRef; @@ -243,17 +243,17 @@ public static String checkPath(String path, StorageBackend.StorageType type) thr if (schema == null || !schema.equalsIgnoreCase("s3")) { throw new AnalysisException("Invalid export path. please use valid 'S3://' path."); } - } else if (type == StorageBackend.StorageType.HDFS) { - if (schema == null || !schema.equalsIgnoreCase("hdfs")) { - throw new AnalysisException("Invalid export path. please use valid 'HDFS://' path."); - } - } else if (type == StorageBackend.StorageType.LOCAL) { - if (schema != null && !schema.equalsIgnoreCase("file")) { - throw new AnalysisException("Invalid export path. please use valid '" - + OutFileClause.LOCAL_FILE_PREFIX + "' path."); - } - path = path.substring(OutFileClause.LOCAL_FILE_PREFIX.length() - 1); + } else if (type == StorageBackend.StorageType.HDFS) { + if (schema == null || !schema.equalsIgnoreCase("hdfs")) { + throw new AnalysisException("Invalid export path. please use valid 'HDFS://' path."); } + } else if (type == StorageBackend.StorageType.LOCAL) { + if (schema != null && !schema.equalsIgnoreCase("file")) { + throw new AnalysisException( + "Invalid export path. 
please use valid '" + OutFileClause.LOCAL_FILE_PREFIX + "' path."); + } + path = path.substring(OutFileClause.LOCAL_FILE_PREFIX.length() - 1); + } return path; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Expr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Expr.java index 82130639657979..2bf7e5929f78d5 100755 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Expr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Expr.java @@ -48,7 +48,6 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -77,8 +76,8 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl private final static com.google.common.base.Predicate IS_AGGREGATE_PREDICATE = new com.google.common.base.Predicate() { public boolean apply(Expr arg) { - return arg instanceof FunctionCallExpr && - ((FunctionCallExpr)arg).isAggregateFunction(); + return arg instanceof FunctionCallExpr + && ((FunctionCallExpr) arg).isAggregateFunction(); } }; @@ -87,8 +86,8 @@ public boolean apply(Expr arg) { new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { - return arg instanceof CompoundPredicate && - ((CompoundPredicate)arg).getOp() == CompoundPredicate.Operator.NOT; + return arg instanceof CompoundPredicate + && ((CompoundPredicate) arg).getOp() == CompoundPredicate.Operator.NOT; } }; @@ -97,8 +96,8 @@ public boolean apply(Expr arg) { new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { - return arg instanceof CompoundPredicate && - ((CompoundPredicate)arg).getOp() == CompoundPredicate.Operator.OR; + return arg instanceof CompoundPredicate + && ((CompoundPredicate) arg).getOp() == CompoundPredicate.Operator.OR; } }; @@ -113,33 +112,21 @@ public boolean apply(Expr arg) { // Returns true if an Expr is an aggregate function that returns non-null on // an empty set (e.g. count). - public final static com.google.common.base.Predicate - NON_NULL_EMPTY_AGG = new com.google.common.base.Predicate() { - @Override - public boolean apply(Expr arg) { - return arg instanceof FunctionCallExpr && - ((FunctionCallExpr)arg).returnsNonNullOnEmpty(); - } - }; - - /* TODO(zc) - // Returns true if an Expr is a builtin aggregate function. - public final static com.google.common.base.Predicate IS_BUILTIN_AGG_FN = + public final static com.google.common.base.Predicate NON_NULL_EMPTY_AGG = new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { - return arg instanceof FunctionCallExpr && - ((FunctionCallExpr)arg).getFnName().isBuiltin(); + return arg instanceof FunctionCallExpr && ((FunctionCallExpr) arg).returnsNonNullOnEmpty(); } }; - */ + // Returns true if an Expr is an aggregate function supported in correlated subqueries.
public final static com.google.common.base.Predicate CORRELATED_SUBQUERY_SUPPORT_AGG_FN = new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { if (arg instanceof FunctionCallExpr) { - String fnName = ((FunctionCallExpr)arg).getFnName().getFunction(); + String fnName = ((FunctionCallExpr) arg).getFnName().getFunction(); return (fnName.equalsIgnoreCase("sum") || fnName.equalsIgnoreCase("max") || fnName.equalsIgnoreCase("min") @@ -156,7 +143,7 @@ public boolean apply(Expr arg) { new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { - return arg instanceof BoolLiteral && ((BoolLiteral)arg).getValue(); + return arg instanceof BoolLiteral && ((BoolLiteral) arg).getValue(); } }; @@ -164,26 +151,32 @@ public boolean apply(Expr arg) { new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { - return arg instanceof BoolLiteral && !((BoolLiteral)arg).getValue(); + return arg instanceof BoolLiteral && !((BoolLiteral) arg).getValue(); } }; public final static com.google.common.base.Predicate IS_EQ_BINARY_PREDICATE = new com.google.common.base.Predicate() { @Override - public boolean apply(Expr arg) { return BinaryPredicate.getEqSlots(arg) != null; } + public boolean apply(Expr arg) { + return BinaryPredicate.getEqSlots(arg) != null; + } }; public final static com.google.common.base.Predicate IS_BINARY_PREDICATE = new com.google.common.base.Predicate() { @Override - public boolean apply(Expr arg) { return arg instanceof BinaryPredicate; } + public boolean apply(Expr arg) { + return arg instanceof BinaryPredicate; + } }; public static final com.google.common.base.Predicate IS_NULL_LITERAL = new com.google.common.base.Predicate() { @Override - public boolean apply(Expr arg) { return arg instanceof NullLiteral; } + public boolean apply(Expr arg) { + return arg instanceof NullLiteral; + } }; public static final com.google.common.base.Predicate IS_VARCHAR_SLOT_REF_IMPLICIT_CAST = @@ -302,7 +295,8 @@ public boolean isAnalyzed() { return isAnalyzed; } - public void checkValueValid() throws AnalysisException {} + public void checkValueValid() throws AnalysisException { + } public ExprId getId() { return id; @@ -329,7 +323,9 @@ public double getSelectivity() { return selectivity; } - public boolean hasSelectivity() { return selectivity >= 0; } + public boolean hasSelectivity() { + return selectivity >= 0; + } public long getNumDistinctValues() { return numDistinctValues; @@ -353,14 +349,22 @@ public boolean isFilter() { return isFilter; } - public void setIsFilter(boolean v) { - isFilter = v; + public boolean isOnClauseConjunct() { + return isOnClauseConjunct; + } + + public void setIsOnClauseConjunct(boolean b) { + isOnClauseConjunct = b; + } + + public boolean isAuxExpr() { + return isAuxExpr; + } + + public void setIsAuxExpr() { + isAuxExpr = true; } - public boolean isOnClauseConjunct() { return isOnClauseConjunct; } - public void setIsOnClauseConjunct(boolean b) { isOnClauseConjunct = b; } - public boolean isAuxExpr() { return isAuxExpr; } - public void setIsAuxExpr() { isAuxExpr = true; } public Function getFn() { return fn; } @@ -384,8 +388,8 @@ public final void analyze(Analyzer analyzer) throws AnalysisException { // Check the expr child limit. 
if (children.size() > Config.expr_children_limit) { - throw new AnalysisException(String.format("Exceeded the maximum number of child " + - "expressions (%d).", Config.expr_children_limit)); + throw new AnalysisException(String.format("Exceeded the maximum number of child " + + "expressions (%d).", Config.expr_children_limit)); } // analyzer may be null for certain literal constructions (e.g. IntLiteral). @@ -393,14 +397,14 @@ public final void analyze(Analyzer analyzer) throws AnalysisException { analyzer.incrementCallDepth(); // Check the expr depth limit. Do not print the toSql() to not overflow the stack. if (analyzer.getCallDepth() > Config.expr_depth_limit) { - throw new AnalysisException(String.format("Exceeded the maximum depth of an " + - "expression tree (%s).", Config.expr_depth_limit)); + throw new AnalysisException(String.format("Exceeded the maximum depth of an " + + "expression tree (%s).", Config.expr_depth_limit)); } } else { throw new AnalysisException("analyzer is null."); } - for (Expr child: children) { + for (Expr child : children) { child.analyze(analyzer); } if (analyzer != null) { @@ -443,7 +447,7 @@ protected void computeNumDistinctValues() { List slotRefs = Lists.newArrayList(); this.collect(SlotRef.class, slotRefs); numDistinctValues = -1; - for (SlotRef slotRef: slotRefs) { + for (SlotRef slotRef : slotRefs) { numDistinctValues = Math.max(numDistinctValues, slotRef.numDistinctValues); } } @@ -547,14 +551,14 @@ public static boolean equalSets(List l1, List l2) { } public static HashMap toCountMap(List list) { - HashMap countMap = new HashMap(); + HashMap countMap = new HashMap(); for (int i = 0; i < list.size(); i++) { C obj = list.get(i); Integer count = (Integer) countMap.get(obj); if (count == null) { countMap.put(obj, 1); } else { - countMap.put(obj, count+1); + countMap.put(obj, count + 1); } } return countMap; @@ -609,22 +613,13 @@ public static ArrayList cloneAndResetList(List l) { * This can't go into TreeNode<>, because we'd be using the template param * NodeType. */ - public static void collectList(List input, Class cl, - List output) { + public static void collectList(List input, Class cl, List output) { Preconditions.checkNotNull(input); for (Expr e : input) { e.collect(cl, output); } } - public static void collectAggregateExprs(List input, - List output) { - Preconditions.checkNotNull(input); - for (Expr e : input) { - e.collectAggregateExprs(output); - } - } - /** * get the expr which in l1 and l2 in the same time. 
* Return the intersection of l1 and l2 @@ -632,7 +627,7 @@ public static void collectAggregateExprs(List i public static List intersect(List l1, List l2) { List result = new ArrayList(); - for (C element: l1) { + for (C element : l1) { if (l2.contains(element)) { result.add(element); } @@ -728,14 +723,14 @@ public Expr substitute(ExprSubstitutionMap smap, Analyzer analyzer, boolean pres } } - public static ArrayList trySubstituteList(Iterable exprs, + public static ArrayList trySubstituteList(Iterable exprs, ExprSubstitutionMap smap, Analyzer analyzer, boolean preserveRootTypes) throws AnalysisException { if (exprs == null) { return null; } ArrayList result = new ArrayList(); - for (Expr e: exprs) { + for (Expr e : exprs) { result.add(e.trySubstitute(smap, analyzer, preserveRootTypes)); } return result; @@ -819,7 +814,7 @@ public static boolean isBound(List exprs, List tids) { } public static void getIds(List exprs, List tupleIds, - List slotIds) { + List slotIds) { if (exprs == null) { return; } @@ -838,22 +833,22 @@ public void markAgg() { * the exprs have an invalid number of distinct values. */ public static long getNumDistinctValues(List exprs) { - if (exprs == null || exprs.isEmpty()) { - return 0; - } - long numDistinctValues = 1; - for (Expr expr: exprs) { - if (expr.getNumDistinctValues() == -1) { - numDistinctValues = -1; - break; + if (exprs == null || exprs.isEmpty()) { + return 0; } - numDistinctValues *= expr.getNumDistinctValues(); - } - return numDistinctValues; + long numDistinctValues = 1; + for (Expr expr : exprs) { + if (expr.getNumDistinctValues() == -1) { + numDistinctValues = -1; + break; + } + numDistinctValues *= expr.getNumDistinctValues(); + } + return numDistinctValues; } public void vectorizedAnalyze(Analyzer analyzer) { - for (Expr child: children) { + for (Expr child : children) { child.vectorizedAnalyze(analyzer); } } @@ -869,31 +864,6 @@ public void computeOutputColumn(Analyzer analyzer) { getIds(tupleIds, null); Preconditions.checkArgument(tupleIds.size() == 1); - //List reuseExprs = analyzer.getBufferReuseConjuncts(tupleIds.get(0)); - //for (Expr child : children) { - //if (child instanceof SlotRef) { - //if (!((SlotRef) child).getDesc().isMultiRef()) { - //LOG.debug("add " + child.debugString() + " to reuse exprs."); - //reuseExprs.add(child); - //} - //} else { - //LOG.debug("add " + child.debugString() + " to reuse exprs."); - //reuseExprs.add(child); - //} - //} - - //for (Expr reuseExpr : reuseExprs) { - //if (reuseExpr.getType() == PrimitiveType.getAssignmentCompatibleType(getType(), - //reuseExpr.getType())) { - //LOG.debug( - //"reuse " + reuseExpr.debugString() + " buffer for " + this.debugString()); - //outputColumn = reuseExpr.getOutputColumn(); - //Preconditions.checkArgument(outputColumn >= 0); - //reuseExprs.remove(reuseExpr); - //return; - //} - //} - int currentOutputColumn = analyzer.getCurrentOutputColumn(tupleIds.get(0)); this.outputColumn = currentOutputColumn; LOG.info(debugString() + " outputColumn: " + this.outputColumn); @@ -1022,27 +992,29 @@ public String debugString() { * Resets the internal state of this expr produced by analyze(). * Only modifies this expr, and not its child exprs. */ - protected void resetAnalysisState() { isAnalyzed = false; } + protected void resetAnalysisState() { + isAnalyzed = false; + } /** * Resets the internal analysis state of this expr tree. Removes implicit casts. 
*/ public Expr reset() { - if (isImplicitCast()) { - return getChild(0).reset(); - } - for (int i = 0; i < children.size(); ++i) { - children.set(i, children.get(i).reset()); - } - resetAnalysisState(); - return this; + if (isImplicitCast()) { + return getChild(0).reset(); + } + for (int i = 0; i < children.size(); ++i) { + children.set(i, children.get(i).reset()); + } + resetAnalysisState(); + return this; } public static ArrayList resetList(ArrayList l) { - for (int i = 0; i < l.size(); ++i) { - l.set(i, l.get(i).reset()); - } - return l; + for (int i = 0; i < l.size(); ++i) { + l.set(i, l.get(i).reset()); + } + return l; } /** @@ -1101,8 +1073,8 @@ public int hashCode() { */ public List getConjuncts() { List list = Lists.newArrayList(); - if (this instanceof CompoundPredicate && ((CompoundPredicate) this).getOp() == - CompoundPredicate.Operator.AND) { + if (this instanceof CompoundPredicate + && ((CompoundPredicate) this).getOp() == CompoundPredicate.Operator.AND) { // TODO: we have to convert CompoundPredicate.AND to two expr trees for // conjuncts because NULLs are handled differently for CompoundPredicate.AND // and conjunct evaluation. This is not optimal for jitted exprs because it @@ -1189,7 +1161,7 @@ public boolean isBound(TupleId tid) { * Returns true if expr is fully bound by tids, otherwise false. */ public boolean isBoundByTupleIds(List tids) { - for (Expr child: children) { + for (Expr child : children) { if (!child.isBoundByTupleIds(tids)) { return false; } @@ -1317,8 +1289,8 @@ public void checkReturnsBool(String name, boolean printExpr) throws AnalysisExce } public Expr checkTypeCompatibility(Type targetType) throws AnalysisException { - if (targetType.getPrimitiveType() != PrimitiveType.ARRAY && - targetType.getPrimitiveType() == type.getPrimitiveType()) { + if (targetType.getPrimitiveType() != PrimitiveType.ARRAY + && targetType.getPrimitiveType() == type.getPrimitiveType()) { return this; } // bitmap must match exactly @@ -1350,8 +1322,8 @@ private void checkHllCompatibility() throws AnalysisException { } } else if (this instanceof FunctionCallExpr) { final FunctionCallExpr functionExpr = (FunctionCallExpr) this; - if (!functionExpr.getFnName().getFunction().equalsIgnoreCase("hll_hash") && - !functionExpr.getFnName().getFunction().equalsIgnoreCase("hll_empty")) { + if (!functionExpr.getFnName().getFunction().equalsIgnoreCase("hll_hash") + && !functionExpr.getFnName().getFunction().equalsIgnoreCase("hll_empty")) { throw new AnalysisException(hllMismatchLog); } } else { @@ -1379,7 +1351,7 @@ public final Expr castTo(Type targetType) throws AnalysisException { } if ((targetType.isStringType() || targetType.isHllType()) - && (this.type.isStringType() || this.type.isHllType())) { + && (this.type.isStringType() || this.type.isHllType())) { return this; } // Preconditions.checkState(PrimitiveType.isImplicitCast(type, targetType), "cast %s to %s", this.type, targetType); @@ -1582,14 +1554,14 @@ public SlotDescriptor findSrcScanSlot() { return null; } - public static double getConstFromExpr(Expr e) throws AnalysisException{ + public static double getConstFromExpr(Expr e) throws AnalysisException { Preconditions.checkState(e.isConstant()); double value = 0; - if( e instanceof LiteralExpr){ - LiteralExpr lit = (LiteralExpr)e; + if (e instanceof LiteralExpr) { + LiteralExpr lit = (LiteralExpr) e; value = lit.getDoubleValue(); } else { - throw new AnalysisException("To const value not a LiteralExpr " ); + throw new AnalysisException("To const value not a LiteralExpr "); } return 
value; } @@ -1675,7 +1647,7 @@ public static Expr pushNegationToOperands(Expr root) { try { // Make sure we call function 'negate' only on classes that support it, // otherwise we may recurse infinitely. - Method m = root.getChild(0).getClass().getDeclaredMethod(NEGATE_FN); + root.getChild(0).getClass().getDeclaredMethod(NEGATE_FN); return pushNegationToOperands(root.getChild(0).negate()); } catch (NoSuchMethodException e) { // The 'negate' function is not implemented. Break the recursion. @@ -1687,7 +1659,7 @@ public static Expr pushNegationToOperands(Expr root) { Expr left = pushNegationToOperands(root.getChild(0)); Expr right = pushNegationToOperands(root.getChild(1)); CompoundPredicate compoundPredicate = - new CompoundPredicate(((CompoundPredicate)root).getOp(), left, right); + new CompoundPredicate(((CompoundPredicate) root).getOp(), left, right); compoundPredicate.setPrintSqlInParens(root.getPrintSqlInParens()); return compoundPredicate; } @@ -1806,9 +1778,9 @@ public static void writeTo(Expr expr, DataOutput output) throws IOException { output.writeInt(ExprSerCode.FUNCTION_CALL.getCode()); } else if (expr instanceof ArrayLiteral) { output.writeInt(ExprSerCode.ARRAY_LITERAL.getCode()); - } else if (expr instanceof CastExpr){ + } else if (expr instanceof CastExpr) { output.writeInt(ExprSerCode.CAST_EXPR.getCode()); - }else { + } else { throw new IOException("Unknown class " + expr.getClass().getName()); } expr.write(output); @@ -1895,7 +1867,7 @@ public String getStringValue() { } public static Expr getFirstBoundChild(Expr expr, List tids) { - for (Expr child: expr.getChildren()) { + for (Expr child : expr.getChildren()) { if (child.isBoundByTupleIds(tids)) { return child; } @@ -1913,7 +1885,7 @@ public boolean isContainsFunction(String functionName) { if (fn.functionName().equalsIgnoreCase(functionName)) { return true; } - for (Expr child: children) { + for (Expr child : children) { if (child.isContainsFunction(functionName)) { return true; } @@ -1928,7 +1900,7 @@ public boolean isContainsClass(String className) { if (this.getClass().getName().equalsIgnoreCase(className)) { return true; } - for (Expr child: children) { + for (Expr child : children) { if (child.isContainsClass(className)) { return true; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprId.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprId.java index 8507fd26a52b88..79b2fc721ee04d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprId.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprId.java @@ -37,9 +37,14 @@ public ExprId(int id) { public static IdGenerator createGenerator() { return new IdGenerator() { @Override - public ExprId getNextId() { return new ExprId(nextId++); } + public ExprId getNextId() { + return new ExprId(nextId++); + } + @Override - public ExprId getMaxId() { return new ExprId(nextId - 1); } + public ExprId getMaxId() { + return new ExprId(nextId - 1); + } }; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java index b7d44fe501a324..46b9caa0fa535e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java @@ -157,10 +157,17 @@ public void substituteLhs(ExprSubstitutionMap lhsSmap, Analyzer analyzer) { lhs = Expr.substituteList(lhs, lhsSmap, analyzer, false); } - public List getLhs() { return lhs; } - public 
List getRhs() { return rhs; } + public List getLhs() { + return lhs; + } + + public List getRhs() { + return rhs; + } - public int size() { return lhs.size(); } + public int size() { + return lhs.size(); + } public String debugString() { Preconditions.checkState(lhs.size() == rhs.size()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java index 7789202e884ede..220fe552993feb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java @@ -192,7 +192,7 @@ public FEFunctionSignature getSignature() { public LiteralExpr invoke(List args) throws AnalysisException { final List invokeArgs = createInvokeArgs(args); try { - return (LiteralExpr)method.invoke(null, invokeArgs.toArray()); + return (LiteralExpr) method.invoke(null, invokeArgs.toArray()); } catch (InvocationTargetException | IllegalAccessException | IllegalArgumentException e) { throw new AnalysisException(e.getLocalizedMessage()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/FloatLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/FloatLiteral.java index 1bceef48defdb6..ea1322835d4fea 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/FloatLiteral.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/FloatLiteral.java @@ -201,7 +201,7 @@ public int hashCode() { return 31 * super.hashCode() + Double.hashCode(value); } - private String timeStrFromFloat (double time) { + private String timeStrFromFloat(double time) { String timeStr = ""; if (time < 0) { @@ -209,7 +209,7 @@ private String timeStrFromFloat (double time) { time = -time; } int hour = (int) (time / 60 / 60); - int minute = (int)((time / 60)) % 60; + int minute = (int) ((time / 60)) % 60; int second = (int) (time) % 60; return "'" + timeStr + String.format("%02d:%02d:%02d", hour, minute, second) + "'"; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/FromClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/FromClause.java index 4906de085a000c..dfb247a7c4b7c0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/FromClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/FromClause.java @@ -22,7 +22,6 @@ import org.apache.doris.catalog.Database; -import org.apache.doris.catalog.Table; import org.apache.doris.cluster.ClusterNamespace; import org.apache.doris.common.AnalysisException; import org.apache.doris.common.ErrorCode; @@ -59,8 +58,14 @@ public FromClause(List tableRefs) { } } - public FromClause() { tablerefs = Lists.newArrayList(); } - public List getTableRefs() { return tablerefs; } + public FromClause() { + tablerefs = Lists.newArrayList(); + } + + public List getTableRefs() { + return tablerefs; + } + public void setNeedToSql(boolean needToSql) { this.needToSql = needToSql; } @@ -84,7 +89,7 @@ private void checkFromHiveTable(Analyzer analyzer) throws AnalysisException { Database db = analyzer.getCatalog().getDbOrAnalysisException(dbName); String tblName = tableName.getTbl(); - Table table = db.getTableOrAnalysisException(tblName); + db.getTableOrAnalysisException(tblName); } } @@ -158,7 +163,7 @@ public void analyze(Analyzer analyzer) throws AnalysisException, UserException { public FromClause clone() { ArrayList clone = Lists.newArrayList(); - for (TableRef tblRef: tablerefs) { + for (TableRef tblRef : tablerefs) { clone.add(tblRef.clone()); 
} return new FromClause(clone); @@ -166,20 +171,6 @@ public FromClause clone() { public void reset() { for (int i = 0; i < size(); ++i) { - TableRef origTblRef = get(i); - // TODO(zc): - // if (origTblRef.isResolved() && !(origTblRef instanceof InlineViewRef)) { - // // Replace resolved table refs with unresolved ones. - // TableRef newTblRef = new TableRef(origTblRef); - // // Use the fully qualified raw path to preserve the original resolution. - // // Otherwise, non-fully qualified paths might incorrectly match a local view. - // // TODO for 2.3: This full qualification preserves analysis state which is - // // contrary to the intended semantics of reset(). We could address this issue by - // // changing the WITH-clause analysis to register local views that have - // // fully-qualified table refs, and then remove the full qualification here. - // newTblRef.rawPath_ = origTblRef.getResolvedPath().getFullyQualifiedRawPath(); - // set(i, newTblRef); - // } get(i).reset(); } this.analyzed = false; @@ -208,14 +199,36 @@ public String toDigest() { return builder.toString(); } - public boolean isEmpty() { return tablerefs.isEmpty(); } + public boolean isEmpty() { + return tablerefs.isEmpty(); + } @Override - public Iterator iterator() { return tablerefs.iterator(); } - public int size() { return tablerefs.size(); } - public TableRef get(int i) { return tablerefs.get(i); } - public void set(int i, TableRef tableRef) { tablerefs.set(i, tableRef); } - public void add(TableRef t) { tablerefs.add(t); } - public void addAll(List t) { tablerefs.addAll(t); } - public void clear() { tablerefs.clear(); } + public Iterator iterator() { + return tablerefs.iterator(); + } + + public int size() { + return tablerefs.size(); + } + + public TableRef get(int i) { + return tablerefs.get(i); + } + + public void set(int i, TableRef tableRef) { + tablerefs.set(i, tableRef); + } + + public void add(TableRef t) { + tablerefs.add(t); + } + + public void addAll(List t) { + tablerefs.addAll(t); + } + + public void clear() { + tablerefs.clear(); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionArgsDef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionArgsDef.java index d0b07e9bea59ee..cdc6c62d6843b8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionArgsDef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionArgsDef.java @@ -37,8 +37,13 @@ public FunctionArgsDef(List argTypeDefs, boolean isVariadic) { this.isVariadic = isVariadic; } - public Type[] getArgTypes() { return argTypes; } - public boolean isVariadic() { return isVariadic; } + public Type[] getArgTypes() { + return argTypes; + } + + public boolean isVariadic() { + return isVariadic; + } public void analyze(Analyzer analyzer) throws AnalysisException { argTypes = new Type[argTypeDefs.size()]; @@ -71,5 +76,7 @@ public String toSql() { } @Override - public String toString() { return toSql(); } + public String toString() { + return toSql(); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java index e5f45317bd3b03..75d9800dbcdb03 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java @@ -248,21 +248,21 @@ private String paramsToSql() { } int len = children.size(); List result = Lists.newArrayList(); - if (fnName.getFunction().equalsIgnoreCase("json_array") || - 
fnName.getFunction().equalsIgnoreCase("json_object")) { + if (fnName.getFunction().equalsIgnoreCase("json_array") + || fnName.getFunction().equalsIgnoreCase("json_object")) { len = len - 1; } - if (fnName.getFunction().equalsIgnoreCase("aes_decrypt") || - fnName.getFunction().equalsIgnoreCase("aes_encrypt") || - fnName.getFunction().equalsIgnoreCase("sm4_decrypt") || - fnName.getFunction().equalsIgnoreCase("sm4_encrypt")) { + if (fnName.getFunction().equalsIgnoreCase("aes_decrypt") + || fnName.getFunction().equalsIgnoreCase("aes_encrypt") + || fnName.getFunction().equalsIgnoreCase("sm4_decrypt") + || fnName.getFunction().equalsIgnoreCase("sm4_encrypt")) { len = len - 1; } for (int i = 0; i < len; ++i) { - if (i == 1 && (fnName.getFunction().equalsIgnoreCase("aes_decrypt") || - fnName.getFunction().equalsIgnoreCase("aes_encrypt") || - fnName.getFunction().equalsIgnoreCase("sm4_decrypt") || - fnName.getFunction().equalsIgnoreCase("sm4_encrypt"))) { + if (i == 1 && (fnName.getFunction().equalsIgnoreCase("aes_decrypt") + || fnName.getFunction().equalsIgnoreCase("aes_encrypt") + || fnName.getFunction().equalsIgnoreCase("sm4_decrypt") + || fnName.getFunction().equalsIgnoreCase("sm4_encrypt"))) { result.add("\'***\'"); } else { result.add(children.get(i).toSql()); @@ -283,9 +283,9 @@ public String toSqlImpl() { StringBuilder sb = new StringBuilder(); sb.append(((FunctionCallExpr) expr).fnName); sb.append(paramsToSql()); - if (fnName.getFunction().equalsIgnoreCase("json_quote") || - fnName.getFunction().equalsIgnoreCase("json_array") || - fnName.getFunction().equalsIgnoreCase("json_object")) { + if (fnName.getFunction().equalsIgnoreCase("json_quote") + || fnName.getFunction().equalsIgnoreCase("json_array") + || fnName.getFunction().equalsIgnoreCase("json_object")) { return forJSON(sb.toString()); } return sb.toString(); @@ -303,21 +303,21 @@ private String paramsToDigest() { } int len = children.size(); List result = Lists.newArrayList(); - if (fnName.getFunction().equalsIgnoreCase("json_array") || - fnName.getFunction().equalsIgnoreCase("json_object")) { + if (fnName.getFunction().equalsIgnoreCase("json_array") + || fnName.getFunction().equalsIgnoreCase("json_object")) { len = len - 1; } - if (fnName.getFunction().equalsIgnoreCase("aes_decrypt") || - fnName.getFunction().equalsIgnoreCase("aes_encrypt") || - fnName.getFunction().equalsIgnoreCase("sm4_decrypt") || - fnName.getFunction().equalsIgnoreCase("sm4_encrypt")) { + if (fnName.getFunction().equalsIgnoreCase("aes_decrypt") + || fnName.getFunction().equalsIgnoreCase("aes_encrypt") + || fnName.getFunction().equalsIgnoreCase("sm4_decrypt") + || fnName.getFunction().equalsIgnoreCase("sm4_encrypt")) { len = len - 1; } for (int i = 0; i < len; ++i) { - if (i == 1 && (fnName.getFunction().equalsIgnoreCase("aes_decrypt") || - fnName.getFunction().equalsIgnoreCase("aes_encrypt") || - fnName.getFunction().equalsIgnoreCase("sm4_decrypt") || - fnName.getFunction().equalsIgnoreCase("sm4_encrypt"))) { + if (i == 1 && (fnName.getFunction().equalsIgnoreCase("aes_decrypt") + || fnName.getFunction().equalsIgnoreCase("aes_encrypt") + || fnName.getFunction().equalsIgnoreCase("sm4_decrypt") + || fnName.getFunction().equalsIgnoreCase("sm4_encrypt"))) { result.add("\'***\'"); } else { result.add(children.get(i).toDigest()); @@ -338,9 +338,9 @@ public String toDigestImpl() { StringBuilder sb = new StringBuilder(); sb.append(((FunctionCallExpr) expr).fnName); sb.append(paramsToDigest()); - if (fnName.getFunction().equalsIgnoreCase("json_quote") || - 
fnName.getFunction().equalsIgnoreCase("json_array") || - fnName.getFunction().equalsIgnoreCase("json_object")) { + if (fnName.getFunction().equalsIgnoreCase("json_quote") + || fnName.getFunction().equalsIgnoreCase("json_array") + || fnName.getFunction().equalsIgnoreCase("json_object")) { return forJSON(sb.toString()); } return sb.toString(); @@ -560,8 +560,8 @@ private void analyzeBuiltinAggFunction(Analyzer analyzer) throws AnalysisExcepti if (fnName.getFunction().equalsIgnoreCase(FunctionSet.INTERSECT_COUNT)) { if (children.size() <= 2) { - throw new AnalysisException("intersect_count(bitmap_column, column_to_filter, filter_values) " + - "function requires at least three parameters"); + throw new AnalysisException("intersect_count(bitmap_column, column_to_filter, filter_values) " + + "function requires at least three parameters"); } Type inputType = getChild(0).getType(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionName.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionName.java index 754a9cf924b928..58cf458f947370 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionName.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionName.java @@ -144,9 +144,8 @@ public void analyze(Analyzer analyzer) throws AnalysisException { } for (int i = 0; i < fn.length(); ++i) { if (!isValidCharacter(fn.charAt(i))) { - throw new AnalysisException( - "Function names must be all alphanumeric or underscore. " + - "Invalid name: " + fn); + throw new AnalysisException("Function names must be all alphanumeric or underscore. " + + "Invalid name: " + fn); } } if (Character.isDigit(fn.charAt(0))) { @@ -163,13 +162,6 @@ public void analyze(Analyzer analyzer) throws AnalysisException { } db = ClusterNamespace.getFullName(analyzer.getClusterName(), db); } - - // If the function name is not fully qualified, it must not be the same as a builtin -// if (!isFullyQualified() && OpcodeRegistry.instance().getFunctionOperator( -// getFunction()) != FunctionOperator.INVALID_OPERATOR) { -// throw new AnalysisException( -// "Function cannot have the same name as a builtin: " + getFunction()); -// } } private boolean isValidCharacter(char c) { @@ -201,7 +193,7 @@ public void readFields(DataInput in) throws IOException { fn = Text.readString(in); } - public static FunctionName read(DataInput in) throws IOException{ + public static FunctionName read(DataInput in) throws IOException { FunctionName functionName = new FunctionName(); functionName.readFields(in); return functionName; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/GrantStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/GrantStmt.java index be915b9d2797d0..c137b31a19063f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/GrantStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/GrantStmt.java @@ -159,7 +159,7 @@ public static void checkPrivileges(Analyzer analyzer, List privil if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.GRANT)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); } - } else if (tblPattern.getPrivLevel() == PrivLevel.DATABASE){ + } else if (tblPattern.getPrivLevel() == PrivLevel.DATABASE) { if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), tblPattern.getQualifiedDb(), PrivPredicate.GRANT)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); } diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/analysis/GroupingInfo.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/GroupingInfo.java index 7968fb305da1b0..3387d17dcb1d6b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/GroupingInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/GroupingInfo.java @@ -179,7 +179,7 @@ public void substituteGroupingFn(List exprs, Analyzer analyzer) throws Ana public void substituteGroupingFn(Expr expr, Analyzer analyzer) throws AnalysisException { if (expr instanceof GroupingFunctionCallExpr) { // TODO(yangzhengguo) support expression in grouping functions - for (Expr child: expr.getChildren()) { + for (Expr child : expr.getChildren()) { if (!(child instanceof SlotRef)) { throw new AnalysisException("grouping functions only support column in current version."); // expr from inline view diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/InPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/InPredicate.java index de9d7ffad7d37a..32f9bd5ffb97b3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/InPredicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/InPredicate.java @@ -50,7 +50,7 @@ public class InPredicate extends Predicate { private static final String IN_SET_LOOKUP = "in_set_lookup"; private static final String NOT_IN_SET_LOOKUP = "not_in_set_lookup"; - private static final String IN_ITERATE= "in_iterate"; + private static final String IN_ITERATE = "in_iterate"; private static final String NOT_IN_ITERATE = "not_in_iterate"; private final boolean isNotIn; private static final String IN = "in"; @@ -59,7 +59,7 @@ public class InPredicate extends Predicate { private static final NullLiteral NULL_LITERAL = new NullLiteral(); public static void initBuiltins(FunctionSet functionSet) { - for (Type t: Type.getSupportedTypes()) { + for (Type t : Type.getSupportedTypes()) { if (t.isNull()) { continue; } @@ -129,8 +129,7 @@ public InPredicate(Expr compareExpr, Expr subquery, boolean isNotIn) { */ @Override public Expr negate() { - return new InPredicate(getChild(0), children.subList(1, children.size()), - !isNotIn); + return new InPredicate(getChild(0), children.subList(1, children.size()), !isNotIn); } public List getListChildren() { @@ -150,19 +149,10 @@ public boolean isLiteralChildren() { return true; } - @Override - public void vectorizedAnalyze(Analyzer analyzer) { + @Override + public void vectorizedAnalyze(Analyzer analyzer) { super.vectorizedAnalyze(analyzer); - - PrimitiveType type = getChild(0).getType().getPrimitiveType(); - -// OpcodeRegistry.BuiltinFunction match = OpcodeRegistry.instance().getFunctionInfo( -// FunctionOperator.FILTER_IN, true, true, type); -// Preconditions.checkState(match != null); -// Preconditions.checkState(match.getReturnType().equals(Type.BOOLEAN)); -// this.vectorOpcode = match.opcode; -// LOG.info(debugString() + " opcode: " + vectorOpcode); - } + } @Override public void analyzeImpl(Analyzer analyzer) throws AnalysisException { @@ -172,13 +162,11 @@ public void analyzeImpl(Analyzer analyzer) throws AnalysisException { // An [NOT] IN predicate with a subquery must contain two children, the second of // which is a Subquery. 
if (children.size() != 2 || !(getChild(1) instanceof Subquery)) { - throw new AnalysisException("Unsupported IN predicate with a subquery: " + - toSql()); + throw new AnalysisException("Unsupported IN predicate with a subquery: " + toSql()); } - Subquery subquery = (Subquery)getChild(1); + Subquery subquery = (Subquery) getChild(1); if (!subquery.returnsScalarColumn()) { - throw new AnalysisException("Subquery must return a single column: " + - subquery.toSql()); + throw new AnalysisException("Subquery must return a single column: " + subquery.toSql()); } // Ensure that the column in the lhs of the IN predicate and the result of diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/IndexDef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/IndexDef.java index 8624bb3fe39f24..fd67d085c5487a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/IndexDef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/IndexDef.java @@ -133,8 +133,8 @@ public void checkColumn(Column column, KeysType keysType) throws AnalysisExcepti if (indexType == IndexType.BITMAP) { String indexColName = column.getName(); PrimitiveType colType = column.getDataType(); - if (!(colType.isDateType() || colType.isDecimalV2Type() || colType.isFixedPointType() || - colType.isStringType() || colType == PrimitiveType.BOOLEAN)) { + if (!(colType.isDateType() || colType.isDecimalV2Type() || colType.isFixedPointType() + || colType.isStringType() || colType == PrimitiveType.BOOLEAN)) { throw new AnalysisException(colType + " is not supported in bitmap index. " + "invalid column: " + indexColName); } else if ((keysType == KeysType.AGG_KEYS && !column.isKey())) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/InformationFunction.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/InformationFunction.java index a9fd4a3f6e97b0..6ae4588f5fbcf9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/InformationFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/InformationFunction.java @@ -50,7 +50,9 @@ public String getIntValue() { return String.valueOf(intValue); } - public String getFuncType() {return funcType; } + public String getFuncType() { + return funcType; + } @Override public Expr clone() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java index 835fd21f5e4f3f..53689a405212a9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java @@ -136,7 +136,9 @@ protected InlineViewRef(InlineViewRef other) { baseTblSmap = other.baseTblSmap.clone(); } - public List getExplicitColLabels() { return explicitColLabels; } + public List getExplicitColLabels() { + return explicitColLabels; + } public List getColLabels() { if (explicitColLabels != null) { @@ -148,12 +150,12 @@ public List getColLabels() { @Override public void reset() { - super.reset(); - queryStmt.reset(); - inlineViewAnalyzer = null; - materializedTupleIds.clear(); - sMap.clear(); - baseTblSmap.clear(); + super.reset(); + queryStmt.reset(); + inlineViewAnalyzer = null; + materializedTupleIds.clear(); + sMap.clear(); + baseTblSmap.clear(); } @Override @@ -378,19 +380,6 @@ private boolean requiresNullWrapping(Analyzer analyzer, Expr expr, ExprSubstitut return true; } return true; - -// // Replace all SlotRefs in expr with NullLiterals, and wrap the result -// // into an IS NOT NULL predicate. 
-// Expr isNotNullLiteralPred = new IsNullPredicate(expr.clone(nullSMap), true); -// Preconditions.checkState(isNotNullLiteralPred.isConstant()); -// // analyze to insert casts, etc. -// try { -// isNotNullLiteralPred.analyze(analyzer); -// } catch (AnalysisException e) { -// // this should never happen -// throw new InternalException("couldn't analyze predicate " + isNotNullLiteralPred.toSql(), e); -// } -// return FeSupport.EvalPredicate(isNotNullLiteralPred, analyzer.getQueryGlobals()); } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/InsertStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/InsertStmt.java index 3c413815763077..ff0fbd71d3b830 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/InsertStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/InsertStmt.java @@ -419,7 +419,8 @@ private void analyzeSubquery(Analyzer analyzer) throws UserException { // hll column mush in mentionedColumns for (Column col : targetTable.getBaseSchema()) { if (col.getType().isObjectStored() && !mentionedColumns.contains(col.getName())) { - throw new AnalysisException (" object-stored column " + col.getName() + " mush in insert into columns"); + throw new AnalysisException(" object-stored column " + col.getName() + + " must be in insert into columns"); } } } @@ -483,7 +484,7 @@ private void analyzeSubquery(Analyzer analyzer) throws UserException { } // Check if all columns mentioned is enough - checkColumnCoverage(mentionedColumns, targetTable.getBaseSchema()) ; + checkColumnCoverage(mentionedColumns, targetTable.getBaseSchema()); // handle VALUES() or SELECT constant list if (isValuesOrConstantSelect) { @@ -686,8 +687,7 @@ public void prepareExpressions() throws UserException { */ Preconditions.checkState(col.isAllowNull()); resultExprs.add(NullLiteral.create(col.getType())); - } - else { + } else { StringLiteral defaultValueExpr = new StringLiteral(col.getDefaultValue()); resultExprs.add(defaultValueExpr.checkTypeCompatibility(col.getType())); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/IsNullPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/IsNullPredicate.java index 619fe484e099cb..a196c89f55800b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/IsNullPredicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/IsNullPredicate.java @@ -39,19 +39,19 @@ public class IsNullPredicate extends Predicate { private static final String IS_NOT_NULL = "is_not_null_pred"; public static void initBuiltins(FunctionSet functionSet) { - for (Type t: Type.getSupportedTypes()) { + for (Type t : Type.getSupportedTypes()) { if (t.isNull()) { continue; } String isNullSymbol; if (t == Type.BOOLEAN) { - isNullSymbol = "_ZN5doris15IsNullPredicate7is_nullIN9doris_udf10BooleanValE" + - "EES3_PNS2_15FunctionContextERKT_"; + isNullSymbol = "_ZN5doris15IsNullPredicate7is_nullIN9doris_udf10BooleanValE" + + "EES3_PNS2_15FunctionContextERKT_"; } else { String udfType = Function.getUdfType(t.getPrimitiveType()); - isNullSymbol = "_ZN5doris15IsNullPredicate7is_nullIN9doris_udf" + - udfType.length() + udfType + - "EEENS2_10BooleanValEPNS2_15FunctionContextERKT_"; + isNullSymbol = "_ZN5doris15IsNullPredicate7is_nullIN9doris_udf" + + udfType.length() + udfType + + "EEENS2_10BooleanValEPNS2_15FunctionContextERKT_"; } functionSet.addBuiltinBothScalaAndVectorized(ScalarFunction.createBuiltinOperator( @@ -150,7 +150,7 @@ public boolean isNullable() { public Expr getResultValue() throws AnalysisException {
recursiveResetChildrenResult(); final Expr childValue = getChild(0); - if(!(childValue instanceof LiteralExpr)) { + if (!(childValue instanceof LiteralExpr)) { return this; } return childValue instanceof NullLiteral ? new BoolLiteral(!isNotNull) : new BoolLiteral(isNotNull); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/JoinOperator.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/JoinOperator.java index c7e532465e826f..f0f6356ff2f4a0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/JoinOperator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/JoinOperator.java @@ -66,9 +66,9 @@ public boolean isSemiAntiJoin() { } public boolean isSemiJoin() { - return this == JoinOperator.LEFT_SEMI_JOIN || this == JoinOperator.LEFT_ANTI_JOIN || - this == JoinOperator.RIGHT_SEMI_JOIN || this == JoinOperator.RIGHT_ANTI_JOIN || - this == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN; + return this == JoinOperator.LEFT_SEMI_JOIN || this == JoinOperator.LEFT_ANTI_JOIN + || this == JoinOperator.RIGHT_SEMI_JOIN || this == JoinOperator.RIGHT_ANTI_JOIN + || this == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN; } public boolean isLeftSemiJoin() { @@ -80,8 +80,8 @@ public boolean isInnerJoin() { } public boolean isAntiJoin() { - return this == JoinOperator.LEFT_ANTI_JOIN || this == JoinOperator.RIGHT_ANTI_JOIN || - this == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN; + return this == JoinOperator.LEFT_ANTI_JOIN || this == JoinOperator.RIGHT_ANTI_JOIN + || this == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN; } public boolean isCrossJoin() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/LoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/LoadStmt.java index 99162b408478c0..1c12ad69e5951c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/LoadStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/LoadStmt.java @@ -89,8 +89,8 @@ public class LoadStmt extends DdlStmt { // mini load params public static final String KEY_IN_PARAM_COLUMNS = "columns"; - public static final String KEY_IN_PARAM_SET= "set"; - public static final String KEY_IN_PARAM_HLL= "hll"; + public static final String KEY_IN_PARAM_SET = "set"; + public static final String KEY_IN_PARAM_HLL = "hll"; public static final String KEY_IN_PARAM_COLUMN_SEPARATOR = "column_separator"; public static final String KEY_IN_PARAM_LINE_DELIMITER = "line_delimiter"; public static final String KEY_IN_PARAM_PARTITIONS = "partitions"; @@ -346,9 +346,9 @@ public void analyze(Analyzer analyzer) throws UserException { if (brokerDesc != null && !brokerDesc.isMultiLoadBroker()) { for (int i = 0; i < dataDescription.getFilePaths().size(); i++) { dataDescription.getFilePaths().set(i, - brokerDesc.convertPathToS3(dataDescription.getFilePaths().get(i))); + brokerDesc.convertPathToS3(dataDescription.getFilePaths().get(i))); dataDescription.getFilePaths().set(i, - ExportStmt.checkPath(dataDescription.getFilePaths().get(i), brokerDesc.getStorageType())); + ExportStmt.checkPath(dataDescription.getFilePaths().get(i), brokerDesc.getStorageType())); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/LockTablesStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/LockTablesStmt.java index d62921ca2c40a8..924187ba906e33 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/LockTablesStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/LockTablesStmt.java @@ -19,7 +19,6 @@ import org.apache.doris.catalog.Catalog; import org.apache.doris.catalog.Database; 
-import org.apache.doris.catalog.Table; import org.apache.doris.cluster.ClusterNamespace; import org.apache.doris.common.ErrorCode; import org.apache.doris.common.ErrorReport; @@ -61,7 +60,7 @@ public void analyze(Analyzer analyzer) throws UserException { ErrorReport.reportAnalysisException(ErrorCode.ERR_UNKNOWN_TABLE, tableName, dbName); } Database db = analyzer.getCatalog().getDbOrAnalysisException(dbName); - Table table = db.getTableOrAnalysisException(tableName); + db.getTableOrAnalysisException(tableName); // check auth if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), dbName, diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnHLLUnionPattern.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnHLLUnionPattern.java index f6f6c7364c220d..eb867e500af62f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnHLLUnionPattern.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnHLLUnionPattern.java @@ -32,7 +32,7 @@ public boolean match(Expr expr) { return false; } String fnNameString = fnExpr.getFnName().getFunction(); - if (!fnNameString.equalsIgnoreCase(FunctionSet.HLL_UNION)){ + if (!fnNameString.equalsIgnoreCase(FunctionSet.HLL_UNION)) { return false; } if (fnExpr.getChild(0) instanceof SlotRef) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnItem.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnItem.java index dea62f68fe462f..fabf5b1020565b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnItem.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnItem.java @@ -114,7 +114,7 @@ public Column toMVColumn(OlapTable olapTable) throws DdlException { result.setIsKey(isKey); // If the mv column type is inconsistent with the base column type, the daily test will core. // So, I comment this line firstly. 
-// result.setType(type); + // result.setType(type); result.setAggregationType(aggregationType, isAggregationTypeImplicit); return result; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyColumnClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyColumnClause.java index 9783396a581757..afa369f089ca2a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyColumnClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyColumnClause.java @@ -37,7 +37,9 @@ public class ModifyColumnClause extends AlterTableClause { // set in analyze private Column column; - public Column getColumn() { return column; } + public Column getColumn() { + return column; + } public ColumnPosition getColPos() { return colPos; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/NullLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/NullLiteral.java index 144915fabb6b20..db2413fd8ea6d6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/NullLiteral.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/NullLiteral.java @@ -61,8 +61,8 @@ protected NullLiteral(NullLiteral other) { @Override protected void resetAnalysisState() { - super.resetAnalysisState(); - type = Type.NULL; + super.resetAnalysisState(); + type = Type.NULL; } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/OrderByElement.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/OrderByElement.java index 3743f055e5e53d..c9d96ed043eebe 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/OrderByElement.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/OrderByElement.java @@ -75,8 +75,8 @@ public static List reverse(List src) { for (int i = 0; i < src.size(); ++i) { OrderByElement element = src.get(i); OrderByElement reverseElement = - new OrderByElement(element.getExpr().clone(), !element.isAsc, - Boolean.valueOf(!nullsFirst(element.nullsFirstParam, element.isAsc))); + new OrderByElement(element.getExpr().clone(), !element.isAsc, + !nullsFirst(element.nullsFirstParam, element.isAsc)); result.add(reverseElement); } @@ -88,7 +88,7 @@ public static List reverse(List src) { public static List getOrderByExprs(List src) { List result = Lists.newArrayListWithCapacity(src.size()); - for (OrderByElement element: src) { + for (OrderByElement element : src) { result.add(element.getExpr()); } @@ -104,7 +104,7 @@ public static ArrayList substitute(List src, ExprSubstitutionMap smap, Analyzer analyzer) throws AnalysisException { ArrayList result = Lists.newArrayListWithCapacity(src.size()); - for (OrderByElement element: src) { + for (OrderByElement element : src) { result.add(new OrderByElement(element.getExpr().substitute(smap, analyzer, false), element.isAsc, element.nullsFirstParam)); } @@ -164,7 +164,7 @@ public boolean equals(Object obj) { return false; } - OrderByElement o = (OrderByElement)obj; + OrderByElement o = (OrderByElement) obj; return expr.equals(o.expr) && isAsc == o.isAsc && nullsFirstParam == o.nullsFirstParam; } /** diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java index 9aa8253dabb434..20d552cd4fce1b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java @@ -240,8 +240,8 @@ private void analyzeForParquetFormat(List resultExprs) throws AnalysisExce case DATE: case DATETIME: if 
(!type.equals("int64")) { - throw new AnalysisException("project field type is BIGINT/DATE/DATETIME, should use int64, " + - "but the definition type of column " + i + " is " + type); + throw new AnalysisException("project field type is BIGINT/DATE/DATETIME, should use int64, " + + "but the definition type of column " + i + " is " + type); } break; case FLOAT: @@ -261,16 +261,16 @@ private void analyzeForParquetFormat(List resultExprs) throws AnalysisExce case STRING: case DECIMALV2: if (!type.equals("byte_array")) { - throw new AnalysisException("project field type is CHAR/VARCHAR/STRING/DECIMAL, should use byte_array, " + - "but the definition type of column " + i + " is " + type); + throw new AnalysisException("project field type is CHAR/VARCHAR/STRING/DECIMAL, should use byte_array, " + + "but the definition type of column " + i + " is " + type); } break; case HLL: case BITMAP: if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable().isReturnObjectDataAsBinary()) { if (!type.equals("byte_array")) { - throw new AnalysisException("project field type is HLL/BITMAP, should use byte_array, " + - "but the definition type of column " + i + " is " + type); + throw new AnalysisException("project field type is HLL/BITMAP, should use byte_array, " + + "but the definition type of column " + i + " is " + type); } } else { throw new AnalysisException("Parquet format does not support column type: " + resultType.getPrimitiveType()); @@ -490,7 +490,7 @@ private void getParquetProperties(Set processedPropKeys) throws Analysis throw new AnalysisException("currently only support required type"); } if (!PARQUET_DATA_TYPES.contains(properties[1])) { - throw new AnalysisException("data type is not supported:"+properties[1]); + throw new AnalysisException("data type is not supported:" + properties[1]); } List column = new ArrayList<>(); column.addAll(Arrays.asList(properties)); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionKeyDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionKeyDesc.java index da019d716bae05..767e3626b4a6d7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionKeyDesc.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionKeyDesc.java @@ -151,7 +151,7 @@ public String toSql() { valueStr = valueStr.substring(1, valueStr.length() - 1); } sb.append(valueStr); - if (i < inValues.size() -1) { + if (i < inValues.size() - 1) { sb.append(","); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/PauseRoutineLoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/PauseRoutineLoadStmt.java index 49f07f917ff445..f3596859009dae 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/PauseRoutineLoadStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/PauseRoutineLoadStmt.java @@ -47,7 +47,7 @@ public String getName() { return labelName.getLabelName(); } - public String getDbFullName(){ + public String getDbFullName() { return db; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Predicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Predicate.java index a9cfed1f86587f..c34e9f7728dbdd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Predicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Predicate.java @@ -62,8 +62,7 @@ protected void analyzeImpl(Analyzer analyzer) throws AnalysisException { * This will pick up something like "col = 5", but not "2 * col = 10", which is * what we want. 
*/ - public boolean isSingleColumnPredicate(Reference<SlotRef> slotRefRef, - Reference<Integer> idxRef) { + public boolean isSingleColumnPredicate(Reference<SlotRef> slotRefRef, Reference<Integer> idxRef) { // find slotref SlotRef slotRef = null; int i = 0; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java index 7e767a994fec72..5ca604421446f4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java @@ -186,8 +186,8 @@ public void analyze(Analyzer analyzer) throws AnalysisException, UserException { private void analyzeLimit(Analyzer analyzer) throws AnalysisException { // TODO chenhao if (limitElement.getOffset() > 0 && !hasOrderByClause()) { - throw new AnalysisException("OFFSET requires an ORDER BY clause: " + - limitElement.toSql().trim()); + throw new AnalysisException("OFFSET requires an ORDER BY clause: " + + limitElement.toSql().trim()); } limitElement.analyze(analyzer); } @@ -241,8 +241,8 @@ public List<TupleId> getCorrelatedTupleIds(Analyzer analyzer) throws AnalysisExc }*/ if (correlatedRef != null && absoluteRef != null) { throw new AnalysisException(String.format( - "Nested query is illegal because it contains a table reference '%s' " + - "correlated with an outer block as well as an uncorrelated one '%s':\n%s", + "Nested query is illegal because it contains a table reference '%s' " + + "correlated with an outer block as well as an uncorrelated one '%s':\n%s", correlatedRef.tableRefToSql(), absoluteRef.tableRefToSql(), toSql())); } tblRefIds.add(tblRef.getId()); @@ -314,8 +314,8 @@ protected void createSortInfo(Analyzer analyzer) throws AnalysisException { } if (!analyzer.isRootAnalyzer() && hasOffset() && !hasLimit()) { - throw new AnalysisException("Order-by with offset without limit not supported" + - " in nested queries."); + throw new AnalysisException("Order-by with offset without limit not supported" + + " in nested queries."); } sortInfo = new SortInfo(orderingExprs, isAscOrder, nullsFirstParams); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RangePartitionDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RangePartitionDesc.java index a95ff31a77dbc2..ebcee3c319a005 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RangePartitionDesc.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RangePartitionDesc.java @@ -39,8 +39,8 @@ public RangePartitionDesc(List<String> partitionColNames, @Override public void checkPartitionKeyValueType(PartitionKeyDesc partitionKeyDesc) throws AnalysisException { - if (partitionKeyDesc.getPartitionType() != PartitionKeyValueType.FIXED && - partitionKeyDesc.getPartitionType() != PartitionKeyValueType.LESS_THAN) { + if (partitionKeyDesc.getPartitionType() != PartitionKeyValueType.FIXED + && partitionKeyDesc.getPartitionType() != PartitionKeyValueType.LESS_THAN) { throw new AnalysisException("You can only use fixed or less than values to create range partitions"); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ResumeRoutineLoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ResumeRoutineLoadStmt.java index 888d0327ab6f0c..4ed739e8f6873f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ResumeRoutineLoadStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ResumeRoutineLoadStmt.java @@ -30,7 +30,7 @@ syntax: RESUME ROUTINE LOAD [database.]name */ -public class ResumeRoutineLoadStmt extends
DdlStmt{ +public class ResumeRoutineLoadStmt extends DdlStmt { private final LabelName labelName; private String db; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RoutineLoadDataSourceProperties.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RoutineLoadDataSourceProperties.java index ea0b20d7b2517a..d7dbfab53ec1e6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RoutineLoadDataSourceProperties.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RoutineLoadDataSourceProperties.java @@ -161,9 +161,10 @@ private void checkDataSourceProperties() throws UserException { */ private void checkKafkaProperties() throws UserException { ImmutableSet propertySet = isAlter ? CONFIGURABLE_DATA_SOURCE_PROPERTIES_SET : DATA_SOURCE_PROPERTIES_SET; - Optional optional = properties.keySet().stream().filter( - entity -> !propertySet.contains(entity)).filter( - entity -> !entity.startsWith("property.")).findFirst(); + Optional optional = properties.keySet().stream() + .filter(entity -> !propertySet.contains(entity)) + .filter(entity -> !entity.startsWith("property.")) + .findFirst(); if (optional.isPresent()) { throw new AnalysisException(optional.get() + " is invalid kafka property or can not be set"); } @@ -219,8 +220,8 @@ private void checkKafkaProperties() throws UserException { String kafkaOffsetsString = properties.get(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY); String kafkaDefaultOffsetString = customKafkaProperties.get(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS); if (kafkaOffsetsString != null && kafkaDefaultOffsetString != null) { - throw new AnalysisException("Only one of " + CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY + - " and " + CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS + " can be set."); + throw new AnalysisException("Only one of " + CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY + + " and " + CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS + " can be set."); } if (isAlter && kafkaPartitionsString != null && kafkaOffsetsString == null && kafkaDefaultOffsetString == null) { // if this is an alter operation, the partition and (default)offset must be set together. 
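The checkKafkaProperties() rewrite above is behavior-preserving: it still looks for the first user-supplied key that is neither in the allow-list for this operation nor a "property."-prefixed custom Kafka setting, but the chained stream now reads top to bottom instead of inside out. A minimal standalone sketch of that allow-list check (the key names here are illustrative, not the exact Doris constants):

```java
import java.util.Map;
import java.util.Optional;
import java.util.Set;

public class KafkaPropsCheckDemo {
    public static void main(String[] args) {
        Set<String> allowed = Set.of("kafka_broker_list", "kafka_topic", "kafka_partitions", "kafka_offsets");
        Map<String, String> props = Map.of(
                "kafka_partitions", "0,1,2",
                "property.group.id", "doris-job", // "property." keys are custom Kafka settings, always allowed
                "kafka_offset", "100");           // typo for kafka_offsets: not in the allow-list

        Optional<String> bad = props.keySet().stream()
                .filter(key -> !allowed.contains(key))
                .filter(key -> !key.startsWith("property."))
                .findFirst();

        // prints: kafka_offset is invalid kafka property or can not be set
        bad.ifPresent(key -> System.out.println(key + " is invalid kafka property or can not be set"));
    }
}
```

Breaking each .filter() onto its own line, as the hunk does, is presumably also what the tightened indentation checks expect for fluent chains.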
@@ -328,8 +329,8 @@ private static boolean analyzeKafkaOffsetProperty(String kafkaOffsetsString, Lis } } if (foundTime && foundOffset) { - throw new AnalysisException("The offset of the partition cannot be specified by the timestamp " + - "and the offset at the same time"); + throw new AnalysisException("The offset of the partition cannot be specified by the timestamp " + + "and the offset at the same time"); } if (foundTime) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectListItem.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectListItem.java index 2c5bd245f56d6d..db2e688b6ae539 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectListItem.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectListItem.java @@ -77,7 +77,11 @@ public TableName getTblName() { public Expr getExpr() { return expr; } - public void setExpr(Expr expr) { this.expr = expr; } + + public void setExpr(Expr expr) { + this.expr = expr; + } + public String getAlias() { return alias; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java index d861d63d2b0e37..8e7a56598d0560 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java @@ -421,8 +421,8 @@ public void analyze(Analyzer analyzer) throws UserException { // Analyze the resultExpr before generating a label to ensure enforcement // of expr child and depth limits (toColumn() label may call toSql()). item.getExpr().analyze(analyzer); - if (!(item.getExpr() instanceof CaseExpr) && - item.getExpr().contains(Predicates.instanceOf(Subquery.class))) { + if (!(item.getExpr() instanceof CaseExpr) + && item.getExpr().contains(Predicates.instanceOf(Subquery.class))) { throw new AnalysisException("Subquery is not supported in the select list."); } Expr expr = rewriteQueryExprByMvColumnExpr(item.getExpr(), analyzer); @@ -998,8 +998,7 @@ private void analyzeAggregation(Analyzer analyzer) throws AnalysisException { // disallow '*' and explicit GROUP BY (we can't group by '*', and if you need to // name all star-expanded cols in the group by clause you might as well do it // in the select list) - if (groupByClause != null || - TreeNode.contains(resultExprs, Expr.isAggregatePredicate())) { + if (groupByClause != null || TreeNode.contains(resultExprs, Expr.isAggregatePredicate())) { for (SelectListItem item : selectList.getItems()) { if (item.isStar()) { throw new AnalysisException( @@ -1047,13 +1046,6 @@ private void analyzeAggregation(Analyzer analyzer) throws AnalysisException { } groupByClause.genGroupingExprs(); if (groupingInfo != null) { - GroupByClause.GroupingType groupingType = groupByClause.getGroupingType(); - if ((groupingType == GroupByClause.GroupingType.GROUPING_SETS && CollectionUtils - .isNotEmpty(groupByClause.getGroupingSetList())) - || groupingType == GroupByClause.GroupingType.CUBE - || groupingType == GroupByClause.GroupingType.ROLLUP) { - - } groupingInfo.buildRepeat(groupByClause.getGroupingExprs(), groupByClause.getGroupingSetList()); } substituteOrdinalsAliases(groupByClause.getGroupingExprs(), "GROUP BY", analyzer); @@ -1116,8 +1108,8 @@ private void analyzeAggregation(Analyzer analyzer) throws AnalysisException { if (sortInfo != null) { sortInfo.substituteOrderingExprs(combinedSmap, analyzer); if (LOG.isDebugEnabled()) { - LOG.debug("post-agg orderingExprs: " + - 
Expr.debugString(sortInfo.getOrderingExprs())); + LOG.debug("post-agg orderingExprs: " + + Expr.debugString(sortInfo.getOrderingExprs())); } } @@ -1125,16 +1117,16 @@ private void analyzeAggregation(Analyzer analyzer) throws AnalysisException { for (int i = 0; i < selectList.getItems().size(); ++i) { if (!resultExprs.get(i).isBoundByTupleIds(groupingByTupleIds)) { throw new AnalysisException( - "select list expression not produced by aggregation output " + "(missing from " + - "GROUP BY clause?): " + selectList.getItems().get(i).getExpr().toSql()); + "select list expression not produced by aggregation output " + "(missing from " + + "GROUP BY clause?): " + selectList.getItems().get(i).getExpr().toSql()); } } if (orderByElements != null) { for (int i = 0; i < orderByElements.size(); ++i) { if (!sortInfo.getOrderingExprs().get(i).isBoundByTupleIds(groupingByTupleIds)) { throw new AnalysisException( - "ORDER BY expression not produced by aggregation output " + "(missing from " + - "GROUP BY clause?): " + orderByElements.get(i).getExpr().toSql()); + "ORDER BY expression not produced by aggregation output " + "(missing from " + + "GROUP BY clause?): " + orderByElements.get(i).getExpr().toSql()); } if (sortInfo.getOrderingExprs().get(i).type.isObjectStored()) { @@ -1145,8 +1137,8 @@ private void analyzeAggregation(Analyzer analyzer) throws AnalysisException { if (havingPred != null) { if (!havingPred.isBoundByTupleIds(groupingByTupleIds)) { throw new AnalysisException( - "HAVING clause not produced by aggregation output " + "(missing from GROUP BY " + - "clause?): " + havingClause.toSql()); + "HAVING clause not produced by aggregation output " + "(missing from GROUP BY " + + "clause?): " + havingClause.toSql()); } } } @@ -1285,16 +1277,6 @@ private void createAnalyticInfo(Analyzer analyzer) throws AnalysisException { return; } ExprSubstitutionMap rewriteSmap = new ExprSubstitutionMap(); - for (Expr expr : analyticExprs) { - AnalyticExpr toRewrite = (AnalyticExpr) expr; - Expr newExpr = AnalyticExpr.rewrite(toRewrite); - if (newExpr != null) { - newExpr.analyze(analyzer); - if (!rewriteSmap.containsMappingFor(toRewrite)) { - rewriteSmap.put(toRewrite, newExpr); - } - } - } if (rewriteSmap.size() > 0) { // Substitute the exprs with their rewritten versions. 
ArrayList updatedAnalyticExprs = @@ -1322,8 +1304,8 @@ private void createAnalyticInfo(Analyzer analyzer) throws AnalysisException { if (sortInfo != null) { sortInfo.substituteOrderingExprs(smap, analyzer); if (LOG.isDebugEnabled()) { - LOG.debug("post-analytic orderingExprs: " + - Expr.debugString(sortInfo.getOrderingExprs())); + LOG.debug("post-analytic orderingExprs: " + + Expr.debugString(sortInfo.getOrderingExprs())); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Separator.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Separator.java index 217c06f2303344..c8c096d081a394 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Separator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Separator.java @@ -126,7 +126,7 @@ private static String unescape(String orig) { case 'n': sb.append('\n'); break; - default : + default: sb.append('\\').append(ch); break; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetOperationStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetOperationStmt.java index 5ffac6a0b71e27..2062bf26df148a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetOperationStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetOperationStmt.java @@ -114,16 +114,16 @@ protected SetOperationStmt(SetOperationStmt other) { (other.limitElement == null) ? null : other.limitElement.clone()); operands = Lists.newArrayList(); if (analyzer != null) { - for (SetOperand o: other.distinctOperands) { + for (SetOperand o : other.distinctOperands) { distinctOperands.add(o.clone()); } - for (SetOperand o: other.allOperands) { + for (SetOperand o : other.allOperands) { allOperands.add(o.clone()); } operands.addAll(distinctOperands); operands.addAll(allOperands); } else { - for (SetOperand operand: other.operands) { + for (SetOperand operand : other.operands) { operands.add(operand.clone()); } } @@ -138,7 +138,9 @@ protected SetOperationStmt(SetOperationStmt other) { } @Override - public SetOperationStmt clone() { return new SetOperationStmt(this); } + public SetOperationStmt clone() { + return new SetOperationStmt(this); + } /** * Undoes all changes made by analyze() except distinct propagation and unnesting. 
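Separator.unescape(), which the diff touches only to normalize "default :" to "default:", maps escape sequences in user-written column separators to their real characters and deliberately passes unknown escapes through with the backslash intact. A self-contained sketch of that fallback behavior, reduced to two representative cases (the real method handles more escapes):

```java
public class UnescapeDemo {
    static String unescape(String orig) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < orig.length(); i++) {
            char ch = orig.charAt(i);
            if (ch == '\\' && i + 1 < orig.length()) {
                char next = orig.charAt(++i);
                switch (next) {
                    case 't':
                        sb.append('\t');
                        break;
                    case 'n':
                        sb.append('\n');
                        break;
                    default:
                        // unknown escape: keep it verbatim, as the Doris switch does
                        sb.append('\\').append(next);
                        break;
                }
            } else {
                sb.append(ch);
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(unescape("a\\tb")); // a<TAB>b
        System.out.println(unescape("a\\qb")); // a\qb, unchanged
    }
}
```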
@@ -150,7 +152,7 @@ protected SetOperationStmt(SetOperationStmt other) { @Override public void reset() { super.reset(); - for (SetOperand op: operands) { + for (SetOperand op : operands) { op.reset(); } distinctOperands.clear(); @@ -169,21 +171,46 @@ public void resetSelectList() { } } - public List getOperands() { return operands; } - public List getDistinctOperands() { return distinctOperands; } - public boolean hasDistinctOps() { return !distinctOperands.isEmpty(); } - public List getAllOperands() { return allOperands; } - public boolean hasAllOps() { return !allOperands.isEmpty(); } - public AggregateInfo getDistinctAggInfo() { return distinctAggInfo; } - public boolean hasAnalyticExprs() { return hasAnalyticExprs; } - public TupleId getTupleId() { return tupleId; } + public List getOperands() { + return operands; + } + + public List getDistinctOperands() { + return distinctOperands; + } + + public boolean hasDistinctOps() { + return !distinctOperands.isEmpty(); + } + + public List getAllOperands() { + return allOperands; + } + + public boolean hasAllOps() { + return !allOperands.isEmpty(); + } + + public AggregateInfo getDistinctAggInfo() { + return distinctAggInfo; + } + + public boolean hasAnalyticExprs() { + return hasAnalyticExprs; + } + + public TupleId getTupleId() { + return tupleId; + } public void removeAllOperands() { operands.removeAll(allOperands); allOperands.clear(); } - public List getSetOpsResultExprs() { return setOpsResultExprs; } + public List getSetOpsResultExprs() { + return setOpsResultExprs; + } @Override public void getTables(Analyzer analyzer, Map tableMap, Set parentViewNameSet) throws AnalysisException { @@ -236,7 +263,7 @@ public void analyze(Analyzer analyzer) throws UserException { // Compute hasAnalyticExprs_ hasAnalyticExprs = false; - for (SetOperand op: operands) { + for (SetOperand op : operands) { if (op.hasAnalyticExprs()) { hasAnalyticExprs = true; break; @@ -245,7 +272,7 @@ public void analyze(Analyzer analyzer) throws UserException { // Collect all result expr lists and cast the exprs as necessary. List> resultExprLists = Lists.newArrayList(); - for (SetOperand op: operands) { + for (SetOperand op : operands) { resultExprLists.add(op.getQueryStmt().getResultExprs()); } analyzer.castToSetOpsCompatibleTypes(resultExprLists); @@ -256,7 +283,7 @@ public void analyze(Analyzer analyzer) throws UserException { createSortInfo(analyzer); // Create unnested operands' smaps. 
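castToSetOpsCompatibleTypes() in the analyze() hunk above is the step that lines set-operation operands up column by column: every operand of a UNION/INTERSECT/EXCEPT must produce the same number of columns, and column i of every operand is cast to one common type. A deliberately simplified stand-in for that rule (Doris' real type-compatibility lattice is far richer than this three-step promotion):

```java
import java.util.Arrays;
import java.util.List;

public class SetOpTypeDemo {
    enum T { INT, BIGINT, DOUBLE, VARCHAR }

    // Toy compatibility rule: numeric types widen to the larger one,
    // anything mixed with VARCHAR falls back to VARCHAR.
    static T compatible(T a, T b) {
        if (a == b) {
            return a;
        }
        if (a != T.VARCHAR && b != T.VARCHAR) {
            return a.ordinal() > b.ordinal() ? a : b;
        }
        return T.VARCHAR;
    }

    public static void main(String[] args) {
        // One row type per UNION operand; column i must unify across operands.
        List<List<T>> operands = Arrays.asList(
                Arrays.asList(T.INT, T.VARCHAR),
                Arrays.asList(T.DOUBLE, T.VARCHAR));
        int cols = operands.get(0).size();
        for (List<T> op : operands) {
            if (op.size() != cols) {
                throw new IllegalStateException("Operands have unequal number of columns");
            }
        }
        for (int i = 0; i < cols; i++) {
            T common = operands.get(0).get(i);
            for (List<T> op : operands) {
                common = compatible(common, op.get(i));
            }
            System.out.println("column " + i + " -> " + common); // DOUBLE, then VARCHAR
        }
    }
}
```

The arity check here is the same condition behind the "Operands have unequal number of columns" AnalysisException in the analyzeOperands() hunk that follows.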
- for (SetOperand operand: operands) { + for (SetOperand operand : operands) { setOperandSmap(operand, analyzer); } @@ -297,10 +324,10 @@ private void analyzeOperands(Analyzer analyzer) throws AnalysisException, UserEx QueryStmt query = operands.get(i).getQueryStmt(); List exprs = query.getResultExprs(); if (firstExprs.size() != exprs.size()) { - throw new AnalysisException("Operands have unequal number of columns:\n" + - "'" + queryStmtToSql(firstQuery) + "' has " + - firstExprs.size() + " column(s)\n" + - "'" + queryStmtToSql(query) + "' has " + exprs.size() + " column(s)"); + throw new AnalysisException("Operands have unequal number of columns:\n" + + "'" + queryStmtToSql(firstQuery) + "' has " + + firstExprs.size() + " column(s)\n" + + "'" + queryStmtToSql(query) + "' has " + exprs.size() + " column(s)"); } } } @@ -340,10 +367,10 @@ private void unnestOperands(Analyzer analyzer) throws AnalysisException { unnestOperand(allOperands, Qualifier.ALL, operands.get(i)); } - for (SetOperand op: distinctOperands) { + for (SetOperand op : distinctOperands) { op.setQualifier(Qualifier.DISTINCT); } - for (SetOperand op: allOperands) { + for (SetOperand op : allOperands) { op.setQualifier(Qualifier.ALL); } @@ -500,7 +527,7 @@ private void createMetadata(Analyzer analyzer) throws AnalysisException { // to operands' result exprs (if those happen to be slotrefs); // don't do that if the operand computes analytic exprs // (see Planner.createInlineViewPlan() for the reasoning) - for (SetOperand op: operands) { + for (SetOperand op : operands) { Expr resultExpr = op.getQueryStmt().getResultExprs().get(i); slotDesc.addSourceExpr(resultExpr); SlotRef slotRef = resultExpr.unwrapSlotRef(false); @@ -552,7 +579,7 @@ public void materializeRequiredSlots(Analyzer analyzer) throws AnalysisException if (!slotDesc.isMaterialized()) { continue; } - for (SetOperand op: operands) { + for (SetOperand op : operands) { exprs.add(op.getQueryStmt().getBaseTblResultExprs().get(i)); } if (distinctAggInfo != null) { @@ -563,7 +590,7 @@ public void materializeRequiredSlots(Analyzer analyzer) throws AnalysisException } materializeSlots(analyzer, exprs); - for (SetOperand op: operands) { + for (SetOperand op : operands) { op.getQueryStmt().materializeRequiredSlots(analyzer); } } @@ -606,11 +633,11 @@ public void putBackExprs(Map rewrittenExprMap) { @Override public void rewriteExprs(ExprRewriter rewriter) throws AnalysisException { - for (SetOperand op: operands) { + for (SetOperand op : operands) { op.getQueryStmt().rewriteExprs(rewriter); } if (orderByElements != null) { - for (OrderByElement orderByElem: orderByElements) { + for (OrderByElement orderByElem : orderByElements) { orderByElem.setExpr(rewriter.rewrite(orderByElem.getExpr(), analyzer)); } } @@ -628,7 +655,7 @@ public void getMaterializedTupleIds(ArrayList tupleIdList) { @Override public void collectTableRefs(List tblRefs) { - for (SetOperand op: operands) { + for (SetOperand op : operands) { op.getQueryStmt().collectTableRefs(tblRefs); } } @@ -636,7 +663,7 @@ public void collectTableRefs(List tblRefs) { @Override public List collectTupleIds() { List result = Lists.newArrayList(); - for (SetOperand op: operands) { + for (SetOperand op : operands) { result.addAll(op.getQueryStmt().collectTupleIds()); } return result; @@ -655,9 +682,9 @@ public String toSql() { Preconditions.checkState(operands.size() > 0); strBuilder.append(operands.get(0).getQueryStmt().toSql()); for (int i = 1; i < operands.size() - 1; ++i) { - strBuilder.append( - " " + 
operands.get(i).getOperation().toString() + " " - + ((operands.get(i).getQualifier() == Qualifier.ALL) ? "ALL " : "")); + strBuilder.append(" " + + operands.get(i).getOperation().toString() + " " + + ((operands.get(i).getQualifier() == Qualifier.ALL) ? "ALL " : "")); if (operands.get(i).getQueryStmt() instanceof SetOperationStmt) { strBuilder.append("("); } @@ -671,9 +698,9 @@ public String toSql() { QueryStmt lastQueryStmt = lastOperand.getQueryStmt(); strBuilder.append(" " + lastOperand.getOperation().toString() + " " + ((lastOperand.getQualifier() == Qualifier.ALL) ? "ALL " : "")); - if (lastQueryStmt instanceof SetOperationStmt || ((hasOrderByClause() || hasLimitClause()) && - !lastQueryStmt.hasLimitClause() && - !lastQueryStmt.hasOrderByClause())) { + if (lastQueryStmt instanceof SetOperationStmt || ((hasOrderByClause() || hasLimitClause()) + && !lastQueryStmt.hasLimitClause() + && !lastQueryStmt.hasOrderByClause())) { strBuilder.append("("); strBuilder.append(lastQueryStmt.toSql()); strBuilder.append(")"); @@ -722,9 +749,9 @@ public String toDigest() { QueryStmt lastQueryStmt = lastOperand.getQueryStmt(); strBuilder.append(" " + lastOperand.getOperation().toString() + " " + ((lastOperand.getQualifier() == Qualifier.ALL) ? "ALL " : "")); - if (lastQueryStmt instanceof SetOperationStmt || ((hasOrderByClause() || hasLimitClause()) && - !lastQueryStmt.hasLimitClause() && - !lastQueryStmt.hasOrderByClause())) { + if (lastQueryStmt instanceof SetOperationStmt || ((hasOrderByClause() || hasLimitClause()) + && !lastQueryStmt.hasLimitClause() + && !lastQueryStmt.hasOrderByClause())) { strBuilder.append("("); strBuilder.append(lastQueryStmt.toDigest()); strBuilder.append(")"); @@ -844,23 +871,41 @@ public void analyze(Analyzer parent) throws AnalysisException, UserException { queryStmt.analyze(analyzer); } - public boolean isAnalyzed() { return analyzer != null; } - public QueryStmt getQueryStmt() { return queryStmt; } - public Qualifier getQualifier() { return qualifier; } + public boolean isAnalyzed() { + return analyzer != null; + } + + public QueryStmt getQueryStmt() { + return queryStmt; + } + + public Qualifier getQualifier() { + return qualifier; + } + public Operation getOperation() { return operation; } // Used for propagating DISTINCT. 
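Most of the churn in this file is one mechanical rewrite: single-line accessors are expanded so the body and the closing brace each get their own line. Checkstyle has no single "no one-line methods" rule; this shape is presumably the combined effect of the brace checks (LeftCurly at end of line, RightCurly alone) enabled by the checkstyle.xml changes at the top of this diff, so treat those module names as an assumption. The target form, in miniature:

```java
public class AccessorStyleDemo {
    private int tupleId;

    // Before: the style this PR removes.
    // public int getTupleId() { return tupleId; }

    // After: body and closing brace on their own lines.
    public int getTupleId() {
        return tupleId;
    }
}
```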
- public void setQualifier(Qualifier qualifier) { this.qualifier = qualifier; } + public void setQualifier(Qualifier qualifier) { + this.qualifier = qualifier; + } public void setOperation(Operation operation) { - this.operation =operation; + this.operation = operation; } + public void setQueryStmt(QueryStmt queryStmt) { this.queryStmt = queryStmt; } - public Analyzer getAnalyzer() { return analyzer; } - public ExprSubstitutionMap getSmap() { return smap; } + + public Analyzer getAnalyzer() { + return analyzer; + } + + public ExprSubstitutionMap getSmap() { + return smap; + } public boolean hasAnalyticExprs() { if (queryStmt instanceof SelectStmt) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetVar.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetVar.java index 2ff1fe7e43c770..3b1eeec8e93142 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetVar.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetVar.java @@ -49,7 +49,7 @@ public SetVar(SetType type, String variable, Expr value) { this.variable = variable; this.value = value; if (value instanceof LiteralExpr) { - this.result = (LiteralExpr)value; + this.result = (LiteralExpr) value; } } @@ -58,7 +58,7 @@ public SetVar(String variable, Expr value) { this.variable = variable; this.value = value; if (value instanceof LiteralExpr) { - this.result = (LiteralExpr)value; + this.result = (LiteralExpr) value; } } @@ -114,7 +114,7 @@ public void analyze(Analyzer analyzer) throws AnalysisException, UserException { throw new AnalysisException("Set statement does't support computing expr:" + literalExpr.toSql()); } - result = (LiteralExpr)literalExpr; + result = (LiteralExpr) literalExpr; // Need to check if group is valid if (variable.equalsIgnoreCase(SessionVariable.RESOURCE_VARIABLE)) { @@ -153,9 +153,9 @@ public void analyze(Analyzer analyzer) throws AnalysisException, UserException { if (getVariable().equalsIgnoreCase(SessionVariable.PARTITION_PRUNE_ALGORITHM_VERSION)) { String value = getValue().getStringValue(); if (!"1".equals(value) && !"2".equals(value)) { - throw new AnalysisException("Value of " + - SessionVariable.PARTITION_PRUNE_ALGORITHM_VERSION + " should be " + - "either 1 or 2, but meet " + value); + throw new AnalysisException("Value of " + + SessionVariable.PARTITION_PRUNE_ALGORITHM_VERSION + " should be " + + "either 1 or 2, but meet " + value); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java index 49ea6c839c84b3..8bfc5ed59d14d1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java @@ -65,11 +65,25 @@ public static enum AlterType { private ProcNodeInterface node; - public AlterType getType() { return type; } - public String getDbName() { return dbName; } - public HashMap getFilterMap() { return filterMap; } - public LimitElement getLimitElement(){ return limitElement; } - public ArrayList getOrderPairs(){ return orderByPairs; } + public AlterType getType() { + return type; + } + + public String getDbName() { + return dbName; + } + + public HashMap getFilterMap() { + return filterMap; + } + + public LimitElement getLimitElement() { + return limitElement; + } + + public ArrayList getOrderPairs() { + return orderByPairs; + } public ProcNodeInterface getNode() { return this.node; @@ -95,8 +109,8 @@ private void getPredicateValue(Expr subExpr) throws 
AnalysisException { } String leftKey = ((SlotRef) subExpr.getChild(0)).getColumnName().toLowerCase(); if (leftKey.equals("tablename") || leftKey.equals("state")) { - if (!(subExpr.getChild(1) instanceof StringLiteral) || - binaryPredicate.getOp() != BinaryPredicate.Operator.EQ) { + if (!(subExpr.getChild(1) instanceof StringLiteral) + || binaryPredicate.getOp() != BinaryPredicate.Operator.EQ) { throw new AnalysisException("Where clause : TableName = \"table1\" or " + "State = \"FINISHED|CANCELLED|RUNNING|PENDING|WAITING_TXN\""); } @@ -105,7 +119,7 @@ private void getPredicateValue(Expr subExpr) throws AnalysisException { throw new AnalysisException("Where clause : CreateTime/FinishTime =|>=|<=|>|<|!= " + "\"2019-12-02|2019-12-02 14:54:00\""); } - subExpr.setChild(1,((StringLiteral) subExpr.getChild(1)).castTo(Type.DATETIME)); + subExpr.setChild(1, ((StringLiteral) subExpr.getChild(1)).castTo(Type.DATETIME)); } else { throw new AnalysisException("The columns of TableName/CreateTime/FinishTime/State are supported."); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackendsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackendsStmt.java index 12b037eec4c10a..bf9b8676d1cc54 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackendsStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackendsStmt.java @@ -44,12 +44,12 @@ public void analyze(Analyzer analyzer) throws AnalysisException { @Override public ShowResultSetMetaData getMetaData() { - ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); - for (String title : BackendsProcDir.TITLE_NAMES) { + ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); + for (String title : BackendsProcDir.TITLE_NAMES) { // hide hostname for SHOW BACKENDS stmt if (title.equals("HostName")) { - continue; - } + continue; + } builder.addColumn(new Column(title, ScalarType.createVarchar(30))); } return builder.build(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowCreateFunctionStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowCreateFunctionStmt.java index a7d9db8c1d7df0..e9bc7e86c160a0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowCreateFunctionStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowCreateFunctionStmt.java @@ -54,9 +54,13 @@ public String getDbName() { return dbName; } - public FunctionName getFunctionName() { return functionName; } + public FunctionName getFunctionName() { + return functionName; + } - public FunctionSearchDesc getFunction() { return function; } + public FunctionSearchDesc getFunction() { + return function; + } @Override public void analyze(Analyzer analyzer) throws UserException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowDataStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowDataStmt.java index 0422bbbe2bc833..3f8637b7625151 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowDataStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowDataStmt.java @@ -70,13 +70,12 @@ public class ShowDataStmt extends ShowStmt { .addColumn(new Column("ReplicaCount", ScalarType.createVarchar(20))) .addColumn(new Column("RowCount", ScalarType.createVarchar(20))) .build(); - public static final ImmutableList SHOW_TABLE_DATA_META_DATA_ORIGIN = new ImmutableList.Builder() - .add("TableName").add("Size").add("ReplicaCount") - .build(); + public static final ImmutableList 
SHOW_TABLE_DATA_META_DATA_ORIGIN = + new ImmutableList.Builder<String>().add("TableName").add("Size").add("ReplicaCount").build(); - public static final ImmutableList<String> SHOW_INDEX_DATA_META_DATA_ORIGIN = new ImmutableList.Builder<String>() - .add("TableName").add("IndexName").add("Size").add("ReplicaCount").add("RowCount") - .build(); + public static final ImmutableList<String> SHOW_INDEX_DATA_META_DATA_ORIGIN = + new ImmutableList.Builder<String>().add("TableName").add("IndexName").add("Size").add("ReplicaCount") + .add("RowCount").build(); private String dbName; private String tableName; @@ -115,8 +114,8 @@ public void analyze(Analyzer analyzer) throws UserException { if (!(orderByElement.getExpr() instanceof SlotRef)) { throw new AnalysisException("Should order by column"); } - SlotRef slotRef = (SlotRef) orderByElement.getExpr(); - int index = analyzeColumn(slotRef.getColumnName(), tableName); + SlotRef slotRef = (SlotRef) orderByElement.getExpr(); + int index = analyzeColumn(slotRef.getColumnName(), tableName); OrderByPair orderByPair = new OrderByPair(index, !orderByElement.getIsAsc()); orderByPairs.add(orderByPair); } @@ -183,11 +182,11 @@ public int compare(Table t1, Table t2) { // for output for (List<Object> row : totalRowsObject) { //|TableName|Size|ReplicaCount| - Pair<Double, String> tableSizePair = DebugUtil.getByteUint((long) row.get(1)); + Pair<Double, String> tableSizePair = DebugUtil.getByteUint((long) row.get(1)); String readableSize = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(tableSizePair.first) + " " - + tableSizePair.second; - List<String> result = Arrays.asList(String.valueOf(row.get(0)), readableSize, - String.valueOf(row.get(2))); + + tableSizePair.second; + List<String> result = Arrays.asList(String.valueOf(row.get(0)), + readableSize, String.valueOf(row.get(2))); totalRows.add(result); } @@ -229,7 +228,6 @@ public int compare(Table t1, Table t2) { } OlapTable olapTable = db.getTableOrMetaException(tableName, TableType.OLAP); - int i = 0; long totalSize = 0; long totalReplicaCount = 0; @@ -260,8 +258,6 @@ public int compare(Table t1, Table t2) { totalSize += indexSize; totalReplicaCount += indexReplicaCount; - - i++; } // end for indices // sort by @@ -276,13 +272,13 @@ public int compare(Table t1, Table t2) { } // for output - for (int index = 0;index<= totalRowsObject.size() -1;index++) { + for (int index = 0; index <= totalRowsObject.size() - 1; index++) { //| TableName| IndexName | Size | ReplicaCount | RowCount | List<Object> row = totalRowsObject.get(index); List<String> result; - Pair<Double, String> tableSizePair = DebugUtil.getByteUint((long) row.get(2)); - String readableSize = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(tableSizePair.first) + " " - + tableSizePair.second; + Pair<Double, String> tableSizePair = DebugUtil.getByteUint((long) row.get(2)); + String readableSize = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(tableSizePair.first) + + " " + tableSizePair.second; if (index == 0) { result = Arrays.asList(tableName, String.valueOf(row.get(1)), readableSize, String.valueOf(row.get(3)), @@ -306,9 +302,9 @@ public int compare(Table t1, Table t2) { } } - public static int analyzeColumn(String columnName,String tableName) throws AnalysisException { + public static int analyzeColumn(String columnName, String tableName) throws AnalysisException { ImmutableList<String> titles = SHOW_TABLE_DATA_META_DATA_ORIGIN; - if(tableName != null){ + if (tableName != null) { titles = SHOW_INDEX_DATA_META_DATA_ORIGIN; } for (String title : titles) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowEncryptKeysStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowEncryptKeysStmt.java index
9aa45192dd34e4..17d94aefa8075a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowEncryptKeysStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowEncryptKeysStmt.java @@ -31,7 +31,7 @@ import com.google.common.base.Strings; -public class ShowEncryptKeysStmt extends ShowStmt{ +public class ShowEncryptKeysStmt extends ShowStmt { private static final ShowResultSetMetaData META_DATA = ShowResultSetMetaData.builder() .addColumn(new Column("EncryptKey Name", ScalarType.createVarchar(20))) diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowExportStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowExportStmt.java index b6f04f979e9450..481550ab29b5cd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowExportStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowExportStmt.java @@ -175,8 +175,8 @@ private void analyzePredicate(Expr whereExpr) throws AnalysisException { if (!valid) { throw new AnalysisException("Where clause should looks like below: " - + " ID = $your_job_id, or STATE = \"PENDING|EXPORTING|FINISHED|CANCELLED\", " + - "or LABEL = \"xxx\" or LABEL like \"xxx%\""); + + " ID = $your_job_id, or STATE = \"PENDING|EXPORTING|FINISHED|CANCELLED\", " + + "or LABEL = \"xxx\" or LABEL like \"xxx%\""); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowFunctionsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowFunctionsStmt.java index ba261484720e10..837a74c00ca1cb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowFunctionsStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowFunctionsStmt.java @@ -59,7 +59,9 @@ public ShowFunctionsStmt(String dbName, boolean isBuiltin, boolean isVerbose, St this.expr = expr; } - public String getDbName() { return dbName; } + public String getDbName() { + return dbName; + } public boolean getIsBuiltin() { return isBuiltin; @@ -97,7 +99,7 @@ public void analyze(Analyzer analyzer) throws UserException { if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName, PrivPredicate.SHOW)) { ErrorReport.reportAnalysisException( - ErrorCode.ERR_DBACCESS_DENIED_ERROR, ConnectContext.get().getQualifiedUser(), dbName); + ErrorCode.ERR_DBACCESS_DENIED_ERROR, ConnectContext.get().getQualifiedUser(), dbName); } if (expr != null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPartitionsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPartitionsStmt.java index 3091112cfd382d..7b2bf92fa81df2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPartitionsStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPartitionsStmt.java @@ -90,7 +90,7 @@ public LimitElement getLimitElement() { } public Map getFilterMap() { - return filterMap; + return filterMap; } public ProcNodeInterface getNode() { @@ -191,18 +191,18 @@ private void analyzeSubPredicate(Expr subExpr) throws AnalysisException { BinaryPredicate binaryPredicate = (BinaryPredicate) subExpr; if (leftKey.equalsIgnoreCase(FILTER_PARTITION_NAME) || leftKey.equalsIgnoreCase(FILTER_STATE)) { if (binaryPredicate.getOp() != BinaryPredicate.Operator.EQ) { - throw new AnalysisException(String.format("Only operator =|like are supported for %s", leftKey)); + throw new AnalysisException(String.format("Only operator =|like are supported for %s", leftKey)); } } else if (leftKey.equalsIgnoreCase(FILTER_LAST_CONSISTENCY_CHECK_TIME)) { if (!(subExpr.getChild(1) instanceof 
StringLiteral)) { throw new AnalysisException("Where clause : LastConsistencyCheckTime =|>=|<=|>|<|!= " + "\"2019-12-22|2019-12-22 22:22:00\""); } - subExpr.setChild(1,(subExpr.getChild(1)).castTo(Type.DATETIME)); - } else if (!leftKey.equalsIgnoreCase(FILTER_PARTITION_ID) && !leftKey.equalsIgnoreCase(FILTER_BUCKETS) && - !leftKey.equalsIgnoreCase(FILTER_REPLICATION_NUM)) { - throw new AnalysisException("Only the columns of PartitionId/PartitionName/" + - "State/Buckets/ReplicationNum/LastConsistencyCheckTime are supported."); + subExpr.setChild(1, (subExpr.getChild(1)).castTo(Type.DATETIME)); + } else if (!leftKey.equalsIgnoreCase(FILTER_PARTITION_ID) && !leftKey.equalsIgnoreCase(FILTER_BUCKETS) + && !leftKey.equalsIgnoreCase(FILTER_REPLICATION_NUM)) { + throw new AnalysisException("Only the columns of PartitionId/PartitionName/" + + "State/Buckets/ReplicationNum/LastConsistencyCheckTime are supported."); } } else if (subExpr instanceof LikePredicate) { LikePredicate likePredicate = (LikePredicate) subExpr; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRepositoriesStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRepositoriesStmt.java index 73f8977690ab93..dbbd5d7b36f98d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRepositoriesStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRepositoriesStmt.java @@ -37,9 +37,9 @@ public ShowRepositoriesStmt() { public ShowResultSetMetaData getMetaData() { ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); for (String title : TITLE_NAMES) { - builder.addColumn(new Column(title, ScalarType.createVarchar(30))); - } - return builder.build(); + builder.addColumn(new Column(title, ScalarType.createVarchar(30))); + } + return builder.build(); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowResourcesStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowResourcesStmt.java index df11a77f5e6d2c..f009b8d687545f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowResourcesStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowResourcesStmt.java @@ -53,7 +53,7 @@ public class ShowResourcesStmt extends ShowStmt { private ArrayList orderByPairs; - public ShowResourcesStmt() { + public ShowResourcesStmt() { } public ShowResourcesStmt(Expr labelExpr, List orderByElements, LimitElement limitElement) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRoutineLoadTaskStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRoutineLoadTaskStmt.java index 6a7dccca06c8e6..553151165b20dd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRoutineLoadTaskStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRoutineLoadTaskStmt.java @@ -95,7 +95,7 @@ private void checkJobNameExpr(Analyzer analyzer) throws AnalysisException { boolean valid = true; CHECK: - { + { // CHECKSTYLE IGNORE THIS LINE // check predicate if (!(jobNameExpr instanceof BinaryPredicate)) { valid = false; @@ -125,7 +125,7 @@ private void checkJobNameExpr(Analyzer analyzer) throws AnalysisException { } StringLiteral stringLiteral = (StringLiteral) binaryPredicate.getChild(1); jobName = stringLiteral.getValue().toLowerCase(); - } + } // CHECKSTYLE IGNORE THIS LINE if (!valid) { throw new AnalysisException("show routine load job only support one equal expr which is sames like JobName=\"ILoveDoris\""); diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowSmallFilesStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowSmallFilesStmt.java index e83328bf2a2522..359f284a972f8b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowSmallFilesStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowSmallFilesStmt.java @@ -48,7 +48,9 @@ public ShowSmallFilesStmt(String dbName) { this.dbName = dbName; } - public String getDbName() { return dbName; } + public String getDbName() { + return dbName; + } @Override public void analyze(Analyzer analyzer) throws UserException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStreamLoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStreamLoadStmt.java index d9bc8ac25f0386..1ecfa5e060e79c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStreamLoadStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStreamLoadStmt.java @@ -93,6 +93,7 @@ public ArrayList getOrderByFinishTime() { try { index = analyzeColumn("FinishTime"); } catch (AnalysisException e) { + // CHECKSTYLE IGNORE THIS LINE } OrderByPair orderByPair = new OrderByPair(index, false); orderByFinishTime.add(orderByPair); @@ -126,6 +127,7 @@ public StreamLoadState getState() { try { state = StreamLoadState.valueOf(stateValue); } catch (Exception e) { + // CHECKSTYLE IGNORE THIS LINE } return state; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowTabletStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowTabletStmt.java index 5cc10e741d21c7..927c075ee65fad 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowTabletStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowTabletStmt.java @@ -55,7 +55,7 @@ public class ShowTabletStmt extends ShowStmt { private boolean isShowSingleTablet; public ShowTabletStmt(TableName dbTableName, long tabletId) { - this(dbTableName, tabletId, null, null, null,null); + this(dbTableName, tabletId, null, null, null, null); } public ShowTabletStmt(TableName dbTableName, long tabletId, PartitionNames partitionNames, @@ -100,27 +100,49 @@ public boolean isShowSingleTablet() { return isShowSingleTablet; } - public boolean hasOffset() { return limitElement != null && limitElement.hasOffset(); } + public boolean hasOffset() { + return limitElement != null && limitElement.hasOffset(); + } - public long getOffset() { return limitElement.getOffset(); } + public long getOffset() { + return limitElement.getOffset(); + } - public boolean hasPartition() { return partitionNames != null; } + public boolean hasPartition() { + return partitionNames != null; + } - public PartitionNames getPartitionNames() { return partitionNames; } + public PartitionNames getPartitionNames() { + return partitionNames; + } - public boolean hasLimit() { return limitElement != null && limitElement.hasLimit(); } + public boolean hasLimit() { + return limitElement != null && limitElement.hasLimit(); + } - public long getLimit() { return limitElement.getLimit(); } + public long getLimit() { + return limitElement.getLimit(); + } - public long getVersion() { return version; } + public long getVersion() { + return version; + } - public long getBackendId() { return backendId; } + public long getBackendId() { + return backendId; + } - public String getIndexName() { return indexName; } + public String getIndexName() { + return indexName; + } - public List getOrderByPairs() { return orderByPairs; } + public List getOrderByPairs() { + 
return orderByPairs; + } - public Replica.ReplicaState getReplicaState() { return replicaState; } + public Replica.ReplicaState getReplicaState() { + return replicaState; + } @Override public void analyze(Analyzer analyzer) throws UserException { @@ -209,11 +231,11 @@ private void analyzeSubPredicate(Expr subExpr) throws AnalysisException { } String leftKey = ((SlotRef) subExpr.getChild(0)).getColumnName(); if (leftKey.equalsIgnoreCase("version")) { - if (!(subExpr.getChild(1) instanceof IntLiteral) || version > -1) { - valid = false; - break; - } - version = ((IntLiteral) subExpr.getChild(1)).getValue(); + if (!(subExpr.getChild(1) instanceof IntLiteral) || version > -1) { + valid = false; + break; + } + version = ((IntLiteral) subExpr.getChild(1)).getValue(); } else if (leftKey.equalsIgnoreCase("backendid")) { if (!(subExpr.getChild(1) instanceof IntLiteral) || backendId > -1) { valid = false; @@ -243,7 +265,7 @@ private void analyzeSubPredicate(Expr subExpr) throws AnalysisException { valid = false; break; } - } while(false); + } while (false); if (!valid) { throw new AnalysisException("Where clause should looks like: Version = \"version\"," @@ -264,7 +286,7 @@ public String toSql() { if (limitElement != null) { if (limitElement.hasOffset() && limitElement.hasLimit()) { sb.append(" ").append(limitElement.getOffset()).append(",").append(limitElement.getLimit()); - } else if (limitElement.hasLimit()){ + } else if (limitElement.hasLimit()) { sb.append(" ").append(limitElement.getLimit()); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowTransactionStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowTransactionStmt.java index d0bcba502a5dfd..0766df2082b3ab 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowTransactionStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowTransactionStmt.java @@ -129,8 +129,8 @@ private void analyzeWhereClause() throws AnalysisException { } if (!valid) { - throw new AnalysisException("Where clause should looks like one of them: id = 123 or label = 'label' " + - "or status = 'prepare/precommitted/committed/visible/aborted'"); + throw new AnalysisException("Where clause should looks like one of them: id = 123 or label = 'label' " + + "or status = 'prepare/precommitted/committed/visible/aborted'"); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotDescriptor.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotDescriptor.java index e516894edaf6ba..9cdd0b23b718c8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotDescriptor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotDescriptor.java @@ -213,12 +213,29 @@ public int getSlotOffset() { return slotOffset; } - public String getLabel() { return label; } - public void setLabel(String label) { this.label = label; } - public void setSourceExprs(List exprs) { sourceExprs = exprs; } - public void setSourceExpr(Expr expr) { sourceExprs = Collections.singletonList(expr); } - public void addSourceExpr(Expr expr) { sourceExprs.add(expr); } - public List getSourceExprs() { return sourceExprs; } + public String getLabel() { + return label; + } + + public void setLabel(String label) { + this.label = label; + } + + public void setSourceExprs(List exprs) { + sourceExprs = exprs; + } + + public void setSourceExpr(Expr expr) { + sourceExprs = Collections.singletonList(expr); + } + + public void addSourceExpr(Expr expr) { + sourceExprs.add(expr); + } + + public List getSourceExprs() { + return 
sourceExprs; + } /** diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotId.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotId.java index 6854fe2d71d3b2..0ecaf4965e6943 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotId.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotId.java @@ -31,9 +31,14 @@ public SlotId(int id) { public static IdGenerator<SlotId> createGenerator() { return new IdGenerator<SlotId>() { @Override - public SlotId getNextId() { return new SlotId(nextId++); } + public SlotId getNextId() { + return new SlotId(nextId++); + } + @Override - public SlotId getMaxId() { return new SlotId(nextId - 1); } + public SlotId getMaxId() { + return new SlotId(nextId - 1); + } }; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotRef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotRef.java index 636fbc991eeeac..6e8017e3bd5844 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotRef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotRef.java @@ -312,12 +312,14 @@ public boolean notCheckDescIdEquals(Object obj) { } @Override - protected boolean isConstantImpl() { return false; } + protected boolean isConstantImpl() { + return false; + } @Override public boolean isBoundByTupleIds(List<TupleId> tids) { Preconditions.checkState(desc != null); - for (TupleId tid: tids) { + for (TupleId tid : tids) { if (tid.equals(desc.getParent().getId())) { return true; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java index 09a2b2b3b12aa2..d57856b08bd1ee 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java @@ -103,12 +103,29 @@ public void setMaterializedTupleInfo( } } - public List<Expr> getOrderingExprs() { return orderingExprs; } - public List<Boolean> getIsAscOrder() { return isAscOrder; } - public List<Boolean> getNullsFirstParams() { return nullsFirstParams; } - public List<Expr> getMaterializedOrderingExprs() { return materializedOrderingExprs; } - public List<Expr> getSortTupleSlotExprs() { return sortTupleSlotExprs; } - public TupleDescriptor getSortTupleDescriptor() { return sortTupleDesc; } + public List<Expr> getOrderingExprs() { + return orderingExprs; + } + + public List<Boolean> getIsAscOrder() { + return isAscOrder; + } + + public List<Boolean> getNullsFirstParams() { + return nullsFirstParams; + } + + public List<Expr> getMaterializedOrderingExprs() { + return materializedOrderingExprs; + } + + public List<Expr> getSortTupleSlotExprs() { + return sortTupleSlotExprs; + } + + public TupleDescriptor getSortTupleDescriptor() { + return sortTupleDesc; + } /** * Gets the list of booleans indicating whether nulls come first or last, independent @@ -154,13 +171,15 @@ public void substituteOrderingExprs(ExprSubstitutionMap smap, Analyzer analyzer) * Asserts that all ordering exprs are bound by the sort tuple.
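SlotId and TupleId share the counter-based generator pattern reformatted above: getNextId() hands out ids by post-incrementing a shared counter, and getMaxId() reports the last id issued. A runnable distillation, with the shape of the generic IdGenerator base class inferred from how the anonymous subclass uses its nextId field:

```java
public class IdGenDemo {
    // Assumed shape of the IdGenerator base class.
    abstract static class IdGenerator<T> {
        protected int nextId = 0;

        public abstract T getNextId();

        public abstract T getMaxId();
    }

    static final class SlotId {
        final int id;

        SlotId(int id) {
            this.id = id;
        }
    }

    static IdGenerator<SlotId> createGenerator() {
        return new IdGenerator<SlotId>() {
            @Override
            public SlotId getNextId() {
                return new SlotId(nextId++);
            }

            @Override
            public SlotId getMaxId() {
                return new SlotId(nextId - 1);
            }
        };
    }

    public static void main(String[] args) {
        IdGenerator<SlotId> gen = createGenerator();
        System.out.println(gen.getNextId().id); // 0
        System.out.println(gen.getNextId().id); // 1
        System.out.println(gen.getMaxId().id);  // 1, the last id handed out
    }
}
```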
*/ public void checkConsistency() { - for (Expr orderingExpr: orderingExprs) { + for (Expr orderingExpr : orderingExprs) { Preconditions.checkState(orderingExpr.isBound(sortTupleDesc.getId())); } } @Override - public SortInfo clone() { return new SortInfo(this); } + public SortInfo clone() { + return new SortInfo(this); + } /** * Create a tuple descriptor for the single tuple that is materialized, sorted, and @@ -195,7 +214,7 @@ public ExprSubstitutionMap createSortTupleInfo( Predicates.instanceOf(SlotRef.class), sourceSlots); TreeNode.collect(Expr.substituteList(orderingExprs, substOrderBy, analyzer, false), Predicates.instanceOf(SlotRef.class), sourceSlots); - for (SlotRef origSlotRef: sourceSlots) { + for (SlotRef origSlotRef : sourceSlots) { // Don't rematerialize slots that are already in the sort tuple. if (origSlotRef.getDesc().getParent().getId() != sortTupleDesc.getId()) { SlotDescriptor origSlotDesc = origSlotRef.getDesc(); @@ -232,26 +251,12 @@ public ExprSubstitutionMap createMaterializedOrderExprs( TupleDescriptor sortTupleDesc, Analyzer analyzer) { ExprSubstitutionMap substOrderBy = new ExprSubstitutionMap(); for (Expr origOrderingExpr : orderingExprs) { - // TODO(zc): support materialized order exprs - // if (!origOrderingExpr.hasCost() - // || origOrderingExpr.getCost() > SORT_MATERIALIZATION_COST_THRESHOLD - // || origOrderingExpr.contains(Expr.IS_NONDETERMINISTIC_BUILTIN_FN_PREDICATE) - // || origOrderingExpr.contains(Expr.IS_UDF_PREDICATE)) { - // SlotDescriptor materializedDesc = analyzer.addSlotDescriptor(sortTupleDesc); - // materializedDesc.initFromExpr(origOrderingExpr); - // materializedDesc.setIsMaterialized(true); - // SlotRef materializedRef = new SlotRef(materializedDesc); - // substOrderBy.put(origOrderingExpr, materializedRef); - // materializedOrderingExprs_.add(origOrderingExpr); - // } - { - SlotDescriptor materializedDesc = analyzer.addSlotDescriptor(sortTupleDesc); - materializedDesc.initFromExpr(origOrderingExpr); - materializedDesc.setIsMaterialized(true); - SlotRef materializedRef = new SlotRef(materializedDesc); - substOrderBy.put(origOrderingExpr, materializedRef); - materializedOrderingExprs.add(origOrderingExpr); - } + SlotDescriptor materializedDesc = analyzer.addSlotDescriptor(sortTupleDesc); + materializedDesc.initFromExpr(origOrderingExpr); + materializedDesc.setIsMaterialized(true); + SlotRef materializedRef = new SlotRef(materializedDesc); + substOrderBy.put(origOrderingExpr, materializedRef); + materializedOrderingExprs.add(origOrderingExpr); } return substOrderBy; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/StatementBase.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/StatementBase.java index 92ffaa6bbf9b0e..eb174cfcc7c99b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/StatementBase.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/StatementBase.java @@ -123,7 +123,9 @@ public String toSql() { * if not applicable (not all statements produce an output result set). * Subclasses must override this as necessary. */ - public List getColLabels() { return Collections.emptyList(); } + public List getColLabels() { + return Collections.emptyList(); + } /** * Sets the column labels of this statement, if applicable. No-op of the statement does @@ -143,7 +145,9 @@ public void setColLabels(List colLabels) { * empty list if not applicable (not all statements produce an output result set). * Subclasses must override this as necessary. 
*/ - public List getResultExprs() { return Collections.emptyList(); } + public List getResultExprs() { + return Collections.emptyList(); + } /** * Casts the result expressions and derived members (e.g., destination column types for diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java index 64695fd6bb629a..1700c64ae800ec 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java @@ -718,8 +718,6 @@ private static boolean mergeExpr(SelectStmt stmt, Expr expr, if (!hasEqJoinPred && !inlineView.isCorrelated()) { // TODO: Remove this when independent subquery evaluation is implemented. // TODO: Requires support for non-equi joins. - boolean hasGroupBy = ((SelectStmt) inlineView.getViewStmt()).hasGroupByClause(); - // boolean hasGroupBy = false; if (!expr.getSubquery().returnsScalarColumn()) { throw new AnalysisException("Unsupported predicate with subquery: " + expr.toSql()); @@ -728,8 +726,8 @@ private static boolean mergeExpr(SelectStmt stmt, Expr expr, // TODO: Requires support for null-aware anti-join mode in nested-loop joins if (expr.getSubquery().isScalarSubquery() && expr instanceof InPredicate && ((InPredicate) expr).isNotIn()) { - throw new AnalysisException("Unsupported NOT IN predicate with subquery: " + - expr.toSql()); + throw new AnalysisException("Unsupported NOT IN predicate with subquery: " + + expr.toSql()); } // We can equal the aggregate subquery using a cross join. All conjuncts @@ -798,8 +796,8 @@ private static void replaceUnqualifiedStarItems(SelectStmt stmt, int tableIdx) { // tbl1,...,tbln are the visible tableRefs in stmt. for (int j = 0; j < tableIdx; ++j) { TableRef tableRef = stmt.fromClause.get(j); - if (tableRef.getJoinOp() == JoinOperator.LEFT_SEMI_JOIN || - tableRef.getJoinOp() == JoinOperator.LEFT_ANTI_JOIN) { + if (tableRef.getJoinOp() == JoinOperator.LEFT_SEMI_JOIN + || tableRef.getJoinOp() == JoinOperator.LEFT_ANTI_JOIN) { continue; } newItems.add(SelectListItem.createStarItem(tableRef.getAliasAsName())); @@ -978,8 +976,8 @@ public boolean apply(Expr arg) { && (!stmt.hasAggInfo() || !Iterables.all(correlatedPredicates, Predicates.or(Expr.IS_EQ_BINARY_PREDICATE, isSingleSlotRef)))) { - throw new AnalysisException("Unsupported correlated EXISTS subquery with a " + - "HAVING clause: " + stmt.toSql()); + throw new AnalysisException( + "Unsupported correlated EXISTS subquery with a " + "HAVING clause: " + stmt.toSql()); } // The following correlated subqueries with a limit clause are supported: diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/StorageDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/StorageDesc.java index 914e0854732dae..2a6475d6c1f5db 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/StorageDesc.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/StorageDesc.java @@ -63,9 +63,9 @@ protected void tryConvertToS3() { } CaseInsensitiveMap ciProperties = new CaseInsensitiveMap(); ciProperties.putAll(properties); - if (StringUtils.isNotEmpty(ciProperties.get(BOS_ENDPOINT).toString()) && - StringUtils.isNotEmpty(ciProperties.get(BOS_ACCESS_KEY).toString()) && - StringUtils.isNotEmpty(ciProperties.get(BOS_SECRET_ACCESS_KEY).toString())) { + if (StringUtils.isNotEmpty(ciProperties.get(BOS_ENDPOINT).toString()) + && StringUtils.isNotEmpty(ciProperties.get(BOS_ACCESS_KEY).toString()) + && 
StringUtils.isNotEmpty(ciProperties.get(BOS_SECRET_ACCESS_KEY).toString())) { // bos endpoint like http[s]://gz.bcebos.com, we want to extract region gz, // and convert to s3 endpoint http[s]://s3.gz.bcebos.com String bosEndpiont = ciProperties.get(BOS_ENDPOINT).toString(); @@ -99,7 +99,7 @@ protected String convertPathToS3(String path) { try { URI orig = new URI(path); URI s3url = new URI("s3", orig.getRawAuthority(), - orig.getRawPath(), orig.getRawQuery(), orig.getRawFragment()); + orig.getRawPath(), orig.getRawQuery(), orig.getRawFragment()); return s3url.toString(); } catch (URISyntaxException e) { return path; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java index 8365bbbc16674e..d3056bbe2f8a83 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java @@ -48,11 +48,18 @@ public class Subquery extends Expr { // A subquery has its own analysis context protected Analyzer analyzer; - public Analyzer getAnalyzer() { return analyzer; } - public QueryStmt getStatement() { return stmt; } + public Analyzer getAnalyzer() { + return analyzer; + } + + public QueryStmt getStatement() { + return stmt; + } @Override - public String toSqlImpl() { return "(" + stmt.toSql() + ")"; } + public String toSqlImpl() { + return "(" + stmt.toSql() + ")"; + } @Override public String toDigestImpl() { @@ -84,8 +91,7 @@ public Subquery(Subquery other) { @Override public void analyzeImpl(Analyzer parentAnalyzer) throws AnalysisException { if (!(stmt instanceof SelectStmt)) { - throw new AnalysisException("A subquery must contain a single select block: " + - toSql()); + throw new AnalysisException("A subquery must contain a single select block: " + toSql()); } // The subquery is analyzed with its own analyzer. analyzer = new Analyzer(parentAnalyzer); @@ -109,7 +115,7 @@ public void analyzeImpl(Analyzer parentAnalyzer) throws AnalysisException { } // If the subquery returns many rows, set its type to MultiRowType. - if (!((SelectStmt)stmt).returnsSingleRow()) { + if (!((SelectStmt) stmt).returnsSingleRow()) { type = new MultiRowType(type); } @@ -118,7 +124,9 @@ public void analyzeImpl(Analyzer parentAnalyzer) throws AnalysisException { } @Override - protected boolean isConstantImpl() { return false; } + protected boolean isConstantImpl() { + return false; + } /** * Check if the subquery's SelectStmt returns a single column of scalar type. 
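convertPathToS3() above swaps only the URI scheme and keeps authority, path, query and fragment intact, which is why it rebuilds the location through the multi-argument java.net.URI constructor instead of doing string surgery, and why it falls back to the original path on URISyntaxException. The same call sequence in isolation (the bos:// input is an illustrative value):

```java
import java.net.URI;
import java.net.URISyntaxException;

public class S3PathDemo {
    public static void main(String[] args) throws URISyntaxException {
        URI orig = new URI("bos://my-bucket/warehouse/part-0.csv");
        // Rebuild with the "s3" scheme; every other raw component is reused.
        URI s3url = new URI("s3", orig.getRawAuthority(),
                orig.getRawPath(), orig.getRawQuery(), orig.getRawFragment());
        System.out.println(s3url); // s3://my-bucket/warehouse/part-0.csv
    }
}
```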
@@ -189,7 +197,7 @@ public boolean equals(Object o) {
         if (!super.equals(o)) {
             return false;
         }
-        return stmt.toSql().equals(((Subquery)o).stmt.toSql());
+        return stmt.toSql().equals(((Subquery) o).stmt.toSql());
     }
 
     @Override
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SyncStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SyncStmt.java
index cdafe07c973b0a..59b9d3cd8af82a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SyncStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SyncStmt.java
@@ -17,15 +17,12 @@
 
 package org.apache.doris.analysis;
 
-import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.UserException;
 
 public class SyncStmt extends DdlStmt {
     @Override
-    public void analyze(Analyzer analyzer) throws AnalysisException, UserException {
-//        if (analyzer.getCatalog().isMaster()) {
-//            throw new AnalysisException("No need to Sync, for you are master");
-//        }
+    public void analyze(Analyzer analyzer) throws UserException {
+
     }
 
     @Override
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/TableRef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/TableRef.java
index af8463e95dfe25..429fc808c02f97 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/TableRef.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/TableRef.java
@@ -460,8 +460,8 @@ public void analyzeJoin(Analyzer analyzer) throws AnalysisException {
                         + " (in" + " \"" + this.toSql() + "\")");
             }
             if (desc.getTable().getColumn(colName) == null) {
-                throw new AnalysisException("Unknown column " + colName + " for alias " + getAlias() + " (in \"" +
-                        this.toSql() + "\")");
+                throw new AnalysisException("Unknown column " + colName + " for alias " + getAlias() + " (in \""
+                        + this.toSql() + "\")");
             }
 
             // create predicate ".colName = .colName"
@@ -476,11 +476,6 @@ public void analyzeJoin(Analyzer analyzer) throws AnalysisException {
             }
         }
 
-        // at this point, both 'this' and leftTblRef have been analyzed
-        // and registered
-        boolean lhsIsNullable = false;
-        boolean rhsIsNullable = false;
-        //
         if (leftTblRef != null) {
             for (TupleId tupleId : leftTblRef.getAllTableRefIds()) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ToSqlUtils.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ToSqlUtils.java
index d474cacaafcf51..01f18d87dfdf8a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ToSqlUtils.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ToSqlUtils.java
@@ -48,7 +48,7 @@ public static String getIdentSql(String ident) {
 
     public static List<String> getIdentSqlList(List<String> identList) {
         List<String> identSqlList = Lists.newArrayList();
-        for (String ident: identList) {
+        for (String ident : identList) {
             identSqlList.add(getIdentSql(ident));
         }
         return identSqlList;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/TupleDescriptor.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/TupleDescriptor.java
index d5fc90ff10fcc9..5bfc2617133499 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/TupleDescriptor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/TupleDescriptor.java
@@ -167,7 +167,10 @@ public boolean getIsMaterialized() {
         return isMaterialized;
     }
 
-    public boolean isMaterialized() { return isMaterialized; }
+    public boolean isMaterialized() {
+        return isMaterialized;
+    }
+
     public void setIsMaterialized(boolean value) {
         isMaterialized = value;
     }
@@ -180,8 +183,15 @@ public void setAliases(String[] aliases, boolean hasExplicitAlias) {
         this.aliases = aliases;
         this.hasExplicitAlias = hasExplicitAlias;
     }
-    public boolean hasExplicitAlias() { return hasExplicitAlias; }
-    public String getAlias() { return (aliases != null) ? aliases[0] : null; }
+
+    public boolean hasExplicitAlias() {
+        return hasExplicitAlias;
+    }
+
+    public String getAlias() {
+        return (aliases != null) ? aliases[0] : null;
+    }
+
     public TableName getAliasAsName() {
         return (aliases != null) ? new TableName(null, aliases[0]) : null;
     }
@@ -220,7 +230,7 @@ public void computeStatAndMemLayout() {
      * To ensure that no error occurs even if it is wrongly called a second time,
      * the state is re-initialized at the beginning of the function.
      *
-     * In the future this function will be changed to a private function.
+     * @deprecated In the future this function will be changed to a private function.
      */
     @Deprecated
    public void computeStat() {
@@ -230,7 +240,7 @@ public void computeStat() {
         // compute stat
         for (SlotDescriptor d : slots) {
             if (!d.isMaterialized()) {
-                continue; 
+                continue;
             }
             ColumnStats stats = d.getStats();
             if (stats.hasAvgSerializedSize()) {
@@ -243,7 +253,7 @@ public void computeStat() {
     }
 
     /**
-     * In the future this function will be changed to a private function.
+     * @deprecated In the future this function will be changed to a private function.
      */
     @Deprecated
     public void computeMemLayout() {
@@ -256,7 +266,6 @@ public void computeMemLayout() {
         // populate slotsBySize; also compute avgSerializedSize
         numNullableSlots = 0;
         for (SlotDescriptor d : slots) {
-            ColumnStats stats = d.getStats();
             if (d.isMaterialized()) {
                 slotsBySize.get(d.getType().getSlotSize()).add(d);
                 if (d.getIsNullable()) {
@@ -330,7 +339,7 @@ public boolean isCompatible(TupleDescriptor desc) {
      * Materialize all slots.
      */
     public void materializeSlots() {
-        for (SlotDescriptor slot: slots) {
+        for (SlotDescriptor slot : slots) {
             slot.setIsMaterialized(true);
         }
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/TupleId.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/TupleId.java
index a3424ea4674bc0..f962a320056984 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/TupleId.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/TupleId.java
@@ -34,9 +34,14 @@ public TupleId(int id) {
     public static IdGenerator<TupleId> createGenerator() {
         return new IdGenerator<TupleId>() {
             @Override
-            public TupleId getNextId() { return new TupleId(nextId++); }
+            public TupleId getNextId() {
+                return new TupleId(nextId++);
+            }
+
             @Override
-            public TupleId getMaxId() { return new TupleId(nextId - 1); }
+            public TupleId getMaxId() {
+                return new TupleId(nextId - 1);
+            }
         };
     }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/TupleIsNullPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/TupleIsNullPredicate.java
index 9b67d4cd1ae343..7e9e422d4e74a7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/TupleIsNullPredicate.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/TupleIsNullPredicate.java
@@ -182,9 +182,9 @@ public static Expr unwrapExpr(Expr expr) {
         if (expr instanceof FunctionCallExpr) {
             FunctionCallExpr fnCallExpr = (FunctionCallExpr) expr;
             List<Expr> params = fnCallExpr.getParams().exprs();
-            if (fnCallExpr.getFnName().getFunction().equals("if") &&
-                    params.get(0) instanceof TupleIsNullPredicate &&
-                    Expr.IS_NULL_LITERAL.apply(params.get(1))) {
+            if (fnCallExpr.getFnName().getFunction().equals("if")
+                    && params.get(0) instanceof TupleIsNullPredicate
+                    && Expr.IS_NULL_LITERAL.apply(params.get(1))) {
                 return
unwrapExpr(params.get(2)); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/TypeDef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/TypeDef.java index 0f25cafd4f6a1f..9f82878d764218 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/TypeDef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/TypeDef.java @@ -38,157 +38,158 @@ * Represents an anonymous type definition, e.g., used in DDL and CASTs. */ public class TypeDef implements ParseNode { - private boolean isAnalyzed; - private final Type parsedType; + private boolean isAnalyzed; + private final Type parsedType; - public TypeDef(Type parsedType) { - this.parsedType = parsedType; - } - - public static TypeDef create(PrimitiveType type) { - return new TypeDef(ScalarType.createType(type)); - } - - public static TypeDef createDecimal(int precision, int scale) { - return new TypeDef(ScalarType.createDecimalV2Type(precision, scale)); - } - - public static TypeDef createVarchar(int len) { - return new TypeDef(ScalarType.createVarchar(len)); - } - - public static TypeDef createChar(int len) { - return new TypeDef(ScalarType.createChar(len)); - } + public TypeDef(Type parsedType) { + this.parsedType = parsedType; + } - @Override - public void analyze(Analyzer analyzer) throws AnalysisException { - if (isAnalyzed) { - return; + public static TypeDef create(PrimitiveType type) { + return new TypeDef(ScalarType.createType(type)); } - // Check the max nesting depth before calling the recursive analyze() to avoid - // a stack overflow. - if (parsedType.exceedsMaxNestingDepth()) { - throw new AnalysisException(String.format( - "Type exceeds the maximum nesting depth of %s:\n%s", - Type.MAX_NESTING_DEPTH, parsedType.toSql())); + + public static TypeDef createDecimal(int precision, int scale) { + return new TypeDef(ScalarType.createDecimalV2Type(precision, scale)); } - analyze(parsedType); - isAnalyzed = true; - } - private void analyze(Type type) throws AnalysisException { - if (!type.isSupported()) { - throw new AnalysisException("Unsupported data type: " + type.toSql()); + public static TypeDef createVarchar(int len) { + return new TypeDef(ScalarType.createVarchar(len)); } - if (type.isScalarType()) { - analyzeScalarType((ScalarType) type); + + public static TypeDef createChar(int len) { + return new TypeDef(ScalarType.createChar(len)); } - if (type.isComplexType()) { - if (!Config.enable_complex_type_support) { - throw new AnalysisException("Unsupported data type: " + type.toSql()); - } - if (type.isArrayType()) { - Type itemType = ((ArrayType) type).getItemType(); - if (itemType instanceof ScalarType) { - analyzeNestedType((ScalarType) itemType); + @Override + public void analyze(Analyzer analyzer) throws AnalysisException { + if (isAnalyzed) { + return; } - } - if (type.isMapType()) { - ScalarType keyType = (ScalarType) ((MapType) type).getKeyType(); - ScalarType valueType = (ScalarType) ((MapType) type).getKeyType(); - analyzeNestedType(keyType); - analyzeNestedType(valueType); - } - if (type.isStructType()) { - ArrayList fields = ((StructType) type).getFields(); - for (int i = 0; i < fields.size(); i++) { - ScalarType filedType = (ScalarType) fields.get(i).getType(); - analyzeNestedType(filedType); + // Check the max nesting depth before calling the recursive analyze() to avoid + // a stack overflow. 
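The comment above introduces the nesting-depth guard whose reindented form continues just below. As a standalone illustration of why the guard runs before the recursive analyze(): the depth can be counted iteratively, so a pathologically deep nested type fails with a clean AnalysisException instead of a StackOverflowError. The NestedTypeSketch class and its limit are ours, not Doris code:

class NestedTypeSketch {
    static final int MAX_NESTING_DEPTH = 9; // assumed limit, for illustration only

    NestedTypeSketch itemType; // null for a scalar leaf

    // Walk the item chain iteratively; no recursion, so the check itself is safe.
    boolean exceedsMaxNestingDepth() {
        int depth = 1;
        for (NestedTypeSketch t = itemType; t != null; t = t.itemType) {
            if (++depth > MAX_NESTING_DEPTH) {
                return true;
            }
        }
        return false;
    }
}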
+ if (parsedType.exceedsMaxNestingDepth()) { + throw new AnalysisException( + String.format("Type exceeds the maximum nesting depth of %s:\n%s", Type.MAX_NESTING_DEPTH, + parsedType.toSql())); } - } + analyze(parsedType); + isAnalyzed = true; } - } - private void analyzeNestedType(ScalarType type) throws AnalysisException { - if (type.isNull()) { - throw new AnalysisException("Unsupported data type: " + type.toSql()); - } - if (type.getPrimitiveType().isStringType() - && !type.isAssignedStrLenInColDefinition()) { - type.setLength(1); - } - analyze(type); - } - - private void analyzeScalarType(ScalarType scalarType) - throws AnalysisException { - PrimitiveType type = scalarType.getPrimitiveType(); - switch (type) { - case CHAR: - case VARCHAR: { - String name; - int maxLen; - if (type == PrimitiveType.VARCHAR) { - name = "VARCHAR"; - maxLen = ScalarType.MAX_VARCHAR_LENGTH; - } else if (type == PrimitiveType.CHAR) { - name = "CHAR"; - maxLen = ScalarType.MAX_CHAR_LENGTH; - } else { - Preconditions.checkState(false); - return; + private void analyze(Type type) throws AnalysisException { + if (!type.isSupported()) { + throw new AnalysisException("Unsupported data type: " + type.toSql()); } - int len = scalarType.getLength(); - // len is decided by child, when it is -1. - - if (len <= 0) { - throw new AnalysisException(name + " size must be > 0: " + len); + if (type.isScalarType()) { + analyzeScalarType((ScalarType) type); } - if (scalarType.getLength() > maxLen) { - throw new AnalysisException( - name + " size must be <= " + maxLen + ": " + len); + + if (type.isComplexType()) { + if (!Config.enable_complex_type_support) { + throw new AnalysisException("Unsupported data type: " + type.toSql()); + } + if (type.isArrayType()) { + Type itemType = ((ArrayType) type).getItemType(); + if (itemType instanceof ScalarType) { + analyzeNestedType((ScalarType) itemType); + } + } + if (type.isMapType()) { + ScalarType keyType = (ScalarType) ((MapType) type).getKeyType(); + ScalarType valueType = (ScalarType) ((MapType) type).getKeyType(); + analyzeNestedType(keyType); + analyzeNestedType(valueType); + } + if (type.isStructType()) { + ArrayList fields = ((StructType) type).getFields(); + for (int i = 0; i < fields.size(); i++) { + ScalarType filedType = (ScalarType) fields.get(i).getType(); + analyzeNestedType(filedType); + } + } } - break; - } - case DECIMALV2: { - int precision = scalarType.decimalPrecision(); - int scale = scalarType.decimalScale(); - // precision: [1, 27] - if (precision < 1 || precision > 27) { - throw new AnalysisException("Precision of decimal must between 1 and 27." - + " Precision was set to: " + precision + "."); + } + + private void analyzeNestedType(ScalarType type) throws AnalysisException { + if (type.isNull()) { + throw new AnalysisException("Unsupported data type: " + type.toSql()); } - // scale: [0, 9] - if (scale < 0 || scale > 9) { - throw new AnalysisException("Scale of decimal must between 0 and 9." - + " Scale was set to: " + scale + "."); + if (type.getPrimitiveType().isStringType() + && !type.isAssignedStrLenInColDefinition()) { + type.setLength(1); } - // scale < precision - if (scale >= precision) { - throw new AnalysisException("Scale of decimal must be smaller than precision." 
- + " Scale is " + scale + " and precision is " + precision); + analyze(type); + } + + private void analyzeScalarType(ScalarType scalarType) + throws AnalysisException { + PrimitiveType type = scalarType.getPrimitiveType(); + switch (type) { + case CHAR: + case VARCHAR: { + String name; + int maxLen; + if (type == PrimitiveType.VARCHAR) { + name = "VARCHAR"; + maxLen = ScalarType.MAX_VARCHAR_LENGTH; + } else if (type == PrimitiveType.CHAR) { + name = "CHAR"; + maxLen = ScalarType.MAX_CHAR_LENGTH; + } else { + Preconditions.checkState(false); + return; + } + int len = scalarType.getLength(); + // len is decided by child, when it is -1. + + if (len <= 0) { + throw new AnalysisException(name + " size must be > 0: " + len); + } + if (scalarType.getLength() > maxLen) { + throw new AnalysisException( + name + " size must be <= " + maxLen + ": " + len); + } + break; + } + case DECIMALV2: { + int precision = scalarType.decimalPrecision(); + int scale = scalarType.decimalScale(); + // precision: [1, 27] + if (precision < 1 || precision > 27) { + throw new AnalysisException("Precision of decimal must between 1 and 27." + + " Precision was set to: " + precision + "."); + } + // scale: [0, 9] + if (scale < 0 || scale > 9) { + throw new AnalysisException("Scale of decimal must between 0 and 9." + + " Scale was set to: " + scale + "."); + } + // scale < precision + if (scale >= precision) { + throw new AnalysisException("Scale of decimal must be smaller than precision." + + " Scale is " + scale + " and precision is " + precision); + } + break; + } + case INVALID_TYPE: + throw new AnalysisException("Invalid type."); + default: + break; } - break; - } - case INVALID_TYPE: - throw new AnalysisException("Invalid type."); - default: break; } - } - public Type getType() { - return parsedType; - } + public Type getType() { + return parsedType; + } - @Override - public String toString() { - return parsedType.toSql(); - } + @Override + public String toString() { + return parsedType.toSql(); + } - @Override - public String toSql() { - return parsedType.toSql(); - } + @Override + public String toSql() { + return parsedType.toSql(); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ValueList.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ValueList.java index 03e36c201ef222..a9f3951bff7287 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ValueList.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ValueList.java @@ -36,9 +36,17 @@ public ValueList(List> rows) { this.rows = rows; } - public List> getRows() { return rows; } - public void addRow(ArrayList row) { rows.add(row); } - public ArrayList getFirstRow() { return rows.get(0); } + public List> getRows() { + return rows; + } + + public void addRow(ArrayList row) { + rows.add(row); + } + + public ArrayList getFirstRow() { + return rows.get(0); + } public void analyzeForSelect(Analyzer analyzer) throws AnalysisException { if (rows.isEmpty()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/WithClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/WithClause.java index eb631146dc2c9f..44ab177c25e985 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/WithClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/WithClause.java @@ -83,14 +83,14 @@ public void analyze(Analyzer analyzer) throws AnalysisException, UserException { if (analyzer.isExplain()) { withClauseAnalyzer.setIsExplain(); } - for (View view: views) { + for (View view : views) { Analyzer 
viewAnalyzer = new Analyzer(withClauseAnalyzer); view.getQueryStmt().analyze(viewAnalyzer); // Register this view so that the next view can reference it. withClauseAnalyzer.registerLocalView(view); } // Register all local views with the analyzer. - for (View localView: withClauseAnalyzer.getLocalViews().values()) { + for (View localView : withClauseAnalyzer.getLocalViews().values()) { analyzer.registerLocalView(localView); } } @@ -101,14 +101,14 @@ public void analyze(Analyzer analyzer) throws AnalysisException, UserException { private WithClause(WithClause other) { Preconditions.checkNotNull(other); views = Lists.newArrayList(); - for (View view: other.views) { + for (View view : other.views) { views.add(new View(view.getName(), view.getQueryStmt().clone(), view.getOriginalColLabels())); } } public void reset() { - for (View view: views) { + for (View view : views) { view.getQueryStmt().reset(); } } @@ -130,12 +130,14 @@ public void getTableRefs(Analyzer analyzer, List tblRefs, Set } @Override - public WithClause clone() { return new WithClause(this); } + public WithClause clone() { + return new WithClause(this); + } @Override public String toSql() { List viewStrings = Lists.newArrayList(); - for (View view: views) { + for (View view : views) { // Enclose the view alias and explicit labels in quotes if Hive cannot parse it // without quotes. This is needed for view compatibility between Impala and Hive. String aliasSql = ToSqlUtils.getIdentSql(view.getName()); @@ -163,5 +165,7 @@ public String toDigest() { return "WITH " + Joiner.on(",").join(viewStrings); } - public List getViews() { return views; } + public List getViews() { + return views; + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java index d6a0824788a897..8215e6c1d417d1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java @@ -191,11 +191,11 @@ protected void runAfterCatalogReady() { // handle create repository stmt public void createRepository(CreateRepositoryStmt stmt) throws DdlException { if (!catalog.getBrokerMgr().containsBroker(stmt.getBrokerName()) - && stmt.getStorageType() == StorageBackend.StorageType.BROKER) { + && stmt.getStorageType() == StorageBackend.StorageType.BROKER) { ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, "broker does not exist: " + stmt.getBrokerName()); } - BlobStorage storage = BlobStorage.create(stmt.getBrokerName(),stmt.getStorageType(), stmt.getProperties()); + BlobStorage storage = BlobStorage.create(stmt.getBrokerName(), stmt.getStorageType(), stmt.getProperties()); long repoId = catalog.getNextId(); Repository repo = new Repository(repoId, stmt.getName(), stmt.isReadOnly(), stmt.getLocation(), storage); diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java index 9e48700fd33a1d..69b638a3cd6f06 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java @@ -91,7 +91,6 @@ public enum BackupJobState { // all objects which need backup private List tableRefs = Lists.newArrayList(); -// private BackupContent content = BackupContent.ALL; private BackupJobState state; @@ -364,7 +363,7 @@ private void prepareAndSendSnapshotTask() { status = new Status(ErrCode.NOT_FOUND, "table " + tblName + " does not exist"); 
return; } - switch (tbl.getType()){ + switch (tbl.getType()) { case OLAP: checkOlapTable((OlapTable) tbl, tableRef); if (getContent() == BackupContent.ALL) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJobInfo.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJobInfo.java index c871412978e675..2c32005d0aa13a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJobInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJobInfo.java @@ -521,7 +521,7 @@ public static BackupJobInfo fromCatalog(long backupTime, String label, String db partitionInfo.indexes.put(olapTbl.getIndexNameById(index.getId()), idxInfo); // tablets if (content == BackupContent.METADATA_ONLY) { - for (Tablet tablet: index.getTablets()) { + for (Tablet tablet : index.getTablets()) { idxInfo.tablets.put(tablet.getId(), Lists.newArrayList()); } } else { diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BrokerStorage.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BrokerStorage.java index e1edd61a575274..9295095f4013f4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BrokerStorage.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BrokerStorage.java @@ -87,8 +87,7 @@ public String getBrokerName() { @Override public Status downloadWithFileSize(String remoteFilePath, String localFilePath, long fileSize) { - LOG.debug("download from {} to {}, file size: {}.", - remoteFilePath, localFilePath, fileSize); + LOG.debug("download from {} to {}, file size: {}.", remoteFilePath, localFilePath, fileSize); long start = System.currentTimeMillis(); @@ -104,7 +103,7 @@ public Status downloadWithFileSize(String remoteFilePath, String localFilePath, TBrokerFD fd; try { TBrokerOpenReaderRequest req = new TBrokerOpenReaderRequest(TBrokerVersion.VERSION_ONE, remoteFilePath, - 0, clientId(), getProperties()); + 0, clientId(), getProperties()); TBrokerOpenReaderResponse rep = client.openReader(req); TBrokerOperationStatus opst = rep.getOpStatus(); if (opst.getStatusCode() != TBrokerOperationStatusCode.OK) { @@ -115,7 +114,7 @@ public Status downloadWithFileSize(String remoteFilePath, String localFilePath, fd = rep.getFd(); LOG.info("finished to open reader. fd: {}. download {} to {}.", - fd, remoteFilePath, localFilePath); + fd, remoteFilePath, localFilePath); } catch (TException e) { return new Status(Status.ErrCode.COMMON_ERROR, "failed to open reader on broker " + BrokerUtil.printBroker(getName(), address) @@ -159,7 +158,7 @@ public Status downloadWithFileSize(String remoteFilePath, String localFilePath, while (tryTimes < 3) { try { TBrokerPReadRequest req = new TBrokerPReadRequest(TBrokerVersion.VERSION_ONE, - fd, readOffset, readLen); + fd, readOffset, readLen); rep = client.pread(req); if (rep.getOpStatus().getStatusCode() != TBrokerOperationStatusCode.OK) { // pread return failure. @@ -175,7 +174,7 @@ public Status downloadWithFileSize(String remoteFilePath, String localFilePath, } if (rep.opStatus.statusCode != TBrokerOperationStatusCode.END_OF_FILE) { LOG.debug("download. readLen: {}, read data len: {}, left size:{}. total size: {}", - readLen, rep.getData().length, leftSize, fileSize); + readLen, rep.getData().length, leftSize, fileSize); } else { LOG.debug("read eof: " + remoteFilePath); } @@ -223,9 +222,9 @@ public Status downloadWithFileSize(String remoteFilePath, String localFilePath, // I don't know why, but have to adapt to it. 
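The downloadWithFileSize hunks above reformat a chunked read loop: open a broker reader, pread up to one buffer per call, and stop at END_OF_FILE; the warning that follows below handles the case the comment above describes, where the broker returns fewer bytes than requested. A condensed sketch of that loop shape (BrokerReader is our stand-in for the Thrift client, not a Doris type):

import java.io.IOException;
import java.io.OutputStream;

class ChunkedDownloadSketch {
    interface BrokerReader {
        /** Returns up to len bytes starting at offset, or null at end of file. */
        byte[] pread(long offset, long len) throws IOException;
    }

    static void download(BrokerReader reader, OutputStream out, long fileSize, int bufSize)
            throws IOException {
        long offset = 0;
        while (offset < fileSize) {
            long want = Math.min(bufSize, fileSize - offset);
            byte[] data = reader.pread(offset, want);
            if (data == null) {
                break; // END_OF_FILE from the broker
            }
            out.write(data);
            // Advance by what was actually returned, not by what was requested,
            // since a short read is possible.
            offset += data.length;
        }
    }
}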
if (rep.getData().length != readLen) { LOG.warn("the actual read length does not equal to " - + "the expected read length: {} vs. {}, file: {}, broker: {}", - rep.getData().length, readLen, remoteFilePath, - BrokerUtil.printBroker(getName(), address)); + + "the expected read length: {} vs. {}, file: {}, broker: {}", + rep.getData().length, readLen, remoteFilePath, + BrokerUtil.printBroker(getName(), address)); } out.write(rep.getData()); @@ -237,8 +236,8 @@ public Status downloadWithFileSize(String remoteFilePath, String localFilePath, } } // end of reading remote file } catch (IOException e) { - return new Status(Status.ErrCode.COMMON_ERROR, "Got exception: " + e.getMessage() + ", broker: " + - BrokerUtil.printBroker(getName(), address)); + return new Status(Status.ErrCode.COMMON_ERROR, "Got exception: " + e.getMessage() + ", broker: " + + BrokerUtil.printBroker(getName(), address)); } finally { // close broker reader Status closeStatus = closeReader(client, address, fd); @@ -254,8 +253,8 @@ public Status downloadWithFileSize(String remoteFilePath, String localFilePath, } } - LOG.info("finished to download from {} to {} with size: {}. cost {} ms", remoteFilePath, localFilePath, - fileSize, (System.currentTimeMillis() - start)); + LOG.info("finished to download from {} to {} with size: {}. cost {} ms", + remoteFilePath, localFilePath, fileSize, (System.currentTimeMillis() - start)); return status; } @@ -421,7 +420,7 @@ public Status upload(String localPath, String remotePath) { if (status.ok()) { LOG.info("finished to upload {} to remote path {}. cost: {} ms", - localPath, remotePath, (System.currentTimeMillis() - start)); + localPath, remotePath, (System.currentTimeMillis() - start)); } return status; } @@ -440,8 +439,8 @@ public Status rename(String origFilePath, String destFilePath) { // 2. rename boolean needReturn = true; try { - TBrokerRenamePathRequest req = new TBrokerRenamePathRequest(TBrokerVersion.VERSION_ONE, origFilePath, - destFilePath, getProperties()); + TBrokerRenamePathRequest req = new TBrokerRenamePathRequest(TBrokerVersion.VERSION_ONE, + origFilePath, destFilePath, getProperties()); TBrokerOperationStatus ost = client.renamePath(req); if (ost.getStatusCode() != TBrokerOperationStatusCode.OK) { return new Status(Status.ErrCode.COMMON_ERROR, @@ -462,7 +461,7 @@ public Status rename(String origFilePath, String destFilePath) { } LOG.info("finished to rename {} to {}. 
cost: {} ms", - origFilePath, destFilePath, (System.currentTimeMillis() - start)); + origFilePath, destFilePath, (System.currentTimeMillis() - start)); return Status.OK; } @@ -479,8 +478,8 @@ public Status delete(String remotePath) { // delete boolean needReturn = true; try { - TBrokerDeletePathRequest req = new TBrokerDeletePathRequest(TBrokerVersion.VERSION_ONE, remotePath, - getProperties()); + TBrokerDeletePathRequest req = new TBrokerDeletePathRequest(TBrokerVersion.VERSION_ONE, + remotePath, getProperties()); TBrokerOperationStatus opst = client.deletePath(req); if (opst.getStatusCode() != TBrokerOperationStatusCode.OK) { return new Status(Status.ErrCode.COMMON_ERROR, @@ -521,7 +520,7 @@ public Status list(String remotePath, List result) { boolean needReturn = true; try { TBrokerListPathRequest req = new TBrokerListPathRequest(TBrokerVersion.VERSION_ONE, remotePath, - false /* not recursive */, getProperties()); + false /* not recursive */, getProperties()); req.setFileNameOnly(true); TBrokerListResponse rep = client.listPath(req); TBrokerOperationStatus opst = rep.getOpStatus(); @@ -572,7 +571,7 @@ public Status checkPathExist(String remotePath) { boolean needReturn = true; try { TBrokerCheckPathExistRequest req = new TBrokerCheckPathExistRequest(TBrokerVersion.VERSION_ONE, - remotePath, getProperties()); + remotePath, getProperties()); TBrokerCheckPathExistResponse rep = client.checkPathExist(req); TBrokerOperationStatus opst = rep.getOpStatus(); if (opst.getStatusCode() != TBrokerOperationStatusCode.OK) { @@ -636,7 +635,7 @@ private Status openWriter(TPaloBrokerService.Client client, TNetworkAddress addr TBrokerFD fd) { try { TBrokerOpenWriterRequest req = new TBrokerOpenWriterRequest(TBrokerVersion.VERSION_ONE, - remoteFile, TBrokerOpenMode.APPEND, clientId(), getProperties()); + remoteFile, TBrokerOpenMode.APPEND, clientId(), getProperties()); TBrokerOpenWriterResponse rep = client.openWriter(req); TBrokerOperationStatus opst = rep.getOpStatus(); if (opst.getStatusCode() != TBrokerOperationStatusCode.OK) { @@ -647,8 +646,7 @@ private Status openWriter(TPaloBrokerService.Client client, TNetworkAddress addr fd.setHigh(rep.getFd().getHigh()); fd.setLow(rep.getFd().getLow()); - LOG.info("finished to open writer. fd: {}. directly upload to remote path {}.", - fd, remoteFile); + LOG.info("finished to open writer. fd: {}. directly upload to remote path {}.", fd, remoteFile); } catch (TException e) { return new Status(Status.ErrCode.BAD_CONNECTION, "failed to open writer on broker " + BrokerUtil.printBroker(getName(), address) diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java index e638909045fbb5..9445f7441365b3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java @@ -99,6 +99,7 @@ public class RestoreJob extends AbstractJob { private static final Logger LOG = LogManager.getLogger(RestoreJob.class); + // CHECKSTYLE OFF public enum RestoreJobState { PENDING, // Job is newly created. Check and prepare meta in catalog. Create replica if necessary. // Waiting for replica creation finished synchronously, then sending snapshot tasks. 
@@ -112,6 +113,7 @@ public enum RestoreJobState { FINISHED, CANCELLED } + // CHECKSTYLE ON private String backupTimestamp; @@ -1438,7 +1440,7 @@ private Status allTabletCommitted(boolean isReplay) { for (MaterializedIndex idx : part.getMaterializedIndices(IndexExtState.VISIBLE)) { for (Tablet tablet : idx.getTablets()) { for (Replica replica : tablet.getReplicas()) { - if (!replica.checkVersionCatchUp(part.getVisibleVersion(),false)) { + if (!replica.checkVersionCatchUp(part.getVisibleVersion(), false)) { replica.updateVersionInfo(part.getVisibleVersion(), replica.getDataSize(), replica.getRowCount()); } @@ -1759,7 +1761,7 @@ public void write(DataOutput out) throws IOException { } out.writeInt(restoredResources.size()); - for (Resource resource: restoredResources) { + for (Resource resource : restoredResources) { resource.write(out); } @@ -1818,7 +1820,7 @@ public void readFields(DataInput in) throws IOException { long partId = in.readLong(); long version = in.readLong(); // Useless but read it to compatible with meta - long versionHash = in.readLong(); + long versionHash = in.readLong(); // CHECKSTYLE IGNORE THIS LINE restoredVersionInfo.put(tblId, partId, version); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java b/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java index 207930b18b6c0b..49cd7ab9f17317 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java @@ -80,13 +80,13 @@ public List getSqlBlockRule(ShowSqlBlockRuleStmt stmt) throws Anal // check limitation's effectiveness of a sql_block_rule public static void verifyLimitations(SqlBlockRule sqlBlockRule) throws DdlException { - if (sqlBlockRule.getPartitionNum() < 0){ + if (sqlBlockRule.getPartitionNum() < 0) { throw new DdlException("the value of partition_num can't be a negative"); } - if (sqlBlockRule.getTabletNum() < 0){ + if (sqlBlockRule.getTabletNum() < 0) { throw new DdlException("the value of tablet_num can't be a negative"); } - if (sqlBlockRule.getCardinality() < 0){ + if (sqlBlockRule.getCardinality() < 0) { throw new DdlException("the value of cardinality can't be a negative"); } } @@ -210,12 +210,14 @@ public void matchSql(String originSql, String sqlHash, String user) throws Analy public void matchSql(SqlBlockRule rule, String originSql, String sqlHash) throws AnalysisException { if (rule.getEnable()) { - if (StringUtils.isNotEmpty(rule.getSqlHash()) && - (!CreateSqlBlockRuleStmt.STRING_NOT_SET.equals(rule.getSqlHash()) && rule.getSqlHash().equals(sqlHash))) { + if (StringUtils.isNotEmpty(rule.getSqlHash()) + && (!CreateSqlBlockRuleStmt.STRING_NOT_SET.equals(rule.getSqlHash()) + && rule.getSqlHash().equals(sqlHash))) { MetricRepo.COUNTER_HIT_SQL_BLOCK_RULE.increase(1L); throw new AnalysisException("sql match hash sql block rule: " + rule.getName()); - } else if (StringUtils.isNotEmpty(rule.getSql()) && - (!CreateSqlBlockRuleStmt.STRING_NOT_SET.equals(rule.getSql()) && rule.getSqlPattern().matcher(originSql).find())) { + } else if (StringUtils.isNotEmpty(rule.getSql()) + && (!CreateSqlBlockRuleStmt.STRING_NOT_SET.equals(rule.getSql()) + && rule.getSqlPattern().matcher(originSql).find())) { MetricRepo.COUNTER_HIT_SQL_BLOCK_RULE.increase(1L); throw new AnalysisException("sql match regex sql block rule: " + rule.getName()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateFunction.java 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateFunction.java index 9230fae4b8b051..8e1c0631acb1ab 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateFunction.java @@ -405,31 +405,89 @@ public AggregateFunction build() { } } - public String getUpdateFnSymbol() { return updateFnSymbol; } - public String getInitFnSymbol() { return initFnSymbol; } - public String getSerializeFnSymbol() { return serializeFnSymbol; } - public String getMergeFnSymbol() { return mergeFnSymbol; } - public String getGetValueFnSymbol() { return getValueFnSymbol; } - public String getRemoveFnSymbol() { return removeFnSymbol; } - public String getFinalizeFnSymbol() { return finalizeFnSymbol; } - public boolean ignoresDistinct() { return ignoresDistinct; } - public boolean isAnalyticFn() { return isAnalyticFn; } - public boolean isAggregateFn() { return isAggregateFn; } - public boolean returnsNonNullOnEmpty() { return returnsNonNullOnEmpty; } + public String getUpdateFnSymbol() { + return updateFnSymbol; + } + + public String getInitFnSymbol() { + return initFnSymbol; + } + + public String getSerializeFnSymbol() { + return serializeFnSymbol; + } + + public String getMergeFnSymbol() { + return mergeFnSymbol; + } + + public String getGetValueFnSymbol() { + return getValueFnSymbol; + } + + public String getRemoveFnSymbol() { + return removeFnSymbol; + } + + public String getFinalizeFnSymbol() { + return finalizeFnSymbol; + } + + public boolean ignoresDistinct() { + return ignoresDistinct; + } + + public boolean isAnalyticFn() { + return isAnalyticFn; + } + + public boolean isAggregateFn() { + return isAggregateFn; + } + + public boolean returnsNonNullOnEmpty() { + return returnsNonNullOnEmpty; + } /** * Returns the intermediate type of this aggregate function or null * if it is identical to the return type. 
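The Javadoc fragment above (continued below) notes that an aggregate's intermediate type may differ from its return type, which is why the expanded getters and setters carry separate init/update/merge/finalize symbols. AVG is the classic example: it accumulates a (sum, count) pair but returns a single value. A toy model of that split, using our own names rather than the Doris UDAF API:

class AvgAggregateSketch {
    // Intermediate state: a (sum, count) pair rather than a plain double.
    private double sum;
    private long count;

    void update(double v) {                 // per-row accumulation
        sum += v;
        count++;
    }

    void merge(AvgAggregateSketch other) {  // combine partial states
        sum += other.sum;
        count += other.count;
    }

    double finalizeValue() {                // collapse the pair into the return type
        return count == 0 ? 0.0 : sum / count;
    }
}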
*/ - public Type getIntermediateType() { return intermediateType; } - public void setUpdateFnSymbol(String fn) { updateFnSymbol = fn; } - public void setInitFnSymbol(String fn) { initFnSymbol = fn; } - public void setSerializeFnSymbol(String fn) { serializeFnSymbol = fn; } - public void setMergeFnSymbol(String fn) { mergeFnSymbol = fn; } - public void setGetValueFnSymbol(String fn) { getValueFnSymbol = fn; } - public void setRemoveFnSymbol(String fn) { removeFnSymbol = fn; } - public void setFinalizeFnSymbol(String fn) { finalizeFnSymbol = fn; } - public void setIntermediateType(Type t) { intermediateType = t; } + public Type getIntermediateType() { + return intermediateType; + } + + public void setUpdateFnSymbol(String fn) { + updateFnSymbol = fn; + } + + public void setInitFnSymbol(String fn) { + initFnSymbol = fn; + } + + public void setSerializeFnSymbol(String fn) { + serializeFnSymbol = fn; + } + + public void setMergeFnSymbol(String fn) { + mergeFnSymbol = fn; + } + + public void setGetValueFnSymbol(String fn) { + getValueFnSymbol = fn; + } + + public void setRemoveFnSymbol(String fn) { + removeFnSymbol = fn; + } + + public void setFinalizeFnSymbol(String fn) { + finalizeFnSymbol = fn; + } + + public void setIntermediateType(Type t) { + intermediateType = t; + } @Override public String toSql(boolean ifNotExists) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java index 1458f63c38ea96..50c3c40e781e74 100755 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java @@ -948,9 +948,10 @@ private void getClusterIdAndRole() throws IOException { // But is metadata_failure_recovery is true, we will not check it because this may be a FE migration. String[] split = nodeName.split("_"); if (Config.metadata_failure_recovery.equals("false") && !selfNode.first.equalsIgnoreCase(split[0])) { - throw new IOException("the self host " + selfNode.first + " does not equal to the host in ROLE" + - " file " + split[0] + ". You need to set 'priority_networks' config in fe.conf to match" + - " the host " + split[0]); + throw new IOException("the self host " + selfNode.first + + " does not equal to the host in ROLE" + + " file " + split[0] + ". You need to set 'priority_networks' config" + + " in fe.conf to match the host " + split[0]); } } } @@ -960,8 +961,7 @@ private void getClusterIdAndRole() throws IOException { if (!versionFile.exists()) { clusterId = Config.cluster_id == -1 ? Storage.newClusterID() : Config.cluster_id; - token = Strings.isNullOrEmpty(Config.auth_token) ? - Storage.newToken() : Config.auth_token; + token = Strings.isNullOrEmpty(Config.auth_token) ? Storage.newToken() : Config.auth_token; storage = new Storage(clusterId, token, this.imageDir); storage.writeClusterIdAndToken(); @@ -974,8 +974,7 @@ private void getClusterIdAndRole() throws IOException { } else { clusterId = storage.getClusterID(); if (storage.getToken() == null) { - token = Strings.isNullOrEmpty(Config.auth_token) ? - Storage.newToken() : Config.auth_token; + token = Strings.isNullOrEmpty(Config.auth_token) ? 
Storage.newToken() : Config.auth_token; LOG.info("new token={}", token); storage.setToken(token); storage.writeClusterIdAndToken(); @@ -1488,8 +1487,8 @@ private void checkLowerCaseTableNames() { } } if (Config.lower_case_table_names != GlobalVariable.lowerCaseTableNames) { - LOG.error("The configuration of \'lower_case_table_names\' does not support modification, " + - "the expected value is {}, but the actual value is {}", + LOG.error("The configuration of \'lower_case_table_names\' does not support modification, " + + "the expected value is {}, but the actual value is {}", GlobalVariable.lowerCaseTableNames, Config.lower_case_table_names); System.exit(-1); } @@ -1508,8 +1507,8 @@ private void checkCurrentNodeExist() { Frontend fe = checkFeExist(selfNode.first, selfNode.second); if (fe == null) { - LOG.error("current node {}:{} is not added to the cluster, will exit." + - " Your FE IP maybe changed, please set 'priority_networks' config in fe.conf properly.", + LOG.error("current node {}:{} is not added to the cluster, will exit." + + " Your FE IP maybe changed, please set 'priority_networks' config in fe.conf properly.", selfNode.first, selfNode.second); System.exit(-1); } else if (fe.getRole() != role) { @@ -2681,9 +2680,9 @@ public void dropDb(DropDbStmt stmt) throws DdlException { try { if (!stmt.isForceDrop()) { if (Catalog.getCurrentCatalog().getGlobalTransactionMgr().existCommittedTxns(db.getId(), null, null)) { - throw new DdlException("There are still some transactions in the COMMITTED state waiting to be completed. " + - "The database [" + dbName + "] cannot be dropped. If you want to forcibly drop(cannot be recovered)," + - " please use \"DROP database FORCE\"."); + throw new DdlException("There are still some transactions in the COMMITTED state waiting to be completed. " + + "The database [" + dbName + "] cannot be dropped. If you want to forcibly drop(cannot be recovered)," + + " please use \"DROP database FORCE\"."); } } if (db.getDbState() == DbState.LINK && dbName.equals(db.getAttachDb())) { @@ -2729,9 +2728,9 @@ public void dropDb(DropDbStmt stmt) throws DdlException { if (table.getType() == TableType.OLAP) { OlapTable olapTable = (OlapTable) table; if (olapTable.getState() != OlapTableState.NORMAL) { - throw new DdlException("The table [" + olapTable.getState() + "]'s state is " + olapTable.getState() + ", cannot be dropped." + - " please cancel the operation on olap table firstly. If you want to forcibly drop(cannot be recovered)," + - " please use \"DROP table FORCE\"."); + throw new DdlException("The table [" + olapTable.getState() + "]'s state is " + olapTable.getState() + ", cannot be dropped." + + " please cancel the operation on olap table firstly. If you want to forcibly drop(cannot be recovered)," + + " please use \"DROP table FORCE\"."); } } } @@ -3486,9 +3485,9 @@ public void dropPartition(Database db, OlapTable olapTable, DropPartitionClause Partition partition = olapTable.getPartition(partitionName); if (partition != null) { if (Catalog.getCurrentCatalog().getGlobalTransactionMgr().existCommittedTxns(db.getId(), olapTable.getId(), partition.getId())) { - throw new DdlException("There are still some transactions in the COMMITTED state waiting to be completed." + - " The partition [" + partitionName + "] cannot be dropped. If you want to forcibly drop(cannot be recovered)," + - " please use \"DROP partition FORCE\"."); + throw new DdlException("There are still some transactions in the COMMITTED state waiting to be completed." 
+ + " The partition [" + partitionName + "] cannot be dropped. If you want to forcibly drop(cannot be recovered)," + + " please use \"DROP partition FORCE\"."); } } } @@ -4472,8 +4471,8 @@ public static void getDdlStmt(DdlStmt ddlStmt, String dbName, Table table, List< // 2. add partition if (separatePartition && (table instanceof OlapTable) && ((OlapTable) table).getPartitions().size() > 1) { - if (((OlapTable) table).getPartitionInfo().getType() == PartitionType.RANGE || - ((OlapTable) table).getPartitionInfo().getType() == PartitionType.LIST) { + if (((OlapTable) table).getPartitionInfo().getType() == PartitionType.RANGE + || ((OlapTable) table).getPartitionInfo().getType() == PartitionType.LIST) { OlapTable olapTable = (OlapTable) table; PartitionInfo partitionInfo = olapTable.getPartitionInfo(); boolean first = true; @@ -4687,9 +4686,9 @@ public void dropTable(DropTableStmt stmt) throws DdlException { if (!stmt.isForceDrop()) { if (Catalog.getCurrentCatalog().getGlobalTransactionMgr().existCommittedTxns(db.getId(), table.getId(), null)) { - throw new DdlException("There are still some transactions in the COMMITTED state waiting to be completed. " + - "The table [" + tableName + "] cannot be dropped. If you want to forcibly drop(cannot be recovered)," + - " please use \"DROP table FORCE\"."); + throw new DdlException("There are still some transactions in the COMMITTED state waiting to be completed. " + + "The table [" + tableName + "] cannot be dropped. If you want to forcibly drop(cannot be recovered)," + + " please use \"DROP table FORCE\"."); } } DropInfo info = new DropInfo(db.getId(), table.getId(), -1L, stmt.isForceDrop()); @@ -4698,9 +4697,9 @@ public void dropTable(DropTableStmt stmt) throws DdlException { if (table instanceof OlapTable && !stmt.isForceDrop()) { OlapTable olapTable = (OlapTable) table; if ((olapTable.getState() != OlapTableState.NORMAL)) { - throw new DdlException("The table [" + tableName + "]'s state is " + olapTable.getState() + ", cannot be dropped." + - " please cancel the operation on olap table firstly. If you want to forcibly drop(cannot be recovered)," + - " please use \"DROP table FORCE\"."); + throw new DdlException("The table [" + tableName + "]'s state is " + olapTable.getState() + ", cannot be dropped." + + " please cancel the operation on olap table firstly. 
If you want to forcibly drop(cannot be recovered)," + + " please use \"DROP table FORCE\"."); } } unprotectDropTable(db, table, stmt.isForceDrop(), false); @@ -4928,8 +4927,8 @@ public Optional getDb(long dbId) { return Optional.ofNullable(getDbNullable(dbId)); } - public Database - getDbOrException(String dbName, java.util.function.Function e) throws E { + public Database getDbOrException( + String dbName, java.util.function.Function e) throws E { Database db = getDbNullable(dbName); if (db == null) { throw e.apply(dbName); @@ -4937,8 +4936,7 @@ public Optional getDb(long dbId) { return db; } - public Database - getDbOrException(long dbId, java.util.function.Function e) throws E { + public Database getDbOrException(long dbId, java.util.function.Function e) throws E { Database db = getDbNullable(dbId); if (db == null) { throw e.apply(dbId); @@ -5781,9 +5779,10 @@ public void modifyTableReplicaAllocation(Database db, OlapTable table, Map (Config.storage_flood_stage_usage_percent / 100.0); + return diskAvailableCapacityB < Config.storage_flood_stage_left_capacity_bytes + && (double) (totalCapacityB - diskAvailableCapacityB) / totalCapacityB + > (Config.storage_flood_stage_usage_percent / 100.0); } else { - return diskAvailableCapacityB < Config.storage_min_left_capacity_bytes || - (double) (totalCapacityB - diskAvailableCapacityB) / totalCapacityB > (Config.storage_high_watermark_usage_percent / 100.0); + return diskAvailableCapacityB < Config.storage_min_left_capacity_bytes + || (double) (totalCapacityB - diskAvailableCapacityB) / totalCapacityB + > (Config.storage_high_watermark_usage_percent / 100.0); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/DynamicPartitionProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/DynamicPartitionProperty.java index ca8bf9002ec55e..83ba5b67a6c3e1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/DynamicPartitionProperty.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/DynamicPartitionProperty.java @@ -199,18 +199,18 @@ public String getSortedReservedHistoryPeriods(String reservedHistoryPeriods, Str */ public String getProperties(ReplicaAllocation tableReplicaAlloc) { ReplicaAllocation tmpAlloc = this.replicaAlloc.isNotSet() ? 
tableReplicaAlloc : this.replicaAlloc; - String res = ",\n\"" + ENABLE + "\" = \"" + enable + "\"" + - ",\n\"" + TIME_UNIT + "\" = \"" + timeUnit + "\"" + - ",\n\"" + TIME_ZONE + "\" = \"" + tz.getID() + "\"" + - ",\n\"" + START + "\" = \"" + start + "\"" + - ",\n\"" + END + "\" = \"" + end + "\"" + - ",\n\"" + PREFIX + "\" = \"" + prefix + "\"" + - ",\n\"" + REPLICATION_ALLOCATION + "\" = \"" + tmpAlloc.toCreateStmt() + "\"" + - ",\n\"" + BUCKETS + "\" = \"" + buckets + "\"" + - ",\n\"" + CREATE_HISTORY_PARTITION + "\" = \"" + createHistoryPartition + "\"" + - ",\n\"" + HISTORY_PARTITION_NUM + "\" = \"" + historyPartitionNum + "\"" + - ",\n\"" + HOT_PARTITION_NUM + "\" = \"" + hotPartitionNum + "\"" + - ",\n\"" + RESERVED_HISTORY_PERIODS + "\" = \"" + reservedHistoryPeriods + "\""; + String res = ",\n\"" + ENABLE + "\" = \"" + enable + "\"" + + ",\n\"" + TIME_UNIT + "\" = \"" + timeUnit + "\"" + + ",\n\"" + TIME_ZONE + "\" = \"" + tz.getID() + "\"" + + ",\n\"" + START + "\" = \"" + start + "\"" + + ",\n\"" + END + "\" = \"" + end + "\"" + + ",\n\"" + PREFIX + "\" = \"" + prefix + "\"" + + ",\n\"" + REPLICATION_ALLOCATION + "\" = \"" + tmpAlloc.toCreateStmt() + "\"" + + ",\n\"" + BUCKETS + "\" = \"" + buckets + "\"" + + ",\n\"" + CREATE_HISTORY_PARTITION + "\" = \"" + createHistoryPartition + "\"" + + ",\n\"" + HISTORY_PARTITION_NUM + "\" = \"" + historyPartitionNum + "\"" + + ",\n\"" + HOT_PARTITION_NUM + "\" = \"" + hotPartitionNum + "\"" + + ",\n\"" + RESERVED_HISTORY_PERIODS + "\" = \"" + reservedHistoryPeriods + "\""; if (getTimeUnit().equalsIgnoreCase(TimeUnit.WEEK.toString())) { res += ",\n\"" + START_DAY_OF_WEEK + "\" = \"" + startOfWeek.dayOfWeek + "\""; } else if (getTimeUnit().equalsIgnoreCase(TimeUnit.MONTH.toString())) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java index c1c89155d21cc8..6981d628574fc0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java @@ -428,8 +428,8 @@ public void syncTableMetaData(EsRestClient client) { esMetaStateTracker.run(); this.esTablePartitions = esMetaStateTracker.searchContext().tablePartitions(); } catch (Throwable e) { - LOG.warn("Exception happens when fetch index [{}] meta data from remote es cluster." + - "table id: {}, err: {}", this.name, this.id, e.getMessage()); + LOG.warn("Exception happens when fetch index [{}] meta data from remote es cluster." 
+ + "table id: {}, err: {}", this.name, this.id, e.getMessage()); this.esTablePartitions = null; this.lastMetaDataSyncException = e; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Function.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Function.java index 49ba2b6f361e01..7e609c5070dc95 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Function.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Function.java @@ -228,10 +228,21 @@ public void setHasVarArgs(boolean v) { hasVarArgs = v; } - public void setId(long functionId) { this.id = functionId; } - public long getId() { return id; } - public void setChecksum(String checksum) { this.checksum = checksum; } - public String getChecksum() { return checksum; } + public void setId(long functionId) { + this.id = functionId; + } + + public long getId() { + return id; + } + + public void setChecksum(String checksum) { + this.checksum = checksum; + } + + public String getChecksum() { + return checksum; + } // TODO(cmy): Currently we judge whether it is UDF by wheter the 'location' is set. // Maybe we should use a separate variable to identify, diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSearchDesc.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSearchDesc.java index d357ff25f7bd07..3eed1ab4996971 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSearchDesc.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSearchDesc.java @@ -38,9 +38,17 @@ public FunctionSearchDesc(FunctionName name, Type[] argTypes, boolean isVariadic this.isVariadic = isVariadic; } - public FunctionName getName() { return name; } - public Type[] getArgTypes() { return argTypes; } - public boolean isVariadic() { return isVariadic; } + public FunctionName getName() { + return name; + } + + public Type[] getArgTypes() { + return argTypes; + } + + public boolean isVariadic() { + return isVariadic; + } public boolean isIdentical(Function function) { if (!name.equals(function.getFunctionName())) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java index 4b94052643b36e..63c49b2f141606 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java @@ -178,39 +178,39 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private static final Map MIN_UPDATE_SYMBOL = - ImmutableMap.builder() - .put(Type.BOOLEAN, - "3minIN9doris_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.TINYINT, - "3minIN9doris_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.SMALLINT, - "3minIN9doris_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.INT, - "3minIN9doris_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.BIGINT, - "3minIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.FLOAT, - "3minIN9doris_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DOUBLE, - "3minIN9doris_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_") - // .put(Type.CHAR, - // "3minIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.VARCHAR, - "3minIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.STRING, - "3minIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DATE, - "3minIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") - 
.put(Type.DATETIME, - "3minIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DECIMALV2, - "3minIN9doris_udf12DecimalV2ValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.LARGEINT, - "3minIN9doris_udf11LargeIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .build(); + ImmutableMap.builder() + .put(Type.BOOLEAN, + "3minIN9doris_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.TINYINT, + "3minIN9doris_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.SMALLINT, + "3minIN9doris_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.INT, + "3minIN9doris_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.BIGINT, + "3minIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.FLOAT, + "3minIN9doris_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DOUBLE, + "3minIN9doris_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_") + // .put(Type.CHAR, + // "3minIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.VARCHAR, + "3minIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.STRING, + "3minIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DATE, + "3minIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DATETIME, + "3minIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DECIMALV2, + "3minIN9doris_udf12DecimalV2ValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.LARGEINT, + "3minIN9doris_udf11LargeIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .build(); private static final Map MAX_UPDATE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.BOOLEAN, "3maxIN9doris_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_") .put(Type.TINYINT, @@ -368,7 +368,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private static final Map STDDEV_INIT_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.TINYINT, "14knuth_var_initEPN9doris_udf15FunctionContextEPNS1_9StringValE") .put(Type.SMALLINT, @@ -386,7 +386,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private static final Map STDDEV_UPDATE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.TINYINT, "16knuth_var_updateIN9doris_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE") .put(Type.SMALLINT, @@ -405,7 +405,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { private static final Map STDDEV_REMOVE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.TINYINT, "16knuth_var_removeIN9doris_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE") .put(Type.SMALLINT, @@ -422,7 +422,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { "16knuth_var_removeEPN9doris_udf15FunctionContextERKNS1_12DecimalV2ValEPNS1_9StringValE") .build(); private static final Map STDDEV_MERGE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.TINYINT, "15knuth_var_mergeEPN9doris_udf15FunctionContextERKNS1_9StringValEPS4_") .put(Type.SMALLINT, @@ -440,7 +440,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private static final Map STDDEV_FINALIZE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.TINYINT, "21knuth_stddev_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE") .put(Type.SMALLINT, @@ -458,7 +458,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private 
static final Map STDDEV_GET_VALUE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.TINYINT, "22knuth_stddev_get_valueEPN9doris_udf15FunctionContextERKNS1_9StringValE") .put(Type.SMALLINT, @@ -494,7 +494,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private static final Map STDDEV_POP_GET_VALUE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.TINYINT, "26knuth_stddev_pop_get_valueEPN9doris_udf15FunctionContextERKNS1_9StringValE") .put(Type.SMALLINT, @@ -512,7 +512,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private static final Map VAR_FINALIZE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.TINYINT, "18knuth_var_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE") .put(Type.SMALLINT, @@ -530,7 +530,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private static final Map VAR_GET_VALUE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.TINYINT, "19knuth_var_get_valueEPN9doris_udf15FunctionContextERKNS1_9StringValE") .put(Type.SMALLINT, @@ -548,7 +548,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private static final Map VAR_POP_FINALIZE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.TINYINT, "22knuth_var_pop_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE") .put(Type.SMALLINT, @@ -566,7 +566,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private static final Map VAR_POP_GET_VALUE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.TINYINT, "23knuth_var_pop_get_valueEPN9doris_udf15FunctionContextERKNS1_9StringValE") .put(Type.SMALLINT, @@ -586,7 +586,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { public static final String HLL_UNION = "hll_union"; private static final Map HLL_UPDATE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.BOOLEAN, "10hll_updateIN9doris_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE") .put(Type.TINYINT, @@ -619,7 +619,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { private static final Map HLL_UNION_AGG_UPDATE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.VARCHAR, "_ZN5doris12HllFunctions9hll_mergeEPN9doris_udf15FunctionContextERKNS1_9StringValEPS4_") .put(Type.STRING, @@ -629,7 +629,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private static final Map OFFSET_FN_INIT_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.BOOLEAN, "14offset_fn_initIN9doris_udf10BooleanValEEEvPNS2_15FunctionContextEPT_") .put(Type.DECIMALV2, @@ -662,7 +662,7 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private static final Map OFFSET_FN_UPDATE_SYMBOL = - ImmutableMap.builder() + ImmutableMap.builder() .put(Type.BOOLEAN, "16offset_fn_updateIN9doris_udf10BooleanValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValES8_PS6_") .put(Type.DECIMALV2, @@ -699,144 +699,145 @@ public boolean isNullResultWithOneNullParamFunctions(String funcName) { .build(); private static final Map LAST_VALUE_UPDATE_SYMBOL = - ImmutableMap.builder() - .put(Type.BOOLEAN, - "15last_val_updateIN9doris_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DECIMALV2, - "15last_val_updateIN9doris_udf12DecimalV2ValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.TINYINT, 
- "15last_val_updateIN9doris_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.SMALLINT, - "15last_val_updateIN9doris_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DATE, - "15last_val_updateIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DATETIME, - "15last_val_updateIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.INT, - "15last_val_updateIN9doris_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.FLOAT, - "15last_val_updateIN9doris_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.BIGINT, - "15last_val_updateIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DOUBLE, - "15last_val_updateIN9doris_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_") - // .put(Type.CHAR, - // "15last_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.VARCHAR, - "15last_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.STRING, - "15last_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.LARGEINT, - "15last_val_updateIN9doris_udf11LargeIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .build(); + ImmutableMap.builder() + .put(Type.BOOLEAN, + "15last_val_updateIN9doris_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DECIMALV2, + "15last_val_updateIN9doris_udf12DecimalV2ValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.TINYINT, + "15last_val_updateIN9doris_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.SMALLINT, + "15last_val_updateIN9doris_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DATE, + "15last_val_updateIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DATETIME, + "15last_val_updateIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.INT, + "15last_val_updateIN9doris_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.FLOAT, + "15last_val_updateIN9doris_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.BIGINT, + "15last_val_updateIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DOUBLE, + "15last_val_updateIN9doris_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_") + // .put(Type.CHAR, + // "15last_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.VARCHAR, + "15last_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.STRING, + "15last_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.LARGEINT, + "15last_val_updateIN9doris_udf11LargeIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .build(); private static final Map FIRST_VALUE_REWRITE_UPDATE_SYMBOL = - ImmutableMap.builder() - .put(Type.BOOLEAN, - "24first_val_rewrite_updateIN9doris_udf10BooleanValEEEvPNS2_15" - + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") - .put(Type.DECIMALV2, - "24first_val_rewrite_updateIN9doris_udf12DecimalV2ValEEEvPNS2_15" - + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") - .put(Type.TINYINT, - "24first_val_rewrite_updateIN9doris_udf10TinyIntValEEEvPNS2_15" - + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") - .put(Type.SMALLINT, - "24first_val_rewrite_updateIN9doris_udf11SmallIntValEEEvPNS2_15" - + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") - .put(Type.DATE, - "24first_val_rewrite_updateIN9doris_udf11DateTimeValEEEvPNS2_15" - + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") - .put(Type.DATETIME, - "24first_val_rewrite_updateIN9doris_udf11DateTimeValEEEvPNS2_15" - + 
"FunctionContextERKT_RKNS2_9BigIntValEPS6_") - .put(Type.INT, - "24first_val_rewrite_updateIN9doris_udf6IntValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValEPS6_") - .put(Type.FLOAT, - "24first_val_rewrite_updateIN9doris_udf8FloatValEEEvPNS2_15" - + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") - .put(Type.BIGINT, - "24first_val_rewrite_updateIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextERKT_RKS3_PS6_") - .put(Type.DOUBLE, - "24first_val_rewrite_updateIN9doris_udf9DoubleValEEEvPNS2_15" - + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") - .put(Type.VARCHAR, - "24first_val_rewrite_updateIN9doris_udf9StringValEEEvPNS2_15" - + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") - .put(Type.STRING, - "24first_val_rewrite_updateIN9doris_udf9StringValEEEvPNS2_15" - + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") - .put(Type.LARGEINT, - "24first_val_rewrite_updateIN9doris_udf11LargeIntValEEEvPNS2_15" - + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") - // .put(Type.VARCHAR, - // "15last_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .build(); + ImmutableMap.builder() + .put(Type.BOOLEAN, + "24first_val_rewrite_updateIN9doris_udf10BooleanValEEEvPNS2_15" + + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") + .put(Type.DECIMALV2, + "24first_val_rewrite_updateIN9doris_udf12DecimalV2ValEEEvPNS2_15" + + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") + .put(Type.TINYINT, + "24first_val_rewrite_updateIN9doris_udf10TinyIntValEEEvPNS2_15" + + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") + .put(Type.SMALLINT, + "24first_val_rewrite_updateIN9doris_udf11SmallIntValEEEvPNS2_15" + + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") + .put(Type.DATE, + "24first_val_rewrite_updateIN9doris_udf11DateTimeValEEEvPNS2_15" + + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") + .put(Type.DATETIME, + "24first_val_rewrite_updateIN9doris_udf11DateTimeValEEEvPNS2_15" + + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") + .put(Type.INT, + "24first_val_rewrite_updateIN9doris_udf6IntValEEEvPNS2_15" + + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") + .put(Type.FLOAT, + "24first_val_rewrite_updateIN9doris_udf8FloatValEEEvPNS2_15" + + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") + .put(Type.BIGINT, + "24first_val_rewrite_updateIN9doris_udf9BigIntValEEEvPNS2_15" + + "FunctionContextERKT_RKS3_PS6_") + .put(Type.DOUBLE, + "24first_val_rewrite_updateIN9doris_udf9DoubleValEEEvPNS2_15" + + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") + .put(Type.VARCHAR, + "24first_val_rewrite_updateIN9doris_udf9StringValEEEvPNS2_15" + + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") + .put(Type.STRING, + "24first_val_rewrite_updateIN9doris_udf9StringValEEEvPNS2_15" + + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") + .put(Type.LARGEINT, + "24first_val_rewrite_updateIN9doris_udf11LargeIntValEEEvPNS2_15" + + "FunctionContextERKT_RKNS2_9BigIntValEPS6_") + // .put(Type.VARCHAR, + // "15last_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .build(); private static final Map LAST_VALUE_REMOVE_SYMBOL = - ImmutableMap.builder() - .put(Type.BOOLEAN, - "15last_val_removeIN9doris_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DECIMALV2, - "15last_val_removeIN9doris_udf12DecimalV2ValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.TINYINT, - "15last_val_removeIN9doris_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.SMALLINT, - "15last_val_removeIN9doris_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DATE, - "15last_val_removeIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") - 
.put(Type.DATETIME, - "15last_val_removeIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.INT, - "15last_val_removeIN9doris_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.FLOAT, - "15last_val_removeIN9doris_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.BIGINT, - "15last_val_removeIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DOUBLE, - "15last_val_removeIN9doris_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_") - // .put(Type.CHAR, - // "15last_val_removeIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.VARCHAR, - "15last_val_removeIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.STRING, - "15last_val_removeIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.LARGEINT, - "15last_val_removeIN9doris_udf11LargeIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .build(); + ImmutableMap.builder() + .put(Type.BOOLEAN, + "15last_val_removeIN9doris_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DECIMALV2, + "15last_val_removeIN9doris_udf12DecimalV2ValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.TINYINT, + "15last_val_removeIN9doris_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.SMALLINT, + "15last_val_removeIN9doris_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DATE, + "15last_val_removeIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DATETIME, + "15last_val_removeIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.INT, + "15last_val_removeIN9doris_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.FLOAT, + "15last_val_removeIN9doris_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.BIGINT, + "15last_val_removeIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DOUBLE, + "15last_val_removeIN9doris_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_") + // .put(Type.CHAR, + // "15last_val_removeIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.VARCHAR, + "15last_val_removeIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.STRING, + "15last_val_removeIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.LARGEINT, + "15last_val_removeIN9doris_udf11LargeIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .build(); private static final Map FIRST_VALUE_UPDATE_SYMBOL = - ImmutableMap.builder() - .put(Type.BOOLEAN, - "16first_val_updateIN9doris_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DECIMALV2, - "16first_val_updateIN9doris_udf12DecimalV2ValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.TINYINT, - "16first_val_updateIN9doris_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.SMALLINT, - "16first_val_updateIN9doris_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DATE, - "16first_val_updateIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DATETIME, - "16first_val_updateIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.INT, - "16first_val_updateIN9doris_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.FLOAT, - "16first_val_updateIN9doris_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.BIGINT, - "16first_val_updateIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.DOUBLE, - "16first_val_updateIN9doris_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_") - // .put(Type.CHAR, - // 
"16first_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.VARCHAR, - "16first_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.STRING, - "16first_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") - .put(Type.LARGEINT, - "16first_val_updateIN9doris_udf11LargeIntValEEEvPNS2_15FunctionContextERKT_PS6_") - - .build(); + ImmutableMap.builder() + .put(Type.BOOLEAN, + "16first_val_updateIN9doris_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DECIMALV2, + "16first_val_updateIN9doris_udf12DecimalV2ValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.TINYINT, + "16first_val_updateIN9doris_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.SMALLINT, + "16first_val_updateIN9doris_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DATE, + "16first_val_updateIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DATETIME, + "16first_val_updateIN9doris_udf11DateTimeValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.INT, + "16first_val_updateIN9doris_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.FLOAT, + "16first_val_updateIN9doris_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.BIGINT, + "16first_val_updateIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.DOUBLE, + "16first_val_updateIN9doris_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_") + // .put(Type.CHAR, + // "16first_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.VARCHAR, + "16first_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.STRING, + "16first_val_updateIN9doris_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_") + .put(Type.LARGEINT, + "16first_val_updateIN9doris_udf11LargeIntValEEEvPNS2_15FunctionContextERKT_PS6_") + .build(); public static final String TO_BITMAP = "to_bitmap"; public static final String BITMAP_UNION = "bitmap_union"; @@ -1200,8 +1201,8 @@ public static boolean isCastMatchAllowed(Function desc, Function candicate) { || functionName.equalsIgnoreCase("least") || functionName.equalsIgnoreCase("lead") || functionName.equalsIgnoreCase("lag")) { - final ScalarType descArgType = (ScalarType)descArgTypes[0]; - final ScalarType candicateArgType = (ScalarType)candicateArgTypes[0]; + final ScalarType descArgType = (ScalarType) descArgTypes[0]; + final ScalarType candicateArgType = (ScalarType) candicateArgTypes[0]; if (!descArgType.isStringType() && candicateArgType.isStringType()) { // The implementations of hex for string and int are different. 
return false; @@ -1301,10 +1302,10 @@ public void addBuiltinBothScalaAndVectorized(Function fn) { } ScalarFunction scalarFunction = (ScalarFunction) fn; vecFns.add(ScalarFunction.createVecBuiltin(scalarFunction.functionName(), scalarFunction.getPrepareFnSymbol(), - scalarFunction.getSymbolName(), scalarFunction.getCloseFnSymbol(), - Lists.newArrayList(scalarFunction.getArgs()), scalarFunction.hasVarArgs(), - scalarFunction.getReturnType(), scalarFunction.isUserVisible(), - scalarFunction.getNullableMode())); + scalarFunction.getSymbolName(), scalarFunction.getCloseFnSymbol(), + Lists.newArrayList(scalarFunction.getArgs()), scalarFunction.hasVarArgs(), + scalarFunction.getReturnType(), scalarFunction.isUserVisible(), + scalarFunction.getNullableMode())); } @@ -1326,13 +1327,13 @@ private void initAggregateBuiltins() { // Type stringType[] = {Type.CHAR, Type.VARCHAR}; // count(*) addBuiltin(AggregateFunction.createBuiltin(FunctionSet.COUNT, - new ArrayList(), Type.BIGINT, Type.BIGINT, - prefix + "18init_zero_not_nullIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextEPT_", - prefix + "17count_star_updateEPN9doris_udf15FunctionContextEPNS1_9BigIntValE", - prefix + "11count_mergeEPN9doris_udf15FunctionContextERKNS1_9BigIntValEPS4_", - null, null, - prefix + "17count_star_removeEPN9doris_udf15FunctionContextEPNS1_9BigIntValE", - null, false, true, true)); + new ArrayList(), Type.BIGINT, Type.BIGINT, + prefix + "18init_zero_not_nullIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextEPT_", + prefix + "17count_star_updateEPN9doris_udf15FunctionContextEPNS1_9BigIntValE", + prefix + "11count_mergeEPN9doris_udf15FunctionContextERKNS1_9BigIntValEPS4_", + null, null, + prefix + "17count_star_removeEPN9doris_udf15FunctionContextEPNS1_9BigIntValE", + null, false, true, true)); // vectorized addBuiltin(AggregateFunction.createBuiltin(FunctionSet.COUNT, new ArrayList(), Type.BIGINT, Type.BIGINT, @@ -1401,29 +1402,29 @@ private void initAggregateBuiltins() { // count in multi distinct if (t.equals(Type.CHAR) || t.equals(Type.VARCHAR)) { - addBuiltin(AggregateFunction.createBuiltin("multi_distinct_count", Lists.newArrayList(t), - Type.BIGINT, - Type.VARCHAR, - prefix + "26count_distinct_string_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", - prefix + "28count_distinct_string_updateEPN9doris_udf15FunctionContextERNS1_9StringValEPS4_", - prefix + "27count_distinct_string_mergeEPN9doris_udf15FunctionContextERNS1_9StringValEPS4_", - prefix + "31count_distinct_string_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - null, - null, - prefix + "30count_distinct_string_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - false, true, true)); + addBuiltin(AggregateFunction.createBuiltin("multi_distinct_count", Lists.newArrayList(t), + Type.BIGINT, + Type.VARCHAR, + prefix + "26count_distinct_string_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", + prefix + "28count_distinct_string_updateEPN9doris_udf15FunctionContextERNS1_9StringValEPS4_", + prefix + "27count_distinct_string_mergeEPN9doris_udf15FunctionContextERNS1_9StringValEPS4_", + prefix + "31count_distinct_string_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + null, + null, + prefix + "30count_distinct_string_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + false, true, true)); // vectorized addBuiltin(AggregateFunction.createBuiltin("multi_distinct_count", Lists.newArrayList(t), - Type.BIGINT, - Type.VARCHAR, - prefix + "", - prefix + "", - prefix + "", - prefix + "", - null, - null, - prefix + "", - false, 
true, true, true)); + Type.BIGINT, + Type.VARCHAR, + prefix + "", + prefix + "", + prefix + "", + prefix + "", + null, + null, + prefix + "", + false, true, true, true)); } else if (t.equals(Type.STRING)) { addBuiltin(AggregateFunction.createBuiltin("multi_distinct_count", Lists.newArrayList(t), Type.BIGINT, @@ -1450,121 +1451,119 @@ private void initAggregateBuiltins() { false, true, true, true)); } else if (t.equals(Type.TINYINT) || t.equals(Type.SMALLINT) || t.equals(Type.INT) || t.equals(Type.BIGINT) || t.equals(Type.LARGEINT) || t.equals(Type.DOUBLE)) { - addBuiltin(AggregateFunction.createBuiltin("multi_distinct_count", Lists.newArrayList(t), - Type.BIGINT, - Type.VARCHAR, - prefix + MULTI_DISTINCT_INIT_SYMBOL.get(t), - prefix + MULTI_DISTINCT_UPDATE_SYMBOL.get(t), - prefix + MULTI_DISTINCT_MERGE_SYMBOL.get(t), - prefix + MULTI_DISTINCT_SERIALIZE_SYMBOL.get(t), - null, - null, - prefix + MULTI_DISTINCT_COUNT_FINALIZE_SYMBOL.get(t), - false, true, true)); + addBuiltin(AggregateFunction.createBuiltin("multi_distinct_count", Lists.newArrayList(t), + Type.BIGINT, + Type.VARCHAR, + prefix + MULTI_DISTINCT_INIT_SYMBOL.get(t), + prefix + MULTI_DISTINCT_UPDATE_SYMBOL.get(t), + prefix + MULTI_DISTINCT_MERGE_SYMBOL.get(t), + prefix + MULTI_DISTINCT_SERIALIZE_SYMBOL.get(t), + null, + null, + prefix + MULTI_DISTINCT_COUNT_FINALIZE_SYMBOL.get(t), + false, true, true)); // vectorized addBuiltin(AggregateFunction.createBuiltin("multi_distinct_count", Lists.newArrayList(t), - Type.BIGINT, - t, - prefix + MULTI_DISTINCT_INIT_SYMBOL.get(t), - prefix + MULTI_DISTINCT_UPDATE_SYMBOL.get(t), - prefix + MULTI_DISTINCT_MERGE_SYMBOL.get(t), - prefix + MULTI_DISTINCT_SERIALIZE_SYMBOL.get(t), - null, - null, - prefix + MULTI_DISTINCT_COUNT_FINALIZE_SYMBOL.get(t), - false, true, true, true)); - - + Type.BIGINT, + t, + prefix + MULTI_DISTINCT_INIT_SYMBOL.get(t), + prefix + MULTI_DISTINCT_UPDATE_SYMBOL.get(t), + prefix + MULTI_DISTINCT_MERGE_SYMBOL.get(t), + prefix + MULTI_DISTINCT_SERIALIZE_SYMBOL.get(t), + null, + null, + prefix + MULTI_DISTINCT_COUNT_FINALIZE_SYMBOL.get(t), + false, true, true, true)); } else if (t.equals(Type.DATE) || t.equals(Type.DATETIME)) { - addBuiltin(AggregateFunction.createBuiltin("multi_distinct_count", Lists.newArrayList(t), - Type.BIGINT, - Type.VARCHAR, - prefix + "24count_distinct_date_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", - prefix + "26count_distinct_date_updateEPN9doris_udf15FunctionContextERNS1_11DateTimeValEPNS1_9StringValE", - prefix + "25count_distinct_date_mergeEPN9doris_udf15FunctionContextERNS1_9StringValEPS4_", - prefix + "29count_distinct_date_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - null, - null, - prefix + "28count_distinct_date_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - false, true, true)); + addBuiltin(AggregateFunction.createBuiltin("multi_distinct_count", Lists.newArrayList(t), + Type.BIGINT, + Type.VARCHAR, + prefix + "24count_distinct_date_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", + prefix + "26count_distinct_date_updateEPN9doris_udf15FunctionContextERNS1_11DateTimeValEPNS1_9StringValE", + prefix + "25count_distinct_date_mergeEPN9doris_udf15FunctionContextERNS1_9StringValEPS4_", + prefix + "29count_distinct_date_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + null, + null, + prefix + "28count_distinct_date_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + false, true, true)); // vectorized // now we don't support datetime distinct } else if (t.equals(Type.DECIMALV2)) { - 
addBuiltin(AggregateFunction.createBuiltin("multi_distinct_count", Lists.newArrayList(t), - Type.BIGINT, - Type.VARCHAR, - prefix + "36count_or_sum_distinct_decimalv2_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", - prefix + "38count_or_sum_distinct_decimalv2_updateEPN9doris_udf15FunctionContextERNS1_12DecimalV2ValEPNS1_9StringValE", - prefix + "37count_or_sum_distinct_decimalv2_mergeEPN9doris_udf15FunctionContextERNS1_9StringValEPS4_", - prefix + "41count_or_sum_distinct_decimalv2_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - null, - null, - prefix + "33count_distinct_decimalv2_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - false, true, true)); + addBuiltin(AggregateFunction.createBuiltin("multi_distinct_count", Lists.newArrayList(t), + Type.BIGINT, + Type.VARCHAR, + prefix + "36count_or_sum_distinct_decimalv2_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", + prefix + "38count_or_sum_distinct_decimalv2_updateEPN9doris_udf15FunctionContextERNS1_12DecimalV2ValEPNS1_9StringValE", + prefix + "37count_or_sum_distinct_decimalv2_mergeEPN9doris_udf15FunctionContextERNS1_9StringValEPS4_", + prefix + "41count_or_sum_distinct_decimalv2_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + null, + null, + prefix + "33count_distinct_decimalv2_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + false, true, true)); // vectorized addBuiltin(AggregateFunction.createBuiltin("multi_distinct_count", Lists.newArrayList(t), - Type.BIGINT, - Type.DECIMALV2, - prefix + "", - prefix + "", - prefix + "", - prefix + "", - null, - null, - prefix + "", - false, true, true, true)); + Type.BIGINT, + Type.DECIMALV2, + prefix + "", + prefix + "", + prefix + "", + prefix + "", + null, + null, + prefix + "", + false, true, true, true)); } // sum in multi distinct if (t.equals(Type.BIGINT) || t.equals(Type.LARGEINT) || t.equals(Type.DOUBLE)) { addBuiltin(AggregateFunction.createBuiltin("multi_distinct_sum", Lists.newArrayList(t), - t, - Type.VARCHAR, - prefix + MULTI_DISTINCT_INIT_SYMBOL.get(t), - prefix + MULTI_DISTINCT_UPDATE_SYMBOL.get(t), - prefix + MULTI_DISTINCT_MERGE_SYMBOL.get(t), - prefix + MULTI_DISTINCT_SERIALIZE_SYMBOL.get(t), - null, - null, - prefix + MULTI_DISTINCT_SUM_FINALIZE_SYMBOL.get(t), - false, true, true)); + t, + Type.VARCHAR, + prefix + MULTI_DISTINCT_INIT_SYMBOL.get(t), + prefix + MULTI_DISTINCT_UPDATE_SYMBOL.get(t), + prefix + MULTI_DISTINCT_MERGE_SYMBOL.get(t), + prefix + MULTI_DISTINCT_SERIALIZE_SYMBOL.get(t), + null, + null, + prefix + MULTI_DISTINCT_SUM_FINALIZE_SYMBOL.get(t), + false, true, true)); // vectorized addBuiltin(AggregateFunction.createBuiltin("multi_distinct_sum", Lists.newArrayList(t), - t, - t, - prefix + MULTI_DISTINCT_INIT_SYMBOL.get(t), - prefix + MULTI_DISTINCT_UPDATE_SYMBOL.get(t), - prefix + MULTI_DISTINCT_MERGE_SYMBOL.get(t), - prefix + MULTI_DISTINCT_SERIALIZE_SYMBOL.get(t), - null, - null, - prefix + MULTI_DISTINCT_SUM_FINALIZE_SYMBOL.get(t), - false, true, true, true)); + t, + t, + prefix + MULTI_DISTINCT_INIT_SYMBOL.get(t), + prefix + MULTI_DISTINCT_UPDATE_SYMBOL.get(t), + prefix + MULTI_DISTINCT_MERGE_SYMBOL.get(t), + prefix + MULTI_DISTINCT_SERIALIZE_SYMBOL.get(t), + null, + null, + prefix + MULTI_DISTINCT_SUM_FINALIZE_SYMBOL.get(t), + false, true, true, true)); } else if (t.equals(Type.DECIMALV2)) { - addBuiltin(AggregateFunction.createBuiltin("multi_distinct_sum", Lists.newArrayList(t), - MULTI_DISTINCT_SUM_RETURN_TYPE.get(t), - Type.VARCHAR, - prefix + 
"36count_or_sum_distinct_decimalv2_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", - prefix + "38count_or_sum_distinct_decimalv2_updateEPN9doris_udf15FunctionContextERNS1_12DecimalV2ValEPNS1_9StringValE", - prefix + "37count_or_sum_distinct_decimalv2_mergeEPN9doris_udf15FunctionContextERNS1_9StringValEPS4_", - prefix + "41count_or_sum_distinct_decimalv2_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - null, - null, - prefix + "31sum_distinct_decimalv2_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - false, true, true)); + addBuiltin(AggregateFunction.createBuiltin("multi_distinct_sum", Lists.newArrayList(t), + MULTI_DISTINCT_SUM_RETURN_TYPE.get(t), + Type.VARCHAR, + prefix + "36count_or_sum_distinct_decimalv2_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", + prefix + "38count_or_sum_distinct_decimalv2_updateEPN9doris_udf15FunctionContextERNS1_12DecimalV2ValEPNS1_9StringValE", + prefix + "37count_or_sum_distinct_decimalv2_mergeEPN9doris_udf15FunctionContextERNS1_9StringValEPS4_", + prefix + "41count_or_sum_distinct_decimalv2_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + null, + null, + prefix + "31sum_distinct_decimalv2_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + false, true, true)); // vectorized addBuiltin(AggregateFunction.createBuiltin("multi_distinct_sum", Lists.newArrayList(t), - MULTI_DISTINCT_SUM_RETURN_TYPE.get(t), - Type.DECIMALV2, - prefix + "", - prefix + "", - prefix + "", - prefix + "", - null, - null, - prefix + "", - false, true, true, true)); + MULTI_DISTINCT_SUM_RETURN_TYPE.get(t), + Type.DECIMALV2, + prefix + "", + prefix + "", + prefix + "", + prefix + "", + null, + null, + prefix + "", + false, true, true, true)); } // Min String minMaxSerializeOrFinalize = t.isStringType() ? 
stringValSerializeOrFinalize : null; @@ -1750,39 +1749,39 @@ private void initAggregateBuiltins() { // TopN if (TOPN_UPDATE_SYMBOL.containsKey(t)) { - addBuiltin(AggregateFunction.createBuiltin("topn", Lists.newArrayList(t, Type.INT), Type.VARCHAR, - Type.VARCHAR, - "_ZN5doris13TopNFunctions9topn_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", - TOPN_UPDATE_SYMBOL.get(t), - "_ZN5doris13TopNFunctions10topn_mergeEPN9doris_udf15FunctionContextERKNS1_9StringValEPS4_", - "_ZN5doris13TopNFunctions14topn_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - "_ZN5doris13TopNFunctions13topn_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - true, false, true)); - addBuiltin(AggregateFunction.createBuiltin("topn", Lists.newArrayList(t, Type.INT, Type.INT), - Type.VARCHAR, Type.VARCHAR, - "_ZN5doris13TopNFunctions9topn_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", - TOPN_UPDATE_MORE_PARAM_SYMBOL.get(t), - "_ZN5doris13TopNFunctions10topn_mergeEPN9doris_udf15FunctionContextERKNS1_9StringValEPS4_", - "_ZN5doris13TopNFunctions14topn_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - "_ZN5doris13TopNFunctions13topn_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - true, false, true)); - // vectorized - addBuiltin(AggregateFunction.createBuiltin("topn", Lists.newArrayList(t, Type.INT), Type.VARCHAR, - Type.VARCHAR, - "_ZN5doris13TopNFunctions9topn_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", - TOPN_UPDATE_SYMBOL.get(t), - "_ZN5doris13TopNFunctions10topn_mergeEPN9doris_udf15FunctionContextERKNS1_9StringValEPS4_", - "_ZN5doris13TopNFunctions14topn_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - "_ZN5doris13TopNFunctions13topn_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - true, false, true, true)); - addBuiltin(AggregateFunction.createBuiltin("topn", Lists.newArrayList(t, Type.INT, Type.INT), - Type.VARCHAR, Type.VARCHAR, - "_ZN5doris13TopNFunctions9topn_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", - TOPN_UPDATE_MORE_PARAM_SYMBOL.get(t), - "_ZN5doris13TopNFunctions10topn_mergeEPN9doris_udf15FunctionContextERKNS1_9StringValEPS4_", - "_ZN5doris13TopNFunctions14topn_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - "_ZN5doris13TopNFunctions13topn_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", - true, false, true, true)); + addBuiltin(AggregateFunction.createBuiltin("topn", Lists.newArrayList(t, Type.INT), Type.VARCHAR, + Type.VARCHAR, + "_ZN5doris13TopNFunctions9topn_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", + TOPN_UPDATE_SYMBOL.get(t), + "_ZN5doris13TopNFunctions10topn_mergeEPN9doris_udf15FunctionContextERKNS1_9StringValEPS4_", + "_ZN5doris13TopNFunctions14topn_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + "_ZN5doris13TopNFunctions13topn_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + true, false, true)); + addBuiltin(AggregateFunction.createBuiltin("topn", Lists.newArrayList(t, Type.INT, Type.INT), + Type.VARCHAR, Type.VARCHAR, + "_ZN5doris13TopNFunctions9topn_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", + TOPN_UPDATE_MORE_PARAM_SYMBOL.get(t), + "_ZN5doris13TopNFunctions10topn_mergeEPN9doris_udf15FunctionContextERKNS1_9StringValEPS4_", + "_ZN5doris13TopNFunctions14topn_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + "_ZN5doris13TopNFunctions13topn_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + true, false, true)); + // vectorized + addBuiltin(AggregateFunction.createBuiltin("topn", Lists.newArrayList(t, 
Type.INT), Type.VARCHAR, + Type.VARCHAR, + "_ZN5doris13TopNFunctions9topn_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", + TOPN_UPDATE_SYMBOL.get(t), + "_ZN5doris13TopNFunctions10topn_mergeEPN9doris_udf15FunctionContextERKNS1_9StringValEPS4_", + "_ZN5doris13TopNFunctions14topn_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + "_ZN5doris13TopNFunctions13topn_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + true, false, true, true)); + addBuiltin(AggregateFunction.createBuiltin("topn", Lists.newArrayList(t, Type.INT, Type.INT), + Type.VARCHAR, Type.VARCHAR, + "_ZN5doris13TopNFunctions9topn_initEPN9doris_udf15FunctionContextEPNS1_9StringValE", + TOPN_UPDATE_MORE_PARAM_SYMBOL.get(t), + "_ZN5doris13TopNFunctions10topn_mergeEPN9doris_udf15FunctionContextERKNS1_9StringValEPS4_", + "_ZN5doris13TopNFunctions14topn_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + "_ZN5doris13TopNFunctions13topn_finalizeEPN9doris_udf15FunctionContextERKNS1_9StringValE", + true, false, true, true)); } if (STDDEV_UPDATE_SYMBOL.containsKey(t)) { @@ -2017,7 +2016,7 @@ private void initAggregateBuiltins() { Type.VARCHAR, Type.STRING}; for (Type t : types) { addBuiltin(AggregateFunction.createBuiltin(ORTHOGONAL_BITMAP_INTERSECT, - Lists.newArrayList(Type.BITMAP, t,t), + Lists.newArrayList(Type.BITMAP, t, t), Type.BITMAP, Type.VARCHAR, true, @@ -2030,7 +2029,7 @@ private void initAggregateBuiltins() { "_ZN5doris15BitmapFunctions16bitmap_serializeEPN9doris_udf15FunctionContextERKNS1_9StringValE", true, false, true)); addBuiltin(AggregateFunction.createBuiltin(ORTHOGONAL_BITMAP_INTERSECT_COUNT, - Lists.newArrayList(Type.BITMAP, t,t), + Lists.newArrayList(Type.BITMAP, t, t), Type.BIGINT, Type.VARCHAR, true, @@ -2342,7 +2341,7 @@ private void initAggregateBuiltins() { prefix + "20dense_rank_get_valueEPN9doris_udf15FunctionContextERNS1_9StringValE", prefix + "13rank_finalizeEPN9doris_udf15FunctionContextERNS1_9StringValE")); //row_number - addBuiltin(AggregateFunction.createAnalyticBuiltin( "row_number", + addBuiltin(AggregateFunction.createAnalyticBuiltin("row_number", new ArrayList(), Type.BIGINT, Type.BIGINT, prefix + "18init_zero_not_nullIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextEPT_", prefix + "17count_star_updateEPN9doris_udf15FunctionContextEPNS1_9BigIntValE", @@ -2366,7 +2365,7 @@ private void initAggregateBuiltins() { prefix + "20dense_rank_get_valueEPN9doris_udf15FunctionContextERNS1_9StringValE", prefix + "13rank_finalizeEPN9doris_udf15FunctionContextERNS1_9StringValE", true)); //vec row_number - addBuiltin(AggregateFunction.createAnalyticBuiltin( "row_number", + addBuiltin(AggregateFunction.createAnalyticBuiltin("row_number", new ArrayList(), Type.BIGINT, Type.BIGINT, prefix + "18init_zero_not_nullIN9doris_udf9BigIntValEEEvPNS2_15FunctionContextEPT_", prefix + "17count_star_updateEPN9doris_udf15FunctionContextEPNS1_9BigIntValE", diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java index 53e27dfc6c127b..5b11708bfdf42c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java @@ -550,7 +550,7 @@ public ExprNodeGenericFuncDesc build() throws DdlException { if (stack.size() != 1) { throw new DdlException("Build Hive expression Failed: " + stack.size()); } - return (ExprNodeGenericFuncDesc)stack.pop(); + return 
(ExprNodeGenericFuncDesc) stack.pop(); } public ExprBuilder pred(String name, int args) throws DdlException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java index d418e473072b8c..68fb389335f0aa 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java @@ -104,7 +104,7 @@ private void validate(Map properties) throws DdlException { if (!copiedProps.isEmpty()) { Iterator> iter = copiedProps.entrySet().iterator(); - while(iter.hasNext()) { + while (iter.hasNext()) { Map.Entry entry = iter.next(); if (entry.getKey().startsWith(HIVE_HDFS_PREFIX)) { hiveProperties.put(entry.getKey(), entry.getValue()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionInfo.java index 201c89d0532c73..afa6562f86a0d2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionInfo.java @@ -34,7 +34,7 @@ import java.util.List; import java.util.Map; -public class ListPartitionInfo extends PartitionInfo{ +public class ListPartitionInfo extends PartitionInfo { public ListPartitionInfo() { // for persist @@ -60,8 +60,8 @@ public PartitionItem createAndCheckPartitionItem(SinglePartitionDesc desc, boole for (List values : partitionKeyDesc.getInValues()) { Preconditions.checkArgument(values.size() == partitionColumns.size(), - "partition key desc list size[" + values.size() + "] is not equal to " + - "partition column size[" + partitionColumns.size() + "]"); + "partition key desc list size[" + values.size() + "] is not equal to " + + "partition column size[" + partitionColumns.size() + "]"); } List partitionKeys = new ArrayList<>(); try { @@ -83,11 +83,11 @@ public PartitionItem createAndCheckPartitionItem(SinglePartitionDesc desc, boole private void checkNewPartitionKey(PartitionKey newKey, PartitionKeyDesc keyDesc, boolean isTemp) throws AnalysisException { Map id2Item = idToItem; if (isTemp) { - id2Item = idToTempItem; + id2Item = idToTempItem; } // check new partition key not exists. 
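// The surrounding hunks normalize loop-header whitespace: 'while (' instead of
// 'while(', and spaces on both sides of ':' in an enhanced for. A minimal
// sketch under those conventions (hypothetical data, not from this patch):
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class LoopWhitespaceExample {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("host", "127.0.0.1");
        for (Map.Entry<String, String> entry : props.entrySet()) { // ' : ' spacing
            System.out.println(entry.getKey() + "=" + entry.getValue());
        }
        Iterator<String> iter = props.keySet().iterator();
        while (iter.hasNext()) { // space after 'while'
            iter.next();
        }
    }
}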
for (Map.Entry entry : id2Item.entrySet()) { - if (((ListPartitionItem)entry.getValue()).getItems().contains(newKey)) { + if (((ListPartitionItem) entry.getValue()).getItems().contains(newKey)) { StringBuilder sb = new StringBuilder(); sb.append("The partition key[").append(newKey.toSql()).append("] in partition item[") .append(keyDesc.toSql()).append("] is conflict with current partitionKeys[") diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/MetadataViewer.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/MetadataViewer.java index acf6820cdbc3d2..887af28a74734c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/MetadataViewer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/MetadataViewer.java @@ -88,7 +88,7 @@ private static List> getTabletStatus(String dbName, String tblName, status = ReplicaStatus.DEAD; } else if (replica.getVersion() < visibleVersion || replica.getLastFailedVersion() > 0) { - status = ReplicaStatus.VERSION_ERROR; + status = ReplicaStatus.VERSION_ERROR; } else if (replica.getSchemaHash() != -1 && replica.getSchemaHash() != schemaHash) { status = ReplicaStatus.SCHEMA_ERROR; diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcCatalogResource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcCatalogResource.java index e77f9ffe15a62b..4340ff2cf962d8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcCatalogResource.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcCatalogResource.java @@ -144,7 +144,7 @@ public int getSignature(int signatureVersion) { adler32.update(type.name().getBytes(charsetName)); LOG.debug("signature. view type: {}", type.name()); // configs - for (Map.Entry config: configs.entrySet()) { + for (Map.Entry config : configs.entrySet()) { adler32.update(config.getKey().getBytes(charsetName)); adler32.update(config.getValue().getBytes(charsetName)); LOG.debug("signature. 
view config: {}", config); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java index 9c69ef787fb442..0fa55265d55080 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java @@ -111,8 +111,8 @@ public OdbcTable(long id, String name, List schema, Map private void validate(Map properties) throws DdlException { if (properties == null) { throw new DdlException("Please set properties of odbc table, " - + "they are: odbc_catalog_resource or [host, port, user, password, driver, odbc_type]" + - " and database and table"); + + "they are: odbc_catalog_resource or [host, port, user, password, driver, odbc_type]" + + " and database and table"); } if (properties.containsKey(ODBC_CATALOG_RESOURCE)) { odbcCatalogResourceName = properties.get(ODBC_CATALOG_RESOURCE); @@ -383,7 +383,7 @@ public OdbcTable clone() { return copied; } - public void resetIdsForRestore(Catalog catalog){ + public void resetIdsForRestore(Catalog catalog) { id = catalog.getNextId(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java index 064aad90e5178e..c7401e08aecafb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java @@ -629,7 +629,7 @@ public KeysType getKeysType() { public KeysType getKeysTypeByIndexId(long indexId) { MaterializedIndexMeta indexMeta = indexIdToMeta.get(indexId); Preconditions.checkNotNull(indexMeta, "index id:" + indexId + " meta is null"); - return indexMeta.getKeysType(); + return indexMeta.getKeysType(); } public PartitionInfo getPartitionInfo() { @@ -704,8 +704,8 @@ private Partition dropPartition(long dbId, String partitionName, boolean isForce idToPartition.remove(partition.getId()); nameToPartition.remove(partitionName); - Preconditions.checkState(partitionInfo.getType() == PartitionType.RANGE || - partitionInfo.getType() == PartitionType.LIST); + Preconditions.checkState(partitionInfo.getType() == PartitionType.RANGE + || partitionInfo.getType() == PartitionType.LIST); if (!isForceDrop) { // recycle partition @@ -1177,7 +1177,7 @@ public void readFields(DataInput in) throws IOException { partitionInfo = RangePartitionInfo.read(in); } else if (partType == PartitionType.LIST) { partitionInfo = ListPartitionInfo.read(in); - }else { + } else { throw new IOException("invalid partition type: " + partType); } @@ -1475,7 +1475,7 @@ public List getBaseSchema(boolean full) { public Column getBaseColumn(String columnName) { for (Column column : getBaseSchema()) { - if (column.getName().equalsIgnoreCase(columnName)){ + if (column.getName().equalsIgnoreCase(columnName)) { return column; } } @@ -1719,15 +1719,15 @@ public boolean meetAggDistributionRequirements(AggregateInfo aggregateInfo) { if (groupingExps == null || groupingExps.isEmpty()) { return false; } - List partitionExps = aggregateInfo.getPartitionExprs() != null ? - aggregateInfo.getPartitionExprs() : groupingExps; + List partitionExps = aggregateInfo.getPartitionExprs() != null + ? 
aggregateInfo.getPartitionExprs() : groupingExps; DistributionInfo distribution = getDefaultDistributionInfo(); - if(distribution instanceof HashDistributionInfo) { + if (distribution instanceof HashDistributionInfo) { List distributeColumns = - ((HashDistributionInfo)distribution).getDistributionColumns(); + ((HashDistributionInfo) distribution).getDistributionColumns(); PartitionInfo partitionInfo = getPartitionInfo(); if (partitionInfo instanceof RangePartitionInfo) { - List rangeColumns = ((RangePartitionInfo)partitionInfo).getPartitionColumns(); + List rangeColumns = partitionInfo.getPartitionColumns(); if (!distributeColumns.containsAll(rangeColumns)) { return false; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java index 73cbde708ea879..41a37240f3052f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java @@ -92,7 +92,7 @@ public PartitionType getType() { return type; } - public List getPartitionColumns(){ + public List getPartitionColumns() { return partitionColumns; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionItem.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionItem.java index f71b8c60b0dd1a..b62ece44748c7a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionItem.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionItem.java @@ -22,7 +22,7 @@ import java.util.Comparator; import java.util.Map; -public abstract class PartitionItem implements Comparable,Writable { +public abstract class PartitionItem implements Comparable, Writable { public static final Comparator> ITEM_MAP_ENTRY_COMPARATOR = Comparator.comparing(o -> ((ListPartitionItem) o.getValue()).getItems().iterator().next()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PrimitiveType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PrimitiveType.java index 702e9daf377af9..d88369b77e4a98 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PrimitiveType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PrimitiveType.java @@ -715,7 +715,7 @@ public boolean isDateType() { return (this == DATE || this == DATETIME); } - public boolean isArrayType(){ + public boolean isArrayType() { return this == ARRAY; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java index 5cc4ce13bcd332..090c78c4e27b38 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java @@ -82,8 +82,8 @@ private Range createAndCheckNewRange(PartitionKeyDesc partKeyDesc, boolean isFixedPartitionKeyValueType = partKeyDesc.getPartitionType() == PartitionKeyDesc.PartitionKeyValueType.FIXED; // generate partitionItemEntryList - List> partitionItemEntryList = isFixedPartitionKeyValueType ? - getPartitionItemEntryList(isTemp, false) : getPartitionItemEntryList(isTemp, true); + List> partitionItemEntryList = isFixedPartitionKeyValueType + ? 
getPartitionItemEntryList(isTemp, false) : getPartitionItemEntryList(isTemp, true); if (isFixedPartitionKeyValueType) { return createNewRangeForFixedPartitionValueType(partKeyDesc, partitionItemEntryList); @@ -137,10 +137,9 @@ private Range createNewRangeForFixedPartitionValueType(PartitionKe } private Range createNewRangeForLessThanPartitionValueType(PartitionKey newRangeUpper, - Range lastRange, Range currentRange) - throws AnalysisException, DdlException { - PartitionKey lowKey = lastRange == null ? - PartitionKey.createInfinityPartitionKey(partitionColumns, false) : lastRange.upperEndpoint(); + Range lastRange, Range currentRange) throws AnalysisException, DdlException { + PartitionKey lowKey = lastRange == null ? PartitionKey.createInfinityPartitionKey(partitionColumns, false) + : lastRange.upperEndpoint(); // check: [left, right), error if left equal right if (lowKey.compareTo(newRangeUpper) >= 0) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarFunction.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarFunction.java index 299160cc432a0a..c87fd6548191b3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarFunction.java @@ -271,16 +271,6 @@ public static ScalarFunction createBuiltin( new FunctionName(name), argTypes, retType, hasVarArgs, userVisible); fn.symbolName = symbol; fn.nullableMode = nullableMode; - -// try { -// fn.symbolName_ = fn.lookupSymbol(symbol, TSymbolType.UDF_EVALUATE, null, -// fn.hasVarArgs(), fn.getArgs()); -// } catch (AnalysisException e) { -// // This should never happen -// Preconditions.checkState(false, "Builtin symbol '" + symbol + "'" + argTypes -// + " not found!" + e.getStackTrace()); -// throw new RuntimeException("Builtin symbol not found!", e); -// } return fn; } @@ -333,13 +323,29 @@ public static ScalarFunction createUdf( return fn; } - public void setSymbolName(String s) { symbolName = s; } - public void setPrepareFnSymbol(String s) { prepareFnSymbol = s; } - public void setCloseFnSymbol(String s) { closeFnSymbol = s; } + public void setSymbolName(String s) { + symbolName = s; + } + + public void setPrepareFnSymbol(String s) { + prepareFnSymbol = s; + } + + public void setCloseFnSymbol(String s) { + closeFnSymbol = s; + } - public String getSymbolName() { return symbolName; } - public String getPrepareFnSymbol() { return prepareFnSymbol; } - public String getCloseFnSymbol() { return closeFnSymbol; } + public String getSymbolName() { + return symbolName; + } + + public String getPrepareFnSymbol() { + return prepareFnSymbol; + } + + public String getCloseFnSymbol() { + return closeFnSymbol; + } @Override public String toSql(boolean ifNotExists) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarType.java index f21798d51d8384..bb9a7a952055cc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarType.java @@ -414,7 +414,7 @@ public void toThrift(TTypeDesc container) { TScalarType scalarType = new TScalarType(); scalarType.setType(type.toThrift()); - switch(type) { + switch (type) { case VARCHAR: case CHAR: case HLL: @@ -444,18 +444,39 @@ public int decimalScale() { } @Override - public PrimitiveType getPrimitiveType() { return type; } - public int ordinal() { return type.ordinal(); } + public PrimitiveType getPrimitiveType() { + return type; + } + + 
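// The ScalarType hunks here expand single-line accessors into block form, one
// statement per line, with a blank line between methods. A sketch of the
// pattern (hypothetical field and method names, not part of this patch):
public class AccessorStyleExample {
    private int len;

    // old style, rejected: public int getLength() { return len; }
    public int getLength() {
        return len;
    }

    public void setLength(int len) {
        this.len = len;
    }
}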
public int ordinal() { + return type.ordinal(); + } @Override - public int getLength() { return len; } - public void setLength(int len) {this.len = len; } - public boolean isAssignedStrLenInColDefinition() { return isAssignedStrLenInColDefinition; } - public void setAssignedStrLenInColDefinition() { this.isAssignedStrLenInColDefinition = true; } + public int getLength() { + return len; + } + + public void setLength(int len) { + this.len = len; + } + + public boolean isAssignedStrLenInColDefinition() { + return isAssignedStrLenInColDefinition; + } + + public void setAssignedStrLenInColDefinition() { + this.isAssignedStrLenInColDefinition = true; + } // add scalar infix to override with getPrecision - public int getScalarScale() { return scale; } - public int getScalarPrecision() { return precision; } + public int getScalarScale() { + return scale; + } + + public int getScalarPrecision() { + return precision; + } public String getScalarPrecisionStr() { return precisionStr; @@ -561,7 +582,7 @@ public boolean equals(Object o) { if (!(o instanceof ScalarType)) { return false; } - ScalarType other = (ScalarType)o; + ScalarType other = (ScalarType) o; if (type != other.type) { return false; } @@ -571,7 +592,7 @@ public boolean equals(Object o) { if (type == PrimitiveType.VARCHAR) { return len == other.len; } - if ( type == PrimitiveType.DECIMALV2) { + if (type == PrimitiveType.DECIMALV2) { return precision == other.precision && scale == other.scale; } return true; @@ -588,7 +609,7 @@ public Type getMaxResolutionType() { } else if (isDecimalV2()) { return createDecimalV2TypeInternal(MAX_PRECISION, scale); } else if (isLargeIntType()) { - return ScalarType.LARGEINT; + return ScalarType.LARGEINT; } else { return ScalarType.INVALID; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java index c1b8ff513f60a2..a774ed7c158fd1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java @@ -67,7 +67,7 @@ public static Builder builder() { // like 'show table where_clause'. If we decide to support it, we must mock these related table here. public static Map TABLE_MAP = ImmutableMap - . 
builder() + .builder() .put("tables", new SchemaTable( SystemIdGenerator.getNextId(), "tables", diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Table.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Table.java index e7b2ed43a45aac..df70948dcdcc49 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Table.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Table.java @@ -180,7 +180,7 @@ public boolean writeLockIfExist() { public boolean tryWriteLock(long timeout, TimeUnit unit) { try { - return this.rwLock.writeLock().tryLock(timeout, unit); + return this.rwLock.writeLock().tryLock(timeout, unit); } catch (InterruptedException e) { LOG.warn("failed to try write lock at table[" + name + "]", e); return false; diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java index c1d3f5e2cf36e4..c96c59e1c73db8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java @@ -240,9 +240,9 @@ public List getQueryableReplicas(long visibleVersion) { } } final long finalMinVersionCount = minVersionCount; - return allQueryableReplica.stream().filter(replica -> replica.getVersionCount() == -1 || - replica.getVersionCount() < Config.min_version_count_indicate_replica_compaction_too_slow || - replica.getVersionCount() < finalMinVersionCount * QUERYABLE_TIMES_OF_MIN_VERSION_COUNT) + return allQueryableReplica.stream().filter(replica -> replica.getVersionCount() == -1 + || replica.getVersionCount() < Config.min_version_count_indicate_replica_compaction_too_slow + || replica.getVersionCount() < finalMinVersionCount * QUERYABLE_TIMES_OF_MIN_VERSION_COUNT) .collect(Collectors.toList()); } return allQueryableReplica; @@ -544,8 +544,8 @@ public Pair getHealthStatusWithPriority( // get the max version diff long delta = versions.get(versions.size() - 1) - versions.get(0); double ratio = (double) delta / versions.get(versions.size() - 1); - if (versions.get(versions.size() - 1) > Config.min_version_count_indicate_replica_compaction_too_slow && - ratio > Config.valid_version_count_delta_ratio_between_replicas) { + if (versions.get(versions.size() - 1) > Config.min_version_count_indicate_replica_compaction_too_slow + && ratio > Config.valid_version_count_delta_ratio_between_replicas) { return Pair.create(TabletStatus.REPLICA_COMPACTION_TOO_SLOW, Priority.HIGH); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java index ff37661e7d9c36..1d843226f8ea2d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java @@ -154,8 +154,8 @@ public void tabletReport(long backendId, Map backendTablets, // check and set path // path info of replica is only saved in Master FE - if (backendTabletInfo.isSetPathHash() && - replica.getPathHash() != backendTabletInfo.getPathHash()) { + if (backendTabletInfo.isSetPathHash() + && replica.getPathHash() != backendTabletInfo.getPathHash()) { replica.setPathHash(backendTabletInfo.getPathHash()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Type.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Type.java index 2a522baa3a3f2d..0ffbcdd37a552e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Type.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/Type.java @@ -143,7 +143,9 @@ public static ArrayList getSupportedTypes() { * The output of this is stored directly in the hive metastore as the column type. * The string must match exactly. */ - public final String toSql() { return toSql(0); } + public final String toSql() { + return toSql(0); + } /** * Recursive helper for toSql() to be implemented by subclasses. Keeps track of the @@ -154,7 +156,9 @@ public static ArrayList getSupportedTypes() { /** * Same as toSql() but adds newlines and spaces for better readability of nested types. */ - public String prettyPrint() { return prettyPrint(0); } + public String prettyPrint() { + return prettyPrint(0); + } /** * Pretty prints this type with lpad number of leading spaces. Used to implement @@ -182,9 +186,17 @@ public boolean isDecimalV2() { return isScalarType(PrimitiveType.DECIMALV2); } - public boolean isWildcardDecimal() { return false; } - public boolean isWildcardVarchar() { return false; } - public boolean isWildcardChar() { return false; } + public boolean isWildcardDecimal() { + return false; + } + + public boolean isWildcardVarchar() { + return false; + } + + public boolean isWildcardChar() { + return false; + } public boolean isStringType() { return isScalarType(PrimitiveType.VARCHAR) @@ -208,8 +220,8 @@ public boolean isOnlyMetricType() { } public static final String OnlyMetricTypeErrorMsg = - "Doris hll and bitmap column must use with specific function, and don't support filter or group by." + - "please run 'help hll' or 'help bitmap' in your mysql client."; + "Doris hll and bitmap column must use with specific function, and don't support filter or group by." + + "please run 'help hll' or 'help bitmap' in your mysql client."; public boolean isHllType() { return isScalarType(PrimitiveType.HLL); @@ -219,7 +231,9 @@ public boolean isBitmapType() { return isScalarType(PrimitiveType.BITMAP); } - public boolean isQuantileStateType() { return isScalarType(PrimitiveType.QUANTILE_STATE); } + public boolean isQuantileStateType() { + return isScalarType(PrimitiveType.QUANTILE_STATE); + } public boolean isObjectStored() { return isHllType() || isBitmapType() || isQuantileStateType(); @@ -234,9 +248,9 @@ public boolean isScalarType(PrimitiveType t) { } public boolean isFixedPointType() { - return isScalarType(PrimitiveType.TINYINT) || isScalarType(PrimitiveType.SMALLINT) || - isScalarType(PrimitiveType.INT) || isScalarType(PrimitiveType.BIGINT) || - isScalarType(PrimitiveType.LARGEINT); + return isScalarType(PrimitiveType.TINYINT) || isScalarType(PrimitiveType.SMALLINT) + || isScalarType(PrimitiveType.INT) || isScalarType(PrimitiveType.BIGINT) + || isScalarType(PrimitiveType.LARGEINT); } public boolean isFloatingPointType() { @@ -318,7 +332,9 @@ public boolean isSupported() { return true; } - public int getLength() { return -1; } + public int getLength() { + return -1; + } /** * Indicates whether we support partitioning tables on columns of this type. 
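// Several hunks in Type.java and Tablet.java move '&&', '||', and '+' from the
// end of a wrapped line to the start of the continuation line (the checkstyle
// OperatorWrap convention). A minimal sketch with a hypothetical predicate,
// not taken from this patch:
public class OperatorWrapExample {
    static boolean fitsInSmallInt(long v, boolean signed) {
        // old style ended lines with '&&' / '||'; the patched style starts
        // each continuation line with the operator instead:
        return signed
                && v >= Short.MIN_VALUE
                && v <= Short.MAX_VALUE;
    }

    public static void main(String[] args) {
        System.out.println(fitsInSmallInt(123L, true));
    }
}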
@@ -402,7 +418,7 @@ public static boolean canCastTo(Type t1, Type t2) { if (t1.isScalarType() && t2.isScalarType()) { return ScalarType.canCastTo((ScalarType) t1, (ScalarType) t2); } else if (t1.isArrayType() && t2.isArrayType()) { - return ArrayType.canCastTo((ArrayType)t1, (ArrayType)t2); + return ArrayType.canCastTo((ArrayType) t1, (ArrayType) t2); } return t1.isNull() || t1.getPrimitiveType() == PrimitiveType.VARCHAR; } @@ -450,9 +466,9 @@ public static Type getNextNumType(Type t) { * Returns null if this expr is not instance of StringLiteral or StringLiteral * inner value could not parse to long. otherwise return parsed Long result. */ - public static Long tryParseToLong(Expr expectStringExpr){ + public static Long tryParseToLong(Expr expectStringExpr) { if (expectStringExpr instanceof StringLiteral) { - String value = ((StringLiteral)expectStringExpr).getValue(); + String value = ((StringLiteral) expectStringExpr).getValue(); return Longs.tryParse(value); } return null; @@ -506,7 +522,7 @@ private boolean exceedsMaxNestingDepth(int d) { // TODO(dhc): fix this public static Type fromPrimitiveType(PrimitiveType type) { - switch(type) { + switch (type) { case BOOLEAN: return Type.BOOLEAN; case TINYINT: @@ -559,7 +575,7 @@ public static List toThrift(Type[] types) { public static List toThrift(ArrayList types) { ArrayList result = Lists.newArrayList(); - for (Type t: types) { + for (Type t : types) { result.add(t.toThrift()); } return result; @@ -968,7 +984,7 @@ public Integer getNumPrecRadix() { // DOUBLE compatibilityMatrix[DOUBLE.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE; - compatibilityMatrix[DOUBLE.ordinal()][DATETIME.ordinal()] = PrimitiveType.DOUBLE ; + compatibilityMatrix[DOUBLE.ordinal()][DATETIME.ordinal()] = PrimitiveType.DOUBLE; compatibilityMatrix[DOUBLE.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE; compatibilityMatrix[DOUBLE.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE; compatibilityMatrix[DOUBLE.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE; @@ -1141,8 +1157,8 @@ public static Type getCmpType(Type t1, Type t2) { } // int family type and char family type should cast to char family type - if ((t1ResultType.isFixedPointType() && t2ResultType.isCharFamily()) || - (t2ResultType.isFixedPointType() && t1ResultType.isCharFamily())) { + if ((t1ResultType.isFixedPointType() && t2ResultType.isCharFamily()) + || (t2ResultType.isFixedPointType() && t1ResultType.isCharFamily())) { return t1.isStringType() ? t1 : t2; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/View.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/View.java index c842776257c91b..26966ebe6365fd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/View.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/View.java @@ -178,8 +178,8 @@ public synchronized QueryStmt init() throws UserException { } // Make sure the view definition parses to a query statement. if (!(node instanceof QueryStmt)) { - throw new UserException(String.format("View definition of %s " + - "is not a query statement", name)); + throw new UserException(String.format("View definition of %s " + + "is not a query statement", name)); } queryStmtRef = new SoftReference((QueryStmt) node); return (QueryStmt) node; @@ -188,7 +188,9 @@ public synchronized QueryStmt init() throws UserException { /** * Returns the column labels the user specified in the WITH-clause. 
*/ - public List getOriginalColLabels() { return colLabels; } + public List getOriginalColLabels() { + return colLabels; + } /** * Returns the explicit column labels for this view, or null if they need to be derived @@ -240,7 +242,7 @@ public View clone() { return copied; } - public void resetIdsForRestore(Catalog catalog){ + public void resetIdsForRestore(Catalog catalog) { id = catalog.getNextId(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/BackendLoadStatistic.java b/fe/fe-core/src/main/java/org/apache/doris/clone/BackendLoadStatistic.java index 69474e57fc1e8e..bcc5edcab9a5e4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/BackendLoadStatistic.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/BackendLoadStatistic.java @@ -314,7 +314,7 @@ public BalanceStatus isFit(long tabletSize, TStorageMedium medium, return status; } - /* + /** * Check whether the backend can be more balanced if we migrate a tablet with size 'tabletSize' from * 'srcPath' to 'destPath'. * 1. recalculate the load score of src and dest path after migrating the tablet. @@ -345,17 +345,17 @@ public boolean isMoreBalanced(long srcPath, long destPath, long tabletId, long t } double avgUsedPercent = totalCapacity == 0 ? 0.0 : totalUsedCapacity / (double) totalCapacity; double currentSrcPathScore = srcPathStat.getCapacityB() == 0 - ? 0.0 : srcPathStat.getUsedCapacityB() / (double) srcPathStat.getCapacityB(); + ? 0.0 : srcPathStat.getUsedCapacityB() / (double) srcPathStat.getCapacityB(); double currentDestPathScore = destPathStat.getCapacityB() == 0 - ? 0.0 : destPathStat.getUsedCapacityB() / (double) destPathStat.getCapacityB(); + ? 0.0 : destPathStat.getUsedCapacityB() / (double) destPathStat.getCapacityB(); double newSrcPathScore = srcPathStat.getCapacityB() == 0 - ? 0.0 : (srcPathStat.getUsedCapacityB() - tabletSize) / (double) srcPathStat.getCapacityB(); + ? 0.0 : (srcPathStat.getUsedCapacityB() - tabletSize) / (double) srcPathStat.getCapacityB(); double newDestPathScore = destPathStat.getCapacityB() == 0 - ? 0.0 : (destPathStat.getUsedCapacityB() + tabletSize) / (double) destPathStat.getCapacityB(); + ? 0.0 : (destPathStat.getUsedCapacityB() + tabletSize) / (double) destPathStat.getCapacityB(); double currentDiff = Math.abs(currentSrcPathScore - avgUsedPercent) - + Math.abs(currentDestPathScore - avgUsedPercent); + + Math.abs(currentDestPathScore - avgUsedPercent); double newDiff = Math.abs(newSrcPathScore - avgUsedPercent) + Math.abs(newDestPathScore - avgUsedPercent); LOG.debug("after migrate {}(size: {}) from {} to {}, medium: {}, the load score changed."
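Since the isMoreBalanced hunk is one of the few places where reflowed lines carry real arithmetic, a compact, self-contained restatement of the check can help confirm the reformat preserves behavior. This is a sketch with made-up byte counts, not Doris code; it only mirrors the deviation comparison shown above:

    // Sketch of the isMoreBalanced deviation check, with hypothetical numbers.
    public class BalanceSketch {
        // Same guard as the patched code: a zero-capacity path scores 0.0.
        static double score(long usedBytes, long capacityBytes) {
            return capacityBytes == 0 ? 0.0 : usedBytes / (double) capacityBytes;
        }

        public static void main(String[] args) {
            long srcUsed = 800L;    // hypothetical source path usage
            long srcCap = 1000L;
            long destUsed = 200L;   // hypothetical destination path usage
            long destCap = 1000L;
            long tabletSize = 100L; // hypothetical tablet to migrate

            double avg = (srcUsed + destUsed) / (double) (srcCap + destCap);
            double currentDiff = Math.abs(score(srcUsed, srcCap) - avg)
                    + Math.abs(score(destUsed, destCap) - avg);
            double newDiff = Math.abs(score(srcUsed - tabletSize, srcCap) - avg)
                    + Math.abs(score(destUsed + tabletSize, destCap) - avg);

            // The move is "more balanced" exactly when the summed deviation shrinks.
            System.out.println(newDiff < currentDiff); // prints true for these numbers
        }
    }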
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java index 0bc22395798a66..111f8d0aa7a9ca 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java @@ -449,8 +449,8 @@ private boolean relocateAndBalance(GroupId groupId, Tag tag, Set unavailab if (!isThisRoundChanged) { // if all backends are checked but this round is not changed, // we should end the loop - LOG.info("all backends are checked but this round is not changed, " + - "end outer loop in colocate group {}", groupId); + LOG.info("all backends are checked but this round is not changed, " + + "end outer loop in colocate group {}", groupId); break; } // end inner loop diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java index 4728803fc7c073..0aec8f21f00d02 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java @@ -336,7 +336,7 @@ private ArrayList getDropPartitionClause(Database db, OlapT for (Long dropPartitionId : isContaineds.keySet()) { // Do not drop the partition "by force", or the partition will be dropped directly instead of being in // catalog recycle bin. This is for safety reasons. - if(!isContaineds.get(dropPartitionId)) { + if (!isContaineds.get(dropPartitionId)) { String dropPartitionName = olapTable.getPartition(dropPartitionId).getName(); dropPartitionClauses.add(new DropPartitionClause(false, dropPartitionName, false, false)); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java index 0962afd9e60a65..406bf53bb58b0f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java @@ -322,11 +322,11 @@ public static class TabletMove { @Override public String toString() { - return "ReplicaMove{" + - "tabletId=" + tabletId + - ", fromBe=" + fromBe + - ", toBe=" + toBe + - '}'; + return "ReplicaMove{" + + "tabletId=" + tabletId + + ", fromBe=" + fromBe + + ", toBe=" + toBe + + '}'; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java index d51cae0fe4a10c..121a8de8b5acea 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java @@ -69,13 +69,15 @@ public class TabletChecker extends MasterDaemon { private TabletScheduler tabletScheduler; private TabletSchedulerStat stat; - HashMap tabletCountByStatus = new HashMap() {{ - put("total", new AtomicLong(0L)); - put("unhealthy", new AtomicLong(0L)); - put("added", new AtomicLong(0L)); - put("in_sched", new AtomicLong(0L)); - put("not_ready", new AtomicLong(0L)); - }}; + HashMap tabletCountByStatus = new HashMap() { + { + put("total", new AtomicLong(0L)); + put("unhealthy", new AtomicLong(0L)); + put("added", new AtomicLong(0L)); + put("in_sched", new AtomicLong(0L)); + put("not_ready", new AtomicLong(0L)); + } + }; // db id -> (tbl id -> PrioPart) // priority of replicas of partitions in
this table will be set to VERY_HIGH if not healthy diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java index e228ee9cb9137e..6bc3947cc873fb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java @@ -695,8 +695,9 @@ public void chooseDestReplicaForVersionIncomplete(Map backendsWo // forever, because the replica in the DECOMMISSION state will not receive the load task. chosenReplica.setWatermarkTxnId(-1); chosenReplica.setState(ReplicaState.NORMAL); - LOG.info("choose replica {} on backend {} of tablet {} as dest replica for version incomplete," + - " and change state from DECOMMISSION to NORMAL", chosenReplica.getId(), chosenReplica.getBackendId(), tabletId); + LOG.info("choose replica {} on backend {} of tablet {} as dest replica for version incomplete," + + " and change state from DECOMMISSION to NORMAL", + chosenReplica.getId(), chosenReplica.getBackendId(), tabletId); } setDest(chosenReplica.getBackendId(), chosenReplica.getPathHash()); } @@ -1074,7 +1075,7 @@ public boolean adjustPriority(TabletSchedulerStat stat) { failedSchedCounter = 0; if (originDynamicPriority != dynamicPriority) { LOG.debug("downgrade dynamic priority from {} to {}, origin: {}, tablet: {}", - originDynamicPriority.name(), dynamicPriority.name(), origPriority.name(), tabletId); + originDynamicPriority.name(), dynamicPriority.name(), origPriority.name(), tabletId); stat.counterTabletPrioDowngraded.incrementAndGet(); return true; } @@ -1083,7 +1084,7 @@ public boolean adjustPriority(TabletSchedulerStat stat) { // no need to set lastSchedTime, lastSchedTime is set each time we schedule this tablet if (originDynamicPriority != dynamicPriority) { LOG.debug("upgrade dynamic priority from {} to {}, origin: {}, tablet: {}", - originDynamicPriority.name(), dynamicPriority.name(), origPriority.name(), tabletId); + originDynamicPriority.name(), dynamicPriority.name(), origPriority.name(), tabletId); stat.counterTabletPrioUpgraded.incrementAndGet(); return true; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java index a6c09ef04883bb..6b84a432ed286e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java @@ -545,13 +545,14 @@ private void scheduleTablet(TabletSchedCtx tabletCtx, AgentBatchTask batchTask) try { DatabaseTransactionMgr dbTransactionMgr = Catalog.getCurrentGlobalTransactionMgr().getDatabaseTransactionMgr(db.getId()); for (TransactionState transactionState : dbTransactionMgr.getPreCommittedTxnList()) { - if(transactionState.getTableIdList().contains(tbl.getId())) { + if (transactionState.getTableIdList().contains(tbl.getId())) { // If the table relates to a transaction in precommitted status, do not allow balance.
throw new SchedException(Status.UNRECOVERABLE, "There exists PRECOMMITTED transaction related to table"); } } } catch (AnalysisException e) { + // CHECKSTYLE IGNORE THIS LINE } } @@ -578,8 +579,8 @@ private void scheduleTablet(TabletSchedCtx tabletCtx, AgentBatchTask batchTask) } // to balance disks more accurately, we only schedule a tablet when we have recent stat info about the disk - if (tabletCtx.getType() == TabletSchedCtx.Type.BALANCE && - tabletCtx.getBalanceType() == TabletSchedCtx.BalanceType.DISK_BALANCE) { + if (tabletCtx.getType() == TabletSchedCtx.Type.BALANCE + && tabletCtx.getBalanceType() == TabletSchedCtx.BalanceType.DISK_BALANCE) { checkDiskBalanceLastSuccTime(tabletCtx.getTempSrcBackendId(), tabletCtx.getTempSrcPathHash()); } // we are not concerned with priority here. diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java index 35902463554c80..6dcb401f43cc2e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java @@ -74,10 +74,10 @@ public boolean equals(Object o) { return false; } PartitionMove that = (PartitionMove) o; - return Objects.equal(partitionId, that.partitionId) && - Objects.equal(indexId, that.indexId) && - Objects.equal(fromBe, that.fromBe) && - Objects.equal(toBe, that.toBe); + return Objects.equal(partitionId, that.partitionId) + && Objects.equal(indexId, that.indexId) + && Objects.equal(fromBe, that.fromBe) + && Objects.equal(toBe, that.toBe); } @Override @@ -87,11 +87,11 @@ public int hashCode() { @Override public String toString() { - return "ReplicaMove{" + - "pid=" + partitionId + "-" + indexId + - ", from=" + fromBe + - ", to=" + toBe + - '}'; + return "ReplicaMove{" + + "pid=" + partitionId + "-" + indexId + + ", from=" + fromBe + + ", to=" + toBe + + '}'; } } @@ -189,8 +189,8 @@ private PartitionMove getNextMove(TreeMultimap beByTotalReplicaCount // improves cluster skew. NavigableSet maxSet = skewMap.get(maxPartitionSkew); for (PartitionBalanceInfo pbi : maxSet) { - Preconditions.checkArgument(!pbi.beByReplicaCount.isEmpty(), "no information on replicas of " + - "partition " + pbi.partitionId + "-" + pbi.indexId); + Preconditions.checkArgument(!pbi.beByReplicaCount.isEmpty(), "no information on replicas of " + + "partition " + pbi.partitionId + "-" + pbi.indexId); Long minReplicaCount = pbi.beByReplicaCount.keySet().first(); Long maxReplicaCount = pbi.beByReplicaCount.keySet().last(); @@ -216,7 +216,8 @@ private PartitionMove getNextMove(TreeMultimap beByTotalReplicaCount continue; } - Long minLoadedBe, maxLoadedBe; + Long minLoadedBe; + Long maxLoadedBe; if (equalSkewOption == EqualSkewOption.PICK_FIRST) { // beWithExtremumCount lists & intersection lists are in natural ordering minLoadedBe = minLoaded.intersection.isEmpty() ?
minLoaded.beWithExtremumCount.get(0) : minLoaded.intersection.get(0); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/CIDR.java b/fe/fe-core/src/main/java/org/apache/doris/common/CIDR.java index dbe2692fc84b87..4cef7be02537c1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/CIDR.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/CIDR.java @@ -56,7 +56,7 @@ public class CIDR { // Specify IP in CIDR format like: new IPv4("192.168.0.8/16"); public CIDR(String cidrNotation) { // if there is no mask, fill "/32" as suffix - if(cidrNotation.indexOf("/") == -1) { + if (!cidrNotation.contains("/")) { cidrNotation += "/32"; } @@ -78,7 +78,7 @@ public boolean contains(String address) { return contains(toInteger(address)); } - // Get the IP in symbolic form, i.e. xxx.xxx.xxx.xxx + // Get the IP in symbolic form, i.e. xxx.xxx.xxx.xxx public String getIP() { return format(address); } @@ -101,7 +101,7 @@ private String format(int val) { return sb.toString(); } - // Get the IP and netmask in CIDR form, i.e. xxx.xxx.xxx.xxx/xx + // Get the IP and netmask in CIDR form, i.e. xxx.xxx.xxx.xxx/xx public String getCIDR() { int numberOfBits = maskBitNumMap.get(netmask); return format(address & netmask) + "/" + numberOfBits; @@ -150,7 +150,7 @@ private long networkLong() { return network & UNSIGNED_INT_MASK; } - private long broadcastLong(){ + private long broadcastLong() { long network = (address & netmask); long broadcast = network | ~(netmask); return broadcast & UNSIGNED_INT_MASK; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ConfigBase.java b/fe/fe-core/src/main/java/org/apache/doris/common/ConfigBase.java index bad29927a708cb..213e8f59072f02 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/ConfigBase.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/ConfigBase.java @@ -61,7 +61,7 @@ public interface ConfHandler { static class DefaultConfHandler implements ConfHandler { @Override - public void handle(Field field, String confVal) throws Exception{ + public void handle(Field field, String confVal) throws Exception { setConfigField(field, confVal); } } @@ -299,7 +299,7 @@ public synchronized static void setMutableConfig(String key, String value) throw if (!anno.mutable()) { throw new DdlException("Config '" + key + "' is not mutable"); } - if (anno.masterOnly() && !Catalog.getCurrentCatalog().isMaster()){ + if (anno.masterOnly() && !Catalog.getCurrentCatalog().isMaster()) { throw new DdlException("Config '" + key + "' is master only"); } @@ -347,7 +347,7 @@ public synchronized static void persistConfig(Map customConf, bo File file = new File(customConfFile); if (!file.exists()) { file.createNewFile(); - } else if (resetPersist){ + } else if (resetPersist) { // clear the customConfFile content try (PrintWriter writer = new PrintWriter(file)) { writer.print(""); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ErrorCode.java b/fe/fe-core/src/main/java/org/apache/doris/common/ErrorCode.java index 39b705fa64f4b1..a1ae7c0f5f74e7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/ErrorCode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/ErrorCode.java @@ -45,8 +45,8 @@ public enum ErrorCode { ERR_CANT_READ_DIR(1018, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't read dir of '%s' (errno: %d)"), ERR_CANT_SET_WD(1019, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't change dir to '%s' (errno: %d)"), ERR_CHECKREAD(1020, new byte[]{'H', 'Y', '0', '0', '0'}, "Record has changed since last read in table '%s'"), - 
ERR_DISK_FULL(1021, new byte[]{'H', 'Y', '0', '0', '0'}, "Disk full (%s); waiting for someone to free some space." + - ".."), + ERR_DISK_FULL(1021, new byte[]{'H', 'Y', '0', '0', '0'}, "Disk full (%s); waiting for someone to free some space." + + ".."), ERR_DUP_KEY(1022, new byte[]{'2', '3', '0', '0', '0'}, "Can't write; duplicate key in table '%s'"), ERR_ERROR_ON_CLOSE(1023, new byte[]{'H', 'Y', '0', '0', '0'}, "Error on close of '%s' (errno: %d)"), ERR_ERROR_ON_READ(1024, new byte[]{'H', 'Y', '0', '0', '0'}, "Error reading file '%s' (errno: %d)"), @@ -62,23 +62,23 @@ public enum ErrorCode { ERR_NOT_KEYFILE(1034, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect key file for table '%s'; try to repair it"), ERR_OLD_KEYFILE(1035, new byte[]{'H', 'Y', '0', '0', '0'}, "Old key file for table '%s'; repair it!"), ERR_OPEN_AS_READONLY(1036, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s' is read only"), - ERR_OUTOFMEMORY(1037, new byte[]{'H', 'Y', '0', '0', '1'}, "Out of memory; restart server and try again (needed " + - "%d bytes)"), - ERR_OUT_OF_SORTMEMORY(1038, new byte[]{'H', 'Y', '0', '0', '1'}, "Out of sort memory, consider increasing server " + - "sort buffer size"), - ERR_UNEXPECTED_EOF(1039, new byte[]{'H', 'Y', '0', '0', '0'}, "Unexpected EOF found when reading file '%s' " + - "(Errno: %d)"), + ERR_OUTOFMEMORY(1037, new byte[]{'H', 'Y', '0', '0', '1'}, "Out of memory; restart server and try again (needed " + + "%d bytes)"), + ERR_OUT_OF_SORTMEMORY(1038, new byte[]{'H', 'Y', '0', '0', '1'}, "Out of sort memory, consider increasing server " + + "sort buffer size"), + ERR_UNEXPECTED_EOF(1039, new byte[]{'H', 'Y', '0', '0', '0'}, "Unexpected EOF found when reading file '%s' " + + "(Errno: %d)"), ERR_CON_COUNT_ERROR(1040, new byte[]{'0', '8', '0', '0', '4'}, "Too many connections"), - ERR_OUT_OF_RESOURCES(1041, new byte[]{'H', 'Y', '0', '0', '0'}, "Out of memory; check if mysqld or some other " + - "process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more " + - "memory or " + - "you can add more swap space"), + ERR_OUT_OF_RESOURCES(1041, new byte[]{'H', 'Y', '0', '0', '0'}, "Out of memory; check if mysqld or some other " + + "process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more " + + "memory or " + + "you can add more swap space"), ERR_BAD_HOST_ERROR(1042, new byte[]{'0', '8', 'S', '0', '1'}, "Can't get hostname for your address"), ERR_HANDSHAKE_ERROR(1043, new byte[]{'0', '8', 'S', '0', '1'}, "Bad handshake"), - ERR_DBACCESS_DENIED_ERROR(1044, new byte[]{'4', '2', '0', '0', '0'}, "Access denied for user '%s'@'%s' to " + - "database '%s'"), - ERR_ACCESS_DENIED_ERROR(1045, new byte[]{'2', '8', '0', '0', '0'}, "Access denied for user '%s' (using " + - "password: %s)"), + ERR_DBACCESS_DENIED_ERROR(1044, new byte[]{'4', '2', '0', '0', '0'}, "Access denied for user '%s'@'%s' to " + + "database '%s'"), + ERR_ACCESS_DENIED_ERROR(1045, new byte[]{'2', '8', '0', '0', '0'}, "Access denied for user '%s' (using " + + "password: %s)"), ERR_NO_DB_ERROR(1046, new byte[]{'3', 'D', '0', '0', '0'}, "No database selected"), ERR_UNKNOWN_COM_ERROR(1047, new byte[]{'0', '8', 'S', '0', '1'}, "Unknown command"), ERR_BAD_NULL_ERROR(1048, new byte[]{'2', '3', '0', '0', '0'}, "Column '%s' cannot be null"), @@ -90,8 +90,8 @@ public enum ErrorCode { ERR_BAD_FIELD_ERROR(1054, new byte[]{'4', '2', 'S', '2', '2'}, "Unknown column '%s' in '%s'"), ERR_WRONG_FIELD_WITH_GROUP(1055, new byte[]{'4', '2', '0', '0', '0'}, "'%s' isn't in GROUP 
BY"), ERR_WRONG_GROUP_FIELD(1056, new byte[]{'4', '2', '0', '0', '0'}, "Can't group on '%s'"), - ERR_WRONG_SUM_SELECT(1057, new byte[]{'4', '2', '0', '0', '0'}, "Statement has sum functions and columns in same " + - "statement"), + ERR_WRONG_SUM_SELECT(1057, new byte[]{'4', '2', '0', '0', '0'}, "Statement has sum functions and columns in same " + + "statement"), ERR_WRONG_VALUE_COUNT(1058, new byte[]{'2', '1', 'S', '0', '1'}, "Column count doesn't match value count"), ERR_TOO_LONG_IDENT(1059, new byte[]{'4', '2', '0', '0', '0'}, "Identifier name '%s' is too long"), ERR_DUP_FIELDNAME(1060, new byte[]{'4', '2', 'S', '2', '1'}, "Duplicate column name '%s'"), @@ -104,83 +104,83 @@ public enum ErrorCode { ERR_INVALID_DEFAULT(1067, new byte[]{'4', '2', '0', '0', '0'}, "Invalid default value for '%s'"), ERR_MULTIPLE_PRI_KEY(1068, new byte[]{'4', '2', '0', '0', '0'}, "Multiple primary key defined"), ERR_TOO_MANY_KEYS(1069, new byte[]{'4', '2', '0', '0', '0'}, "Too many keys specified; max %d keys allowed"), - ERR_TOO_MANY_KEY_PARTS(1070, new byte[]{'4', '2', '0', '0', '0'}, "Too many key parts specified; max %d parts " + - "allowed"), - ERR_TOO_LONG_KEY(1071, new byte[]{'4', '2', '0', '0', '0'}, "Specified key was too long; max key length is %d " + - "bytes"), + ERR_TOO_MANY_KEY_PARTS(1070, new byte[]{'4', '2', '0', '0', '0'}, "Too many key parts specified; max %d parts " + + "allowed"), + ERR_TOO_LONG_KEY(1071, new byte[]{'4', '2', '0', '0', '0'}, "Specified key was too long; max key length is %d " + + "bytes"), ERR_KEY_COLUMN_DOES_NOT_EXITS(1072, new byte[]{'4', '2', '0', '0', '0'}, "Key column '%s' doesn't exist in table"), - ERR_BLOB_USED_AS_KEY(1073, new byte[]{'4', '2', '0', '0', '0'}, "BLOB column '%s' can't be used in key " + - "specification with the used table type"), - ERR_TOO_BIG_FIELDLENGTH(1074, new byte[]{'4', '2', '0', '0', '0'}, "Column length too big for column '%s' (max = " + - "%d); use BLOB or TEXT instead"), - ERR_WRONG_AUTO_KEY(1075, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect table definition; there can be only one " + - "auto column and it must be defined as a key"), - ERR_READY(1076, new byte[]{'H', 'Y', '0', '0', '0'}, "%s: ready for connections. Version: '%s' socket: '%s' port:" + - " %d"), + ERR_BLOB_USED_AS_KEY(1073, new byte[]{'4', '2', '0', '0', '0'}, "BLOB column '%s' can't be used in key " + + "specification with the used table type"), + ERR_TOO_BIG_FIELDLENGTH(1074, new byte[]{'4', '2', '0', '0', '0'}, "Column length too big for column '%s' (max = " + + "%d); use BLOB or TEXT instead"), + ERR_WRONG_AUTO_KEY(1075, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect table definition; there can be only one " + + "auto column and it must be defined as a key"), + ERR_READY(1076, new byte[]{'H', 'Y', '0', '0', '0'}, "%s: ready for connections. Version: '%s' socket: '%s' port:" + + " %d"), ERR_NORMAL_SHUTDOWN(1077, new byte[]{'H', 'Y', '0', '0', '0'}, "%s: Normal shutdown"), ERR_GOT_SIGNAL(1078, new byte[]{'H', 'Y', '0', '0', '0'}, "%s: Got signal %d. 
Aborting!"), ERR_SHUTDOWN_COMPLETE(1079, new byte[]{'H', 'Y', '0', '0', '0'}, "%s: Shutdown complete"), ERR_FORCING_CLOSE(1080, new byte[]{'0', '8', 'S', '0', '1'}, "%s: Forcing close of thread %d user: '%s'"), ERR_IPSOCK_ERROR(1081, new byte[]{'0', '8', 'S', '0', '1'}, "Can't create IP socket"), - ERR_NO_SUCH_INDEX(1082, new byte[]{'4', '2', 'S', '1', '2'}, "Table '%s' has no index like the one used in CREATE" + - " INDEX; recreate the table"), - ERR_WRONG_FIELD_TERMINATORS(1083, new byte[]{'4', '2', '0', '0', '0'}, "Field separator argument is not what is " + - "expected; check the manual"), - ERR_BLOBS_AND_NO_TERMINATED(1084, new byte[]{'4', '2', '0', '0', '0'}, "You can't use fixed rowlength with BLOBs;" + - " please use 'fields terminated by'"), - ERR_TEXTFILE_NOT_READABLE(1085, new byte[]{'H', 'Y', '0', '0', '0'}, "The file '%s' must be in the database " + - "directory or be readable by all"), + ERR_NO_SUCH_INDEX(1082, new byte[]{'4', '2', 'S', '1', '2'}, "Table '%s' has no index like the one used in CREATE" + + " INDEX; recreate the table"), + ERR_WRONG_FIELD_TERMINATORS(1083, new byte[]{'4', '2', '0', '0', '0'}, "Field separator argument is not what is " + + "expected; check the manual"), + ERR_BLOBS_AND_NO_TERMINATED(1084, new byte[]{'4', '2', '0', '0', '0'}, "You can't use fixed rowlength with BLOBs;" + + " please use 'fields terminated by'"), + ERR_TEXTFILE_NOT_READABLE(1085, new byte[]{'H', 'Y', '0', '0', '0'}, "The file '%s' must be in the database " + + "directory or be readable by all"), ERR_FILE_EXISTS_ERROR(1086, new byte[]{'H', 'Y', '0', '0', '0'}, "File '%s' already exists"), ERR_LOAD_INF(1087, new byte[]{'H', 'Y', '0', '0', '0'}, "Records: %d Deleted: %d Skipped: %d Warnings: %d"), ERR_ALTER_INF(1088, new byte[]{'H', 'Y', '0', '0', '0'}, "Records: %d Duplicates: %d"), - ERR_WRONG_SUB_KEY(1089, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect prefix key; the used key part isn't a " + - "string, the used length is longer than the key part, or the storage engine doesn't support unique prefix" + - " keys"), - ERR_CANT_REMOVE_ALL_FIELDS(1090, new byte[]{'4', '2', '0', '0', '0'}, "You can't delete all columns with ALTER " + - "TABLE; use DROP TABLE instead"), - ERR_CANT_DROP_FIELD_OR_KEY(1091, new byte[]{'4', '2', '0', '0', '0'}, "Can't DROP '%s'; check that column/key " + - "exists"), + ERR_WRONG_SUB_KEY(1089, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect prefix key; the used key part isn't a " + + "string, the used length is longer than the key part, or the storage engine doesn't support unique prefix" + + " keys"), + ERR_CANT_REMOVE_ALL_FIELDS(1090, new byte[]{'4', '2', '0', '0', '0'}, "You can't delete all columns with ALTER " + + "TABLE; use DROP TABLE instead"), + ERR_CANT_DROP_FIELD_OR_KEY(1091, new byte[]{'4', '2', '0', '0', '0'}, "Can't DROP '%s'; check that column/key " + + "exists"), ERR_INSERT_INF(1092, new byte[]{'H', 'Y', '0', '0', '0'}, "Records: %d Duplicates: %d Warnings: %d"), - ERR_UPDATE_TABLE_USED(1093, new byte[]{'H', 'Y', '0', '0', '0'}, "You can't specify target table '%s' for update " + - "in FROM clause"), + ERR_UPDATE_TABLE_USED(1093, new byte[]{'H', 'Y', '0', '0', '0'}, "You can't specify target table '%s' for update " + + "in FROM clause"), ERR_NO_SUCH_THREAD(1094, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown thread id: %d"), ERR_KILL_DENIED_ERROR(1095, new byte[]{'H', 'Y', '0', '0', '0'}, "You are not owner of thread %d"), ERR_NO_TABLES_USED(1096, new byte[]{'H', 'Y', '0', '0', '0'}, "No tables used"), ERR_TOO_BIG_SET(1097, new byte[]{'H', 'Y', 
'0', '0', '0'}, "Too many strings for column %s and SET"), ERR_NO_UNIQUE_LOGFILE(1098, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't generate a unique log-filename %s.(1-999)"), - ERR_TABLE_NOT_LOCKED_FOR_WRITE(1099, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s' was locked with a READ lock" + - " and can't be updated"), + ERR_TABLE_NOT_LOCKED_FOR_WRITE(1099, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s' was locked with a READ lock" + + " and can't be updated"), ERR_TABLE_NOT_LOCKED(1100, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s' was not locked with LOCK TABLES"), ERR_UNUSED_17(1101, new byte[]{}, "You should never see it"), ERR_WRONG_DB_NAME(1102, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect database name '%s'"), ERR_WRONG_TABLE_NAME(1103, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect table name '%s'"), - ERR_TOO_BIG_SELECT(1104, new byte[]{'4', '2', '0', '0', '0'}, "The SELECT would examine more than MAX_JOIN_SIZE " + - "rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay"), + ERR_TOO_BIG_SELECT(1104, new byte[]{'4', '2', '0', '0', '0'}, "The SELECT would examine more than MAX_JOIN_SIZE " + + "rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay"), ERR_UNKNOWN_ERROR(1105, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown error"), ERR_UNKNOWN_PROCEDURE(1106, new byte[]{'4', '2', '0', '0', '0'}, "Unknown procedure '%s'"), - ERR_WRONG_PARAMCOUNT_TO_PROCEDURE(1107, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect parameter count to " + - "procedure '%s'"), - ERR_WRONG_PARAMETERS_TO_PROCEDURE(1108, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect parameters to procedure " + - "'%s'"), + ERR_WRONG_PARAMCOUNT_TO_PROCEDURE(1107, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect parameter count to " + + "procedure '%s'"), + ERR_WRONG_PARAMETERS_TO_PROCEDURE(1108, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect parameters to procedure " + + "'%s'"), ERR_UNKNOWN_TABLE(1109, new byte[]{'4', '2', 'S', '0', '2'}, "Unknown table '%s' in %s"), ERR_FIELD_SPECIFIED_TWICE(1110, new byte[]{'4', '2', '0', '0', '0'}, "Column '%s' specified twice"), ERR_INVALID_GROUP_FUNC_USE(1111, new byte[]{'H', 'Y', '0', '0', '0'}, "Invalid use of group function"), - ERR_UNSUPPORTED_EXTENSION(1112, new byte[]{'4', '2', '0', '0', '0'}, "Table '%s' uses an extension that doesn't " + - "exist in this MariaDB version"), + ERR_UNSUPPORTED_EXTENSION(1112, new byte[]{'4', '2', '0', '0', '0'}, "Table '%s' uses an extension that doesn't " + + "exist in this MariaDB version"), ERR_TABLE_MUST_HAVE_COLUMNS(1113, new byte[]{'4', '2', '0', '0', '0'}, "A table must have at least 1 column"), ERR_RECORD_FILE_FULL(1114, new byte[]{'H', 'Y', '0', '0', '0'}, "The table '%s' is full"), ERR_UNKNOWN_CHARACTER_SET(1115, new byte[]{'4', '2', '0', '0', '0'}, "Unknown character set: '%s'"), - ERR_TOO_MANY_TABLES(1116, new byte[]{'H', 'Y', '0', '0', '0'}, "Too many tables; MariaDB can only use %d tables " + - "in a join"), + ERR_TOO_MANY_TABLES(1116, new byte[]{'H', 'Y', '0', '0', '0'}, "Too many tables; MariaDB can only use %d tables " + + "in a join"), ERR_TOO_MANY_FIELDS(1117, new byte[]{'H', 'Y', '0', '0', '0'}, "Too many columns"), - ERR_TOO_BIG_ROWSIZE(1118, new byte[]{'4', '2', '0', '0', '0'}, "Row size too large. The maximum row size for the " + - "used table type, not counting BLOBs, is %d. 
You have to change some columns to TEXT or BLOBs"), - ERR_STACK_OVERRUN(1119, new byte[]{'H', 'Y', '0', '0', '0'}, "Thread stack overrun: Used: %d of a %d stack. Use" + - " 'mysqld --thread_stack=#' to specify a bigger stack if needed"), - ERR_WRONG_OUTER_JOIN(1120, new byte[]{'4', '2', '0', '0', '0'}, "Cross dependency found in OUTER JOIN; examine " + - "your ON conditions"), - ERR_NULL_COLUMN_IN_INDEX(1121, new byte[]{'4', '2', '0', '0', '0'}, "Table handler doesn't support NULL in given " + - "index. Please change column '%s' to be NOT NULL or use another handler"), + ERR_TOO_BIG_ROWSIZE(1118, new byte[]{'4', '2', '0', '0', '0'}, "Row size too large. The maximum row size for the " + + "used table type, not counting BLOBs, is %d. You have to change some columns to TEXT or BLOBs"), + ERR_STACK_OVERRUN(1119, new byte[]{'H', 'Y', '0', '0', '0'}, "Thread stack overrun: Used: %d of a %d stack. Use" + + " 'mysqld --thread_stack=#' to specify a bigger stack if needed"), + ERR_WRONG_OUTER_JOIN(1120, new byte[]{'4', '2', '0', '0', '0'}, "Cross dependency found in OUTER JOIN; examine " + + "your ON conditions"), + ERR_NULL_COLUMN_IN_INDEX(1121, new byte[]{'4', '2', '0', '0', '0'}, "Table handler doesn't support NULL in given " + + "index. Please change column '%s' to be NOT NULL or use another handler"), ERR_CANT_FIND_UDF(1122, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't load function '%s'"), ERR_CANT_INITIALIZE_UDF(1123, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't initialize function '%s'; %s"), ERR_UDF_NO_PATHS(1124, new byte[]{'H', 'Y', '0', '0', '0'}, "No paths allowed for shared library"), @@ -188,51 +188,51 @@ public enum ErrorCode { ERR_CANT_OPEN_LIBRARY(1126, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't open shared library '%s' (Errno: %d %s)"), ERR_CANT_FIND_DL_ENTRY(1127, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't find symbol '%s' in library"), ERR_FUNCTION_NOT_DEFINED(1128, new byte[]{'H', 'Y', '0', '0', '0'}, "Function '%s' is not defined"), - ERR_HOST_IS_BLOCKED(1129, new byte[]{'H', 'Y', '0', '0', '0'}, "Host '%s' is blocked because of many connection " + - "errors; unblock with 'mysqladmin flush-hosts'"), - ERR_HOST_NOT_PRIVILEGED(1130, new byte[]{'H', 'Y', '0', '0', '0'}, "Host '%s' is not allowed to connect to this " + - "MariaDB server"), - ERR_PASSWORD_ANONYMOUS_USER(1131, new byte[]{'4', '2', '0', '0', '0'}, "You are using MariaDB as an anonymous " + - "user and anonymous users are not allowed to change passwords"), - ERR_PASSWORD_NOT_ALLOWED(1132, new byte[]{'4', '2', '0', '0', '0'}, "You must have privileges to update tables in" + - " the mysql database to be able to change passwords for others"), + ERR_HOST_IS_BLOCKED(1129, new byte[]{'H', 'Y', '0', '0', '0'}, "Host '%s' is blocked because of many connection " + + "errors; unblock with 'mysqladmin flush-hosts'"), + ERR_HOST_NOT_PRIVILEGED(1130, new byte[]{'H', 'Y', '0', '0', '0'}, "Host '%s' is not allowed to connect to this " + + "MariaDB server"), + ERR_PASSWORD_ANONYMOUS_USER(1131, new byte[]{'4', '2', '0', '0', '0'}, "You are using MariaDB as an anonymous " + + "user and anonymous users are not allowed to change passwords"), + ERR_PASSWORD_NOT_ALLOWED(1132, new byte[]{'4', '2', '0', '0', '0'}, "You must have privileges to update tables in" + + " the mysql database to be able to change passwords for others"), ERR_PASSWORD_NO_MATCH(1133, new byte[]{'4', '2', '0', '0', '0'}, "Can't find any matching row in the user table"), ERR_UPDATE_INF(1134, new byte[]{'H', 'Y', '0', '0', '0'}, "Rows matched: %d Changed: %d Warnings: 
%d"), - ERR_CANT_CREATE_THREAD(1135, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't create a new thread (Errno %d); if you " + - "are not out of available memory, you can consult the manual for a possible OS-dependent bug"), - ERR_WRONG_VALUE_COUNT_ON_ROW(1136, new byte[]{'2', '1', 'S', '0', '1'}, "Column count doesn't match value count " + - "at row %d"), + ERR_CANT_CREATE_THREAD(1135, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't create a new thread (Errno %d); if you " + + "are not out of available memory, you can consult the manual for a possible OS-dependent bug"), + ERR_WRONG_VALUE_COUNT_ON_ROW(1136, new byte[]{'2', '1', 'S', '0', '1'}, "Column count doesn't match value count " + + "at row %d"), ERR_CANT_REOPEN_TABLE(1137, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't reopen table: '%s'"), ERR_INVALID_USE_OF_NULL(1138, new byte[]{'2', '2', '0', '0', '4'}, "Invalid use of NULL value"), ERR_REGEXP_ERROR(1139, new byte[]{'4', '2', '0', '0', '0'}, "Got error '%s' from regexp"), - ERR_MIX_OF_GROUP_FUNC_AND_FIELDS(1140, new byte[]{'4', '2', '0', '0', '0'}, "Mixing of GROUP columns (MIN(),MAX()" + - ",COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause"), - ERR_NONEXISTING_GRANT(1141, new byte[]{'4', '2', '0', '0', '0'}, "There is no such grant defined for user '%s' on" + - " host '%s'"), - ERR_TABLEACCESS_DENIED_ERROR(1142, new byte[]{'4', '2', '0', '0', '0'}, "%s command denied to user '%s'@'%s' for " + - "table '%s'"), - ERR_COLUMNACCESS_DENIED_ERROR(1143, new byte[]{'4', '2', '0', '0', '0'}, "%s command denied to user '%s'@'%s' for" + - " column '%s' in table '%s'"), - ERR_ILLEGAL_GRANT_FOR_TABLE(1144, new byte[]{'4', '2', '0', '0', '0'}, "Illegal GRANT/REVOKE command; please " + - "consult the manual to see which privileges can be used"), - ERR_GRANT_WRONG_HOST_OR_USER(1145, new byte[]{'4', '2', '0', '0', '0'}, "The host or user argument to GRANT is " + - "too long"), + ERR_MIX_OF_GROUP_FUNC_AND_FIELDS(1140, new byte[]{'4', '2', '0', '0', '0'}, "Mixing of GROUP columns (MIN(),MAX()" + + ",COUNT(),...) 
with no GROUP columns is illegal if there is no GROUP BY clause"), + ERR_NONEXISTING_GRANT(1141, new byte[]{'4', '2', '0', '0', '0'}, "There is no such grant defined for user '%s' on" + + " host '%s'"), + ERR_TABLEACCESS_DENIED_ERROR(1142, new byte[]{'4', '2', '0', '0', '0'}, "%s command denied to user '%s'@'%s' for " + + "table '%s'"), + ERR_COLUMNACCESS_DENIED_ERROR(1143, new byte[]{'4', '2', '0', '0', '0'}, "%s command denied to user '%s'@'%s' for" + + " column '%s' in table '%s'"), + ERR_ILLEGAL_GRANT_FOR_TABLE(1144, new byte[]{'4', '2', '0', '0', '0'}, "Illegal GRANT/REVOKE command; please " + + "consult the manual to see which privileges can be used"), + ERR_GRANT_WRONG_HOST_OR_USER(1145, new byte[]{'4', '2', '0', '0', '0'}, "The host or user argument to GRANT is " + + "too long"), ERR_NO_SUCH_TABLE(1146, new byte[]{'4', '2', 'S', '0', '2'}, "Table '%s.%s' doesn't exist"), - ERR_NONEXISTING_TABLE_GRANT(1147, new byte[]{'4', '2', '0', '0', '0'}, "There is no such grant defined for user " + - "'%s' on host '%s' on table '%s'"), - ERR_NOT_ALLOWED_COMMAND(1148, new byte[]{'4', '2', '0', '0', '0'}, "The used command is not allowed with this " + - "MariaDB version"), - ERR_SYNTAX_ERROR(1149, new byte[]{'4', '2', '0', '0', '0'}, "You have an error in your SQL syntax; check the " + - "manual that corresponds to your MariaDB server version for the right syntax to use"), - ERR_DELAYED_CANT_CHANGE_LOCK(1150, new byte[]{'H', 'Y', '0', '0', '0'}, "Delayed insert thread couldn't get " + - "requested lock for table %s"), + ERR_NONEXISTING_TABLE_GRANT(1147, new byte[]{'4', '2', '0', '0', '0'}, "There is no such grant defined for user " + + "'%s' on host '%s' on table '%s'"), + ERR_NOT_ALLOWED_COMMAND(1148, new byte[]{'4', '2', '0', '0', '0'}, "The used command is not allowed with this " + + "MariaDB version"), + ERR_SYNTAX_ERROR(1149, new byte[]{'4', '2', '0', '0', '0'}, "You have an error in your SQL syntax; check the " + + "manual that corresponds to your MariaDB server version for the right syntax to use"), + ERR_DELAYED_CANT_CHANGE_LOCK(1150, new byte[]{'H', 'Y', '0', '0', '0'}, "Delayed insert thread couldn't get " + + "requested lock for table %s"), ERR_TOO_MANY_DELAYED_THREADS(1151, new byte[]{'H', 'Y', '0', '0', '0'}, "Too many delayed threads in use"), - ERR_ABORTING_CONNECTION(1152, new byte[]{'0', '8', 'S', '0', '1'}, "Aborted connection %d to db: '%s' user: '%s'" + - " (%s)"), - ERR_NET_PACKET_TOO_LARGE(1153, new byte[]{'0', '8', 'S', '0', '1'}, "Got a packet bigger than " + - "'max_allowed_packet' bytes"), - ERR_NET_READ_ERROR_FROM_PIPE(1154, new byte[]{'0', '8', 'S', '0', '1'}, "Got a read error from the connection " + - "pipe"), + ERR_ABORTING_CONNECTION(1152, new byte[]{'0', '8', 'S', '0', '1'}, "Aborted connection %d to db: '%s' user: '%s'" + + " (%s)"), + ERR_NET_PACKET_TOO_LARGE(1153, new byte[]{'0', '8', 'S', '0', '1'}, "Got a packet bigger than " + + "'max_allowed_packet' bytes"), + ERR_NET_READ_ERROR_FROM_PIPE(1154, new byte[]{'0', '8', 'S', '0', '1'}, "Got a read error from the connection " + + "pipe"), ERR_NET_FCNTL_ERROR(1155, new byte[]{'0', '8', 'S', '0', '1'}, "Got an error from fcntl()"), ERR_NET_PACKETS_OUT_OF_ORDER(1156, new byte[]{'0', '8', 'S', '0', '1'}, "Got packets out of order"), ERR_NET_UNCOMPRESS_ERROR(1157, new byte[]{'0', '8', 'S', '0', '1'}, "Couldn't uncompress communication packet"), @@ -240,235 +240,235 @@ public enum ErrorCode { ERR_NET_READ_INTERRUPTED(1159, new byte[]{'0', '8', 'S', '0', '1'}, "Got timeout reading communication packets"), 
ERR_NET_ERROR_ON_WRITE(1160, new byte[]{'0', '8', 'S', '0', '1'}, "Got an error writing communication packets"), ERR_NET_WRITE_INTERRUPTED(1161, new byte[]{'0', '8', 'S', '0', '1'}, "Got timeout writing communication packets"), - ERR_TOO_LONG_STRING(1162, new byte[]{'4', '2', '0', '0', '0'}, "Result string is longer than 'max_allowed_packet'" + - " bytes"), - ERR_TABLE_CANT_HANDLE_BLOB(1163, new byte[]{'4', '2', '0', '0', '0'}, "The used table type doesn't support " + - "BLOB/TEXT columns"), - ERR_TABLE_CANT_HANDLE_AUTO_INCREMENT(1164, new byte[]{'4', '2', '0', '0', '0'}, "The used table type doesn't " + - "support AUTO_INCREMENT columns"), - ERR_DELAYED_INSERT_TABLE_LOCKED(1165, new byte[]{'H', 'Y', '0', '0', '0'}, "INSERT DELAYED can't be used with " + - "table '%s' because it is locked with LOCK TABLES"), + ERR_TOO_LONG_STRING(1162, new byte[]{'4', '2', '0', '0', '0'}, "Result string is longer than 'max_allowed_packet'" + + " bytes"), + ERR_TABLE_CANT_HANDLE_BLOB(1163, new byte[]{'4', '2', '0', '0', '0'}, "The used table type doesn't support " + + "BLOB/TEXT columns"), + ERR_TABLE_CANT_HANDLE_AUTO_INCREMENT(1164, new byte[]{'4', '2', '0', '0', '0'}, "The used table type doesn't " + + "support AUTO_INCREMENT columns"), + ERR_DELAYED_INSERT_TABLE_LOCKED(1165, new byte[]{'H', 'Y', '0', '0', '0'}, "INSERT DELAYED can't be used with " + + "table '%s' because it is locked with LOCK TABLES"), ERR_WRONG_COLUMN_NAME(1166, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect column name '%s'"), ERR_WRONG_KEY_COLUMN(1167, new byte[]{'4', '2', '0', '0', '0'}, "The used storage engine can't index column '%s'"), - ERR_WRONG_MRG_TABLE(1168, new byte[]{'H', 'Y', '0', '0', '0'}, "Unable to open underlying table which is " + - "differently defined or of non-MyISAM type or doesn't exist"), - ERR_DUP_UNIQUE(1169, new byte[]{'2', '3', '0', '0', '0'}, "Can't write, because of unique constraint, to table " + - "'%s'"), - ERR_BLOB_KEY_WITHOUT_LENGTH(1170, new byte[]{'4', '2', '0', '0', '0'}, "BLOB/TEXT column '%s' used in key " + - "specification without a key length"), - ERR_PRIMARY_CANT_HAVE_NULL(1171, new byte[]{'4', '2', '0', '0', '0'}, "All parts of a PRIMARY KEY must be NOT " + - "NULL; if you need NULL in a key, use UNIQUE instead"), + ERR_WRONG_MRG_TABLE(1168, new byte[]{'H', 'Y', '0', '0', '0'}, "Unable to open underlying table which is " + + "differently defined or of non-MyISAM type or doesn't exist"), + ERR_DUP_UNIQUE(1169, new byte[]{'2', '3', '0', '0', '0'}, "Can't write, because of unique constraint, to table " + + "'%s'"), + ERR_BLOB_KEY_WITHOUT_LENGTH(1170, new byte[]{'4', '2', '0', '0', '0'}, "BLOB/TEXT column '%s' used in key " + + "specification without a key length"), + ERR_PRIMARY_CANT_HAVE_NULL(1171, new byte[]{'4', '2', '0', '0', '0'}, "All parts of a PRIMARY KEY must be NOT " + + "NULL; if you need NULL in a key, use UNIQUE instead"), ERR_TOO_MANY_ROWS(1172, new byte[]{'4', '2', '0', '0', '0'}, "Result consisted of more than one row"), ERR_REQUIRES_PRIMARY_KEY(1173, new byte[]{'4', '2', '0', '0', '0'}, "This table type requires a primary key"), - ERR_NO_RAID_COMPILED(1174, new byte[]{'H', 'Y', '0', '0', '0'}, "This version of MariaDB is not compiled with " + - "RAID support"), - ERR_UPDATE_WITHOUT_KEY_IN_SAFE_MODE(1175, new byte[]{'H', 'Y', '0', '0', '0'}, "You are using safe update mode " + - "and you tried to update a table without a WHERE that uses a KEY column"), + ERR_NO_RAID_COMPILED(1174, new byte[]{'H', 'Y', '0', '0', '0'}, "This version of MariaDB is not compiled with " + + "RAID 
support"), + ERR_UPDATE_WITHOUT_KEY_IN_SAFE_MODE(1175, new byte[]{'H', 'Y', '0', '0', '0'}, "You are using safe update mode " + + "and you tried to update a table without a WHERE that uses a KEY column"), ERR_KEY_DOES_NOT_EXITS(1176, new byte[]{'4', '2', '0', '0', '0'}, "Key '%s' doesn't exist in table '%s'"), ERR_CHECK_NO_SUCH_TABLE(1177, new byte[]{'4', '2', '0', '0', '0'}, "Can't open table"), - ERR_CHECK_NOT_IMPLEMENTED(1178, new byte[]{'4', '2', '0', '0', '0'}, "The storage engine for the table doesn't " + - "support %s"), - ERR_CANT_DO_THIS_DURING_AN_TRANSACTION(1179, new byte[]{'2', '5', '0', '0', '0'}, "You are not allowed to execute" + - " this command in a transaction"), + ERR_CHECK_NOT_IMPLEMENTED(1178, new byte[]{'4', '2', '0', '0', '0'}, "The storage engine for the table doesn't " + + "support %s"), + ERR_CANT_DO_THIS_DURING_AN_TRANSACTION(1179, new byte[]{'2', '5', '0', '0', '0'}, "You are not allowed to execute" + + " this command in a transaction"), ERR_ERROR_DURING_COMMIT(1180, new byte[]{'H', 'Y', '0', '0', '0'}, "Got error %d during COMMIT"), ERR_ERROR_DURING_ROLLBACK(1181, new byte[]{'H', 'Y', '0', '0', '0'}, "Got error %d during ROLLBACK"), ERR_ERROR_DURING_FLUSH_LOGS(1182, new byte[]{'H', 'Y', '0', '0', '0'}, "Got error %d during FLUSH_LOGS"), ERR_ERROR_DURING_CHECKPOINT(1183, new byte[]{'H', 'Y', '0', '0', '0'}, "Got error %d during CHECKPOINT"), - ERR_NEW_ABORTING_CONNECTION(1184, new byte[]{'0', '8', 'S', '0', '1'}, "Aborted connection %d to db: '%s' user: " + - "'%s' host: '%s' (%s)"), + ERR_NEW_ABORTING_CONNECTION(1184, new byte[]{'0', '8', 'S', '0', '1'}, "Aborted connection %d to db: '%s' user: " + + "'%s' host: '%s' (%s)"), ERR_UNUSED_10(1185, new byte[]{}, "You should never see it"), ERR_FLUSH_MASTER_BINLOG_CLOSED(1186, new byte[]{'H', 'Y', '0', '0', '0'}, "Binlog closed, cannot RESET MASTER"), ERR_INDEX_REBUILD(1187, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed rebuilding the index of dumped table '%s'"), ERR_MASTER(1188, new byte[]{'H', 'Y', '0', '0', '0'}, "Error from master: '%s'"), ERR_MASTER_NET_READ(1189, new byte[]{'0', '8', 'S', '0', '1'}, "Net error reading from master"), ERR_MASTER_NET_WRITE(1190, new byte[]{'0', '8', 'S', '0', '1'}, "Net error writing to master"), - ERR_FT_MATCHING_KEY_NOT_FOUND(1191, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't find FULLTEXT index matching the " + - "column list"), - ERR_LOCK_OR_ACTIVE_TRANSACTION(1192, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't execute the given command " + - "because you have active locked tables or an active transaction"), + ERR_FT_MATCHING_KEY_NOT_FOUND(1191, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't find FULLTEXT index matching the " + + "column list"), + ERR_LOCK_OR_ACTIVE_TRANSACTION(1192, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't execute the given command " + + "because you have active locked tables or an active transaction"), ERR_UNKNOWN_SYSTEM_VARIABLE(1193, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown system variable '%s'"), - ERR_CRASHED_ON_USAGE(1194, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s' is marked as crashed and should be " + - "repaired"), - ERR_CRASHED_ON_REPAIR(1195, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s' is marked as crashed and last " + - "(automatic?) 
repair failed"), - ERR_WARNING_NOT_COMPLETE_ROLLBACK(1196, new byte[]{'H', 'Y', '0', '0', '0'}, "Some non-transactional changed " + - "tables couldn't be rolled back"), - ERR_TRANS_CACHE_FULL(1197, new byte[]{'H', 'Y', '0', '0', '0'}, "Multi-statement transaction required more than " + - "'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again"), - ERR_SLAVE_MUST_STOP(1198, new byte[]{'H', 'Y', '0', '0', '0'}, "This operation cannot be performed with a running" + - " slave; run STOP SLAVE first"), - ERR_SLAVE_NOT_RUNNING(1199, new byte[]{'H', 'Y', '0', '0', '0'}, "This operation requires a running slave; " + - "configure slave and do START SLAVE"), - ERR_BAD_SLAVE(1200, new byte[]{'H', 'Y', '0', '0', '0'}, "The server is not configured as slave; fix in config " + - "file or with CHANGE MASTER TO"), - ERR_MASTER_INF(1201, new byte[]{'H', 'Y', '0', '0', '0'}, "Could not initialize master info structure; more error" + - " messages can be found in the MariaDB error log"), - ERR_SLAVE_THREAD(1202, new byte[]{'H', 'Y', '0', '0', '0'}, "Could not create slave thread; check system " + - "resources"), - ERR_TOO_MANY_USER_CONNECTIONS(1203, new byte[]{'4', '2', '0', '0', '0'}, "User %s already has more than " + - "'max_user_connections' active connections"), + ERR_CRASHED_ON_USAGE(1194, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s' is marked as crashed and should be " + + "repaired"), + ERR_CRASHED_ON_REPAIR(1195, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s' is marked as crashed and last " + + "(automatic?) repair failed"), + ERR_WARNING_NOT_COMPLETE_ROLLBACK(1196, new byte[]{'H', 'Y', '0', '0', '0'}, "Some non-transactional changed " + + "tables couldn't be rolled back"), + ERR_TRANS_CACHE_FULL(1197, new byte[]{'H', 'Y', '0', '0', '0'}, "Multi-statement transaction required more than " + + "'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again"), + ERR_SLAVE_MUST_STOP(1198, new byte[]{'H', 'Y', '0', '0', '0'}, "This operation cannot be performed with a running" + + " slave; run STOP SLAVE first"), + ERR_SLAVE_NOT_RUNNING(1199, new byte[]{'H', 'Y', '0', '0', '0'}, "This operation requires a running slave; " + + "configure slave and do START SLAVE"), + ERR_BAD_SLAVE(1200, new byte[]{'H', 'Y', '0', '0', '0'}, "The server is not configured as slave; fix in config " + + "file or with CHANGE MASTER TO"), + ERR_MASTER_INF(1201, new byte[]{'H', 'Y', '0', '0', '0'}, "Could not initialize master info structure; more error" + + " messages can be found in the MariaDB error log"), + ERR_SLAVE_THREAD(1202, new byte[]{'H', 'Y', '0', '0', '0'}, "Could not create slave thread; check system " + + "resources"), + ERR_TOO_MANY_USER_CONNECTIONS(1203, new byte[]{'4', '2', '0', '0', '0'}, "User %s already has more than " + + "'max_user_connections' active connections"), ERR_SET_CONSTANTS_ONLY(1204, new byte[]{'H', 'Y', '0', '0', '0'}, "You may only use constant expressions with SET"), - ERR_LOCK_WAIT_TIMEOUT(1205, new byte[]{'H', 'Y', '0', '0', '0'}, "Lock wait timeout exceeded; try restarting " + - "transaction"), - ERR_LOCK_TABLE_FULL(1206, new byte[]{'H', 'Y', '0', '0', '0'}, "The total number of locks exceeds the lock table " + - "size"), - ERR_READ_ONLY_TRANSACTION(1207, new byte[]{'2', '5', '0', '0', '0'}, "Update locks cannot be acquired during a " + - "READ UNCOMMITTED transaction"), - ERR_DROP_DB_WITH_READ_LOCK(1208, new byte[]{'H', 'Y', '0', '0', '0'}, "DROP DATABASE not allowed while thread is " + - "holding global read lock"), - 
ERR_CREATE_DB_WITH_READ_LOCK(1209, new byte[]{'H', 'Y', '0', '0', '0'}, "CREATE DATABASE not allowed while thread" + - " is holding global read lock"), + ERR_LOCK_WAIT_TIMEOUT(1205, new byte[]{'H', 'Y', '0', '0', '0'}, "Lock wait timeout exceeded; try restarting " + + "transaction"), + ERR_LOCK_TABLE_FULL(1206, new byte[]{'H', 'Y', '0', '0', '0'}, "The total number of locks exceeds the lock table " + + "size"), + ERR_READ_ONLY_TRANSACTION(1207, new byte[]{'2', '5', '0', '0', '0'}, "Update locks cannot be acquired during a " + + "READ UNCOMMITTED transaction"), + ERR_DROP_DB_WITH_READ_LOCK(1208, new byte[]{'H', 'Y', '0', '0', '0'}, "DROP DATABASE not allowed while thread is " + + "holding global read lock"), + ERR_CREATE_DB_WITH_READ_LOCK(1209, new byte[]{'H', 'Y', '0', '0', '0'}, "CREATE DATABASE not allowed while thread" + + " is holding global read lock"), ERR_WRONG_ARGUMENTS(1210, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect arguments to %s"), - ERR_NO_PERMISSION_TO_CREATE_USER(1211, new byte[]{'4', '2', '0', '0', '0'}, "'%s'@'%s' is not allowed to create " + - "new users"), - ERR_UNION_TABLES_IN_DIFFERENT_DIR(1212, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect table definition; all " + - "MERGE tables must be in the same database"), - ERR_LOCK_DEADLOCK(1213, new byte[]{'4', '0', '0', '0', '1'}, "Deadlock found when trying to get lock; try " + - "restarting transaction"), - ERR_TABLE_CANT_HANDLE_FT(1214, new byte[]{'H', 'Y', '0', '0', '0'}, "The used table type doesn't support FULLTEXT" + - " indexes"), + ERR_NO_PERMISSION_TO_CREATE_USER(1211, new byte[]{'4', '2', '0', '0', '0'}, "'%s'@'%s' is not allowed to create " + + "new users"), + ERR_UNION_TABLES_IN_DIFFERENT_DIR(1212, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect table definition; all " + + "MERGE tables must be in the same database"), + ERR_LOCK_DEADLOCK(1213, new byte[]{'4', '0', '0', '0', '1'}, "Deadlock found when trying to get lock; try " + + "restarting transaction"), + ERR_TABLE_CANT_HANDLE_FT(1214, new byte[]{'H', 'Y', '0', '0', '0'}, "The used table type doesn't support FULLTEXT" + + " indexes"), ERR_CANNOT_ADD_FOREIGN(1215, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot add foreign key constraint"), - ERR_NO_REFERENCED_ROW(1216, new byte[]{'2', '3', '0', '0', '0'}, "Cannot add or update a child row: a foreign key" + - " constraint fails"), - ERR_ROW_IS_REFERENCED(1217, new byte[]{'2', '3', '0', '0', '0'}, "Cannot delete or update a parent row: a foreign" + - " key constraint fails"), + ERR_NO_REFERENCED_ROW(1216, new byte[]{'2', '3', '0', '0', '0'}, "Cannot add or update a child row: a foreign key" + + " constraint fails"), + ERR_ROW_IS_REFERENCED(1217, new byte[]{'2', '3', '0', '0', '0'}, "Cannot delete or update a parent row: a foreign" + + " key constraint fails"), ERR_CONNECT_TO_MASTER(1218, new byte[]{'0', '8', 'S', '0', '1'}, "Error connecting to master: %s"), ERR_QUERY_ON_MASTER(1219, new byte[]{'H', 'Y', '0', '0', '0'}, "Error running query on master: %s"), ERR_ERROR_WHEN_EXECUTING_COMMAND(1220, new byte[]{'H', 'Y', '0', '0', '0'}, "Error when executing command %s: %s"), ERR_WRONG_USAGE(1221, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect usage of %s and %s"), - ERR_WRONG_NUMBER_OF_COLUMNS_IN_SELECT(1222, new byte[]{'2', '1', '0', '0', '0'}, "The used SELECT statements have" + - " a different number of columns"), - ERR_CANT_UPDATE_WITH_READLOCK(1223, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't execute the query because you " + - "have a conflicting read lock"), - ERR_MIXING_NOT_ALLOWED(1224, new 
byte[]{'H', 'Y', '0', '0', '0'}, "Mixing of transactional and non-transactional " + - "tables is disabled"), + ERR_WRONG_NUMBER_OF_COLUMNS_IN_SELECT(1222, new byte[]{'2', '1', '0', '0', '0'}, "The used SELECT statements have" + + " a different number of columns"), + ERR_CANT_UPDATE_WITH_READLOCK(1223, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't execute the query because you " + + "have a conflicting read lock"), + ERR_MIXING_NOT_ALLOWED(1224, new byte[]{'H', 'Y', '0', '0', '0'}, "Mixing of transactional and non-transactional " + + "tables is disabled"), ERR_DUP_ARGUMENT(1225, new byte[]{'H', 'Y', '0', '0', '0'}, "Option '%s' used twice in statement"), - ERR_USER_LIMIT_REACHED(1226, new byte[]{'4', '2', '0', '0', '0'}, "User '%s' has exceeded the '%s' resource " + - "(current value: %d)"), - ERR_SPECIFIC_ACCESS_DENIED_ERROR(1227, new byte[]{'4', '2', '0', '0', '0'}, "Access denied; you need (at least " + - "one of) the %s privilege(s) for this operation"), - ERR_LOCAL_VARIABLE(1228, new byte[]{'H', 'Y', '0', '0', '0'}, "Variable '%s' is a SESSION variable and can't be " + - "used with SET GLOBAL"), - ERR_GLOBAL_VARIABLE(1229, new byte[]{'H', 'Y', '0', '0', '0'}, "Variable '%s' is a GLOBAL variable and should be " + - "set with SET GLOBAL"), + ERR_USER_LIMIT_REACHED(1226, new byte[]{'4', '2', '0', '0', '0'}, "User '%s' has exceeded the '%s' resource " + + "(current value: %d)"), + ERR_SPECIFIC_ACCESS_DENIED_ERROR(1227, new byte[]{'4', '2', '0', '0', '0'}, "Access denied; you need (at least " + + "one of) the %s privilege(s) for this operation"), + ERR_LOCAL_VARIABLE(1228, new byte[]{'H', 'Y', '0', '0', '0'}, "Variable '%s' is a SESSION variable and can't be " + + "used with SET GLOBAL"), + ERR_GLOBAL_VARIABLE(1229, new byte[]{'H', 'Y', '0', '0', '0'}, "Variable '%s' is a GLOBAL variable and should be " + + "set with SET GLOBAL"), ERR_NO_DEFAULT(1230, new byte[]{'4', '2', '0', '0', '0'}, "Variable '%s' doesn't have a default value"), - ERR_WRONG_VALUE_FOR_VAR(1231, new byte[]{'4', '2', '0', '0', '0'}, "Variable '%s' can't be set to the value of " + - "'%s'"), + ERR_WRONG_VALUE_FOR_VAR(1231, new byte[]{'4', '2', '0', '0', '0'}, "Variable '%s' can't be set to the value of " + + "'%s'"), ERR_WRONG_TYPE_FOR_VAR(1232, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect argument type to variable '%s'"), ERR_VAR_CANT_BE_READ(1233, new byte[]{'H', 'Y', '0', '0', '0'}, "Variable '%s' can only be set, not read"), ERR_CANT_USE_OPTION_HERE(1234, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect usage/placement of '%s'"), - ERR_NOT_SUPPORTED_YET(1235, new byte[]{'4', '2', '0', '0', '0'}, "This version of MariaDB doesn't yet support " + - "'%s'"), - ERR_MASTER_FATAL_ERROR_READING_BINLOG(1236, new byte[]{'H', 'Y', '0', '0', '0'}, "Got fatal error %d from master " + - "when reading data from binary log: '%s'"), - ERR_SLAVE_IGNORED_TABLE(1237, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave SQL thread ignored the query because of" + - " replicate-*-table rules"), + ERR_NOT_SUPPORTED_YET(1235, new byte[]{'4', '2', '0', '0', '0'}, "This version of MariaDB doesn't yet support " + + "'%s'"), + ERR_MASTER_FATAL_ERROR_READING_BINLOG(1236, new byte[]{'H', 'Y', '0', '0', '0'}, "Got fatal error %d from master " + + "when reading data from binary log: '%s'"), + ERR_SLAVE_IGNORED_TABLE(1237, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave SQL thread ignored the query because of" + + " replicate-*-table rules"), ERR_INCORRECT_GLOBAL_LOCAL_VAR(1238, new byte[]{'H', 'Y', '0', '0', '0'}, "Variable '%s' is a %s variable"), 
ERR_WRONG_FK_DEF(1239, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect foreign key definition for '%s': %s"), - ERR_KEY_REF_DO_NOT_MATCH_TABLE_REF(1240, new byte[]{'H', 'Y', '0', '0', '0'}, "Key reference and table reference " + - "don't match"), + ERR_KEY_REF_DO_NOT_MATCH_TABLE_REF(1240, new byte[]{'H', 'Y', '0', '0', '0'}, "Key reference and table reference " + + "don't match"), ERR_OPERAND_COLUMNS(1241, new byte[]{'2', '1', '0', '0', '0'}, "Operand should contain %d column(s)"), ERR_SUBQUERY_NO_1_ROW(1242, new byte[]{'2', '1', '0', '0', '0'}, "Subquery returns more than 1 row"), - ERR_UNKNOWN_STMT_HANDLER(1243, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown prepared statement handler (%.*s) " + - "given to %s"), + ERR_UNKNOWN_STMT_HANDLER(1243, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown prepared statement handler (%.*s) " + + "given to %s"), ERR_CORRUPT_HELP_DB(1244, new byte[]{'H', 'Y', '0', '0', '0'}, "Help database is corrupt or does not exist"), ERR_CYCLIC_REFERENCE(1245, new byte[]{'H', 'Y', '0', '0', '0'}, "Cyclic reference on subqueries"), ERR_AUTO_CONVERT(1246, new byte[]{'H', 'Y', '0', '0', '0'}, "Converting column '%s' from %s to %s"), ERR_ILLEGAL_REFERENCE(1247, new byte[]{'4', '2', 'S', '2', '2'}, "Reference '%s' not supported (%s)"), - ERR_DERIVED_MUST_HAVE_ALIAS(1248, new byte[]{'4', '2', '0', '0', '0'}, "Every derived table must have its own " + - "alias"), + ERR_DERIVED_MUST_HAVE_ALIAS(1248, new byte[]{'4', '2', '0', '0', '0'}, "Every derived table must have its own " + + "alias"), ERR_SELECT_REDUCED(1249, new byte[]{'0', '1', '0', '0', '0'}, "Select %u was reduced during optimization"), - ERR_TABLENAME_NOT_ALLOWED_HERE(1250, new byte[]{'4', '2', '0', '0', '0'}, "Table '%s' from one of the SELECTs " + - "cannot be used in %s"), - ERR_NOT_SUPPORTED_AUTH_MODE(1251, new byte[]{'0', '8', '0', '0', '4'}, "Client does not support authentication " + - "protocol requested by server; consider upgrading MariaDB client"), - ERR_SPATIAL_CANT_HAVE_NULL(1252, new byte[]{'4', '2', '0', '0', '0'}, "All parts of a SPATIAL index must be NOT " + - "NULL"), - ERR_COLLATION_CHARSET_MISMATCH(1253, new byte[]{'4', '2', '0', '0', '0'}, "COLLATION '%s' is not valid for " + - "CHARACTER SET '%s'"), + ERR_TABLENAME_NOT_ALLOWED_HERE(1250, new byte[]{'4', '2', '0', '0', '0'}, "Table '%s' from one of the SELECTs " + + "cannot be used in %s"), + ERR_NOT_SUPPORTED_AUTH_MODE(1251, new byte[]{'0', '8', '0', '0', '4'}, "Client does not support authentication " + + "protocol requested by server; consider upgrading MariaDB client"), + ERR_SPATIAL_CANT_HAVE_NULL(1252, new byte[]{'4', '2', '0', '0', '0'}, "All parts of a SPATIAL index must be NOT " + + "NULL"), + ERR_COLLATION_CHARSET_MISMATCH(1253, new byte[]{'4', '2', '0', '0', '0'}, "COLLATION '%s' is not valid for " + + "CHARACTER SET '%s'"), ERR_SLAVE_WAS_RUNNING(1254, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave is already running"), ERR_SLAVE_WAS_NOT_RUNNING(1255, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave already has been stopped"), - ERR_TOO_BIG_FOR_UNCOMPRESS(1256, new byte[]{'H', 'Y', '0', '0', '0'}, "Uncompressed data size too large; the " + - "maximum size is %d (probably, length of uncompressed data was corrupted)"), + ERR_TOO_BIG_FOR_UNCOMPRESS(1256, new byte[]{'H', 'Y', '0', '0', '0'}, "Uncompressed data size too large; the " + + "maximum size is %d (probably, length of uncompressed data was corrupted)"), ERR_ZLIB_Z_MEM_ERROR(1257, new byte[]{'H', 'Y', '0', '0', '0'}, "ZLIB: Not enough memory"), - ERR_ZLIB_Z_BUF_ERROR(1258, new byte[]{'H', 
'Y', '0', '0', '0'}, "ZLIB: Not enough room in the output buffer " + - "(probably, length of uncompressed data was corrupted)"), + ERR_ZLIB_Z_BUF_ERROR(1258, new byte[]{'H', 'Y', '0', '0', '0'}, "ZLIB: Not enough room in the output buffer " + + "(probably, length of uncompressed data was corrupted)"), ERR_ZLIB_Z_DATA_ERROR(1259, new byte[]{'H', 'Y', '0', '0', '0'}, "ZLIB: Input data corrupted"), ERR_CUT_VALUE_GROUP_CONCAT(1260, new byte[]{'H', 'Y', '0', '0', '0'}, "Row %u was cut by GROUP_CONCAT()"), ERR_WARN_TOO_FEW_RECORDS(1261, new byte[]{'0', '1', '0', '0', '0'}, "Row %d doesn't contain data for all columns"), - ERR_WARN_TOO_MANY_RECORDS(1262, new byte[]{'0', '1', '0', '0', '0'}, "Row %d was truncated; it contained more " + - "data than there were input columns"), - ERR_WARN_NULL_TO_NOTNULL(1263, new byte[]{'2', '2', '0', '0', '4'}, "Column set to default value; NULL supplied " + - "to NOT NULL column '%s' at row %d"), - ERR_WARN_DATA_OUT_OF_RANGE(1264, new byte[]{'2', '2', '0', '0', '3'}, "Out of range value for column '%s' at row " + - "%d"), + ERR_WARN_TOO_MANY_RECORDS(1262, new byte[]{'0', '1', '0', '0', '0'}, "Row %d was truncated; it contained more " + + "data than there were input columns"), + ERR_WARN_NULL_TO_NOTNULL(1263, new byte[]{'2', '2', '0', '0', '4'}, "Column set to default value; NULL supplied " + + "to NOT NULL column '%s' at row %d"), + ERR_WARN_DATA_OUT_OF_RANGE(1264, new byte[]{'2', '2', '0', '0', '3'}, "Out of range value for column '%s' at row " + + "%d"), WARN_DATA_TRUNCATED(1265, new byte[]{'0', '1', '0', '0', '0'}, "Data truncated for column '%s' at row %d"), ERR_WARN_USING_OTHER_HANDLER(1266, new byte[]{'H', 'Y', '0', '0', '0'}, "Using storage engine %s for table '%s'"), - ERR_CANT_AGGREGATE_2COLLATIONS(1267, new byte[]{'H', 'Y', '0', '0', '0'}, "Illegal mix of collations (%s,%s) and " + - "(%s,%s) for operation '%s'"), + ERR_CANT_AGGREGATE_2COLLATIONS(1267, new byte[]{'H', 'Y', '0', '0', '0'}, "Illegal mix of collations (%s,%s) and " + + "(%s,%s) for operation '%s'"), ERR_DROP_USER(1268, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot drop one or more of the requested users"), - ERR_REVOKE_GRANTS(1269, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't revoke all privileges for one or more of the " + - "requested users"), - ERR_CANT_AGGREGATE_3COLLATIONS(1270, new byte[]{'H', 'Y', '0', '0', '0'}, "Illegal mix of collations (%s,%s), " + - "(%s,%s), (%s,%s) for operation '%s'"), - ERR_CANT_AGGREGATE_NCOLLATIONS(1271, new byte[]{'H', 'Y', '0', '0', '0'}, "Illegal mix of collations for " + - "operation '%s'"), - ERR_VARIABLE_IS_NOT_STRUCT(1272, new byte[]{'H', 'Y', '0', '0', '0'}, "Variable '%s' is not a variable component " + - "(can't be used as XXXX.variable_name)"), + ERR_REVOKE_GRANTS(1269, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't revoke all privileges for one or more of the " + + "requested users"), + ERR_CANT_AGGREGATE_3COLLATIONS(1270, new byte[]{'H', 'Y', '0', '0', '0'}, "Illegal mix of collations (%s,%s), " + + "(%s,%s), (%s,%s) for operation '%s'"), + ERR_CANT_AGGREGATE_NCOLLATIONS(1271, new byte[]{'H', 'Y', '0', '0', '0'}, "Illegal mix of collations for " + + "operation '%s'"), + ERR_VARIABLE_IS_NOT_STRUCT(1272, new byte[]{'H', 'Y', '0', '0', '0'}, "Variable '%s' is not a variable component " + + "(can't be used as XXXX.variable_name)"), ERR_UNKNOWN_COLLATION(1273, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown collation: '%s'"), - ERR_SLAVE_IGNORED_SSL_PARAMS(1274, new byte[]{'H', 'Y', '0', '0', '0'}, "SSL parameters in CHANGE MASTER are " + - "ignored 
because this MariaDB slave was compiled without SSL support; they can be used later if MariaDB " + - "slave " + - "with SSL is started"), - ERR_SERVER_IS_IN_SECURE_AUTH_MODE(1275, new byte[]{'H', 'Y', '0', '0', '0'}, "Server is running in --secure-auth " + - "mode, but '%s'@'%s' has a password in the old format; please change the password to the new format"), - ERR_WARN_FIELD_RESOLVED(1276, new byte[]{'H', 'Y', '0', '0', '0'}, "Field or reference '%s%s%s%s%s' of SELECT #%d" + - " was resolved in SELECT #%d"), - ERR_BAD_SLAVE_UNTIL_COND(1277, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect parameter or combination of " + - "parameters for START SLAVE UNTIL"), - ERR_MISSING_SKIP_SLAVE(1278, new byte[]{'H', 'Y', '0', '0', '0'}, "It is recommended to use --skip-slave-start " + - "when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get " + - "an " + - "unexpected slave's mysqld restart"), - ERR_UNTIL_COND_IGNORED(1279, new byte[]{'H', 'Y', '0', '0', '0'}, "SQL thread is not to be started so UNTIL " + - "options are ignored"), + ERR_SLAVE_IGNORED_SSL_PARAMS(1274, new byte[]{'H', 'Y', '0', '0', '0'}, "SSL parameters in CHANGE MASTER are " + + "ignored because this MariaDB slave was compiled without SSL support; they can be used later if MariaDB " + + "slave " + + "with SSL is started"), + ERR_SERVER_IS_IN_SECURE_AUTH_MODE(1275, new byte[]{'H', 'Y', '0', '0', '0'}, "Server is running in --secure-auth " + + "mode, but '%s'@'%s' has a password in the old format; please change the password to the new format"), + ERR_WARN_FIELD_RESOLVED(1276, new byte[]{'H', 'Y', '0', '0', '0'}, "Field or reference '%s%s%s%s%s' of SELECT #%d" + + " was resolved in SELECT #%d"), + ERR_BAD_SLAVE_UNTIL_COND(1277, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect parameter or combination of " + + "parameters for START SLAVE UNTIL"), + ERR_MISSING_SKIP_SLAVE(1278, new byte[]{'H', 'Y', '0', '0', '0'}, "It is recommended to use --skip-slave-start " + + "when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get " + + "an " + + "unexpected slave's mysqld restart"), + ERR_UNTIL_COND_IGNORED(1279, new byte[]{'H', 'Y', '0', '0', '0'}, "SQL thread is not to be started so UNTIL " + + "options are ignored"), ERR_WRONG_NAME_FOR_INDEX(1280, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect index name '%s'"), ERR_WRONG_NAME_FOR_CATALOG(1281, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect catalog name '%s'"), - ERR_WARN_QC_RESIZE(1282, new byte[]{'H', 'Y', '0', '0', '0'}, "Query cache failed to set size %d; new query " + - "cache size is %d"), + ERR_WARN_QC_RESIZE(1282, new byte[]{'H', 'Y', '0', '0', '0'}, "Query cache failed to set size %d; new query " + + "cache size is %d"), ERR_BAD_FT_COLUMN(1283, new byte[]{'H', 'Y', '0', '0', '0'}, "Column '%s' cannot be part of FULLTEXT index"), ERR_UNKNOWN_KEY_CACHE(1284, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown key cache '%s'"), - ERR_WARN_HOSTNAME_WONT_WORK(1285, new byte[]{'H', 'Y', '0', '0', '0'}, "MariaDB is started in --skip-name-resolve" + - " mode; you must restart it without this switch for this grant to work"), + ERR_WARN_HOSTNAME_WONT_WORK(1285, new byte[]{'H', 'Y', '0', '0', '0'}, "MariaDB is started in --skip-name-resolve" + + " mode; you must restart it without this switch for this grant to work"), ERR_UNKNOWN_STORAGE_ENGINE(1286, new byte[]{'4', '2', '0', '0', '0'}, "Unknown storage engine '%s'"), - ERR_WARN_DEPRECATED_SYNTAX(1287, new byte[]{'H', 'Y', '0', '0', '0'}, "'%s' is 
deprecated and will be removed in " + - "a future release. Please use %s instead"), - ERR_NON_UPDATABLE_TABLE(1288, new byte[]{'H', 'Y', '0', '0', '0'}, "The target table %s of the %s is not " + - "updatable"), - ERR_FEATURE_DISABLED(1289, new byte[]{'H', 'Y', '0', '0', '0'}, "The '%s' feature is disabled; you need MariaDB " + - "built with '%s' to have it working"), - ERR_OPTION_PREVENTS_STATEMENT(1290, new byte[]{'H', 'Y', '0', '0', '0'}, "The MariaDB server is running with the " + - "%s option so it cannot execute this statement"), - ERR_DUPLICATED_VALUE_IN_TYPE(1291, new byte[]{'H', 'Y', '0', '0', '0'}, "Column '%s' has duplicated value '%s' in" + - " %s"), + ERR_WARN_DEPRECATED_SYNTAX(1287, new byte[]{'H', 'Y', '0', '0', '0'}, "'%s' is deprecated and will be removed in " + + "a future release. Please use %s instead"), + ERR_NON_UPDATABLE_TABLE(1288, new byte[]{'H', 'Y', '0', '0', '0'}, "The target table %s of the %s is not " + + "updatable"), + ERR_FEATURE_DISABLED(1289, new byte[]{'H', 'Y', '0', '0', '0'}, "The '%s' feature is disabled; you need MariaDB " + + "built with '%s' to have it working"), + ERR_OPTION_PREVENTS_STATEMENT(1290, new byte[]{'H', 'Y', '0', '0', '0'}, "The MariaDB server is running with the " + + "%s option so it cannot execute this statement"), + ERR_DUPLICATED_VALUE_IN_TYPE(1291, new byte[]{'H', 'Y', '0', '0', '0'}, "Column '%s' has duplicated value '%s' in" + + " %s"), ERR_TRUNCATED_WRONG_VALUE(1292, new byte[]{'2', '2', '0', '0', '7'}, "Truncated incorrect %s value: '%s'"), - ERR_TOO_MUCH_AUTO_TIMESTAMP_COLS(1293, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect table definition; there " + - "can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause"), + ERR_TOO_MUCH_AUTO_TIMESTAMP_COLS(1293, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect table definition; there " + + "can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause"), ERR_INVALID_ON_UPDATE(1294, new byte[]{'H', 'Y', '0', '0', '0'}, "Invalid ON UPDATE clause for '%s' column"), - ERR_UNSUPPORTED_PS(1295, new byte[]{'H', 'Y', '0', '0', '0'}, "This command is not supported in the prepared " + - "statement protocol yet"), + ERR_UNSUPPORTED_PS(1295, new byte[]{'H', 'Y', '0', '0', '0'}, "This command is not supported in the prepared " + + "statement protocol yet"), ERR_GET_ERRMSG(1296, new byte[]{'H', 'Y', '0', '0', '0'}, "Got error %d '%s' from %s"), ERR_GET_TEMPORARY_ERRMSG(1297, new byte[]{'H', 'Y', '0', '0', '0'}, "Got temporary error %d '%s' from %s"), ERR_UNKNOWN_TIME_ZONE(1298, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown or incorrect time zone: '%s'"), - ERR_WARN_INVALID_TIMESTAMP(1299, new byte[]{'H', 'Y', '0', '0', '0'}, "Invalid TIMESTAMP value in column '%s' at " + - "row %d"), + ERR_WARN_INVALID_TIMESTAMP(1299, new byte[]{'H', 'Y', '0', '0', '0'}, "Invalid TIMESTAMP value in column '%s' at " + + "row %d"), ERR_INVALID_CHARACTER_STRING(1300, new byte[]{'H', 'Y', '0', '0', '0'}, "Invalid %s character string: '%s'"), - ERR_WARN_ALLOWED_PACKET_OVERFLOWED(1301, new byte[]{'H', 'Y', '0', '0', '0'}, "Result of %s() was larger than " + - "max_allowed_packet (%d) - truncated"), - ERR_CONFLICTING_DECLARATIONS(1302, new byte[]{'H', 'Y', '0', '0', '0'}, "Conflicting declarations: '%s%s' and " + - "'%s%s'"), - ERR_SP_NO_RECURSIVE_CREATE(1303, new byte[]{'2', 'F', '0', '0', '3'}, "Can't create a %s from within another " + - "stored routine"), + ERR_WARN_ALLOWED_PACKET_OVERFLOWED(1301, new byte[]{'H', 'Y', '0', '0', '0'}, "Result of %s() was larger 
than " + + "max_allowed_packet (%d) - truncated"), + ERR_CONFLICTING_DECLARATIONS(1302, new byte[]{'H', 'Y', '0', '0', '0'}, "Conflicting declarations: '%s%s' and " + + "'%s%s'"), + ERR_SP_NO_RECURSIVE_CREATE(1303, new byte[]{'2', 'F', '0', '0', '3'}, "Can't create a %s from within another " + + "stored routine"), ERR_SP_ALREADY_EXISTS(1304, new byte[]{'4', '2', '0', '0', '0'}, "%s %s already exists"), ERR_SP_DOES_NOT_EXIST(1305, new byte[]{'4', '2', '0', '0', '0'}, "%s %s does not exist"), ERR_SP_DROP_FAILED(1306, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to DROP %s %s"), @@ -477,20 +477,20 @@ public enum ErrorCode { ERR_SP_LABEL_REDEFINE(1309, new byte[]{'4', '2', '0', '0', '0'}, "Redefining label %s"), ERR_SP_LABEL_MISMATCH(1310, new byte[]{'4', '2', '0', '0', '0'}, "End-label %s without match"), ERR_SP_UNINIT_VAR(1311, new byte[]{'0', '1', '0', '0', '0'}, "Referring to uninitialized variable %s"), - ERR_SP_BADSELECT(1312, new byte[]{'0', 'A', '0', '0', '0'}, "PROCEDURE %s can't return a result set in the given " + - "context"), + ERR_SP_BADSELECT(1312, new byte[]{'0', 'A', '0', '0', '0'}, "PROCEDURE %s can't return a result set in the given " + + "context"), ERR_SP_BADRETURN(1313, new byte[]{'4', '2', '0', '0', '0'}, "RETURN is only allowed in a FUNCTION"), ERR_SP_BADSTATEMENT(1314, new byte[]{'0', 'A', '0', '0', '0'}, "%s is not allowed in stored procedures"), - ERR_UPDATE_LOG_DEPRECATED_IGNORED(1315, new byte[]{'4', '2', '0', '0', '0'}, "The update log is deprecated and " + - "replaced by the binary log; SET SQL_LOG_UPDATE has been ignored. This option will be removed in MariaDB " + - "5.6."), - ERR_UPDATE_LOG_DEPRECATED_TRANSLATED(1316, new byte[]{'4', '2', '0', '0', '0'}, "The update log is deprecated and" + - " replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN. This option will" + - " be " + - "removed in MariaDB 5.6."), + ERR_UPDATE_LOG_DEPRECATED_IGNORED(1315, new byte[]{'4', '2', '0', '0', '0'}, "The update log is deprecated and " + + "replaced by the binary log; SET SQL_LOG_UPDATE has been ignored. This option will be removed in MariaDB " + + "5.6."), + ERR_UPDATE_LOG_DEPRECATED_TRANSLATED(1316, new byte[]{'4', '2', '0', '0', '0'}, "The update log is deprecated and" + + " replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN. 
This option will" + + " be " + + "removed in MariaDB 5.6."), ERR_QUERY_INTERRUPTED(1317, new byte[]{'7', '0', '1', '0', '0'}, "Query execution was interrupted"), - ERR_SP_WRONG_NO_OF_ARGS(1318, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect number of arguments for %s %s; " + - "expected %u, got %u"), + ERR_SP_WRONG_NO_OF_ARGS(1318, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect number of arguments for %s %s; " + + "expected %u, got %u"), ERR_SP_COND_MISMATCH(1319, new byte[]{'4', '2', '0', '0', '0'}, "Undefined CONDITION: %s"), ERR_SP_NORETURN(1320, new byte[]{'4', '2', '0', '0', '0'}, "No RETURN found in FUNCTION %s"), ERR_SP_NORETURNEND(1321, new byte[]{'2', 'F', '0', '0', '5'}, "FUNCTION %s ended without RETURN"), @@ -501,514 +501,514 @@ public enum ErrorCode { ERR_SP_CURSOR_NOT_OPEN(1326, new byte[]{'2', '4', '0', '0', '0'}, "Cursor is not open"), ERR_SP_UNDECLARED_VAR(1327, new byte[]{'4', '2', '0', '0', '0'}, "Undeclared variable: %s"), ERR_SP_WRONG_NO_OF_FETCH_ARGS(1328, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect number of FETCH variables"), - ERR_SP_FETCH_NO_DATA(1329, new byte[]{'0', '2', '0', '0', '0'}, "No data - zero rows fetched, selected, or " + - "processed"), + ERR_SP_FETCH_NO_DATA(1329, new byte[]{'0', '2', '0', '0', '0'}, "No data - zero rows fetched, selected, or " + + "processed"), ERR_SP_DUP_PARAM(1330, new byte[]{'4', '2', '0', '0', '0'}, "Duplicate parameter: %s"), ERR_SP_DUP_VAR(1331, new byte[]{'4', '2', '0', '0', '0'}, "Duplicate variable: %s"), ERR_SP_DUP_COND(1332, new byte[]{'4', '2', '0', '0', '0'}, "Duplicate condition: %s"), ERR_SP_DUP_CURS(1333, new byte[]{'4', '2', '0', '0', '0'}, "Duplicate cursor: %s"), ERR_SP_CANT_ALTER(1334, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to ALTER %s %s"), ERR_SP_SUBSELECT_NYI(1335, new byte[]{'0', 'A', '0', '0', '0'}, "Subquery value not supported"), - ERR_STMT_NOT_ALLOWED_IN_SF_OR_TRG(1336, new byte[]{'0', 'A', '0', '0', '0'}, "%s is not allowed in stored " + - "function or trigger"), - ERR_SP_VARCOND_AFTER_CURSHNDLR(1337, new byte[]{'4', '2', '0', '0', '0'}, "Variable or condition declaration " + - "after cursor or handler declaration"), - ERR_SP_CURSOR_AFTER_HANDLER(1338, new byte[]{'4', '2', '0', '0', '0'}, "Cursor declaration after handler " + - "declaration"), + ERR_STMT_NOT_ALLOWED_IN_SF_OR_TRG(1336, new byte[]{'0', 'A', '0', '0', '0'}, "%s is not allowed in stored " + + "function or trigger"), + ERR_SP_VARCOND_AFTER_CURSHNDLR(1337, new byte[]{'4', '2', '0', '0', '0'}, "Variable or condition declaration " + + "after cursor or handler declaration"), + ERR_SP_CURSOR_AFTER_HANDLER(1338, new byte[]{'4', '2', '0', '0', '0'}, "Cursor declaration after handler " + + "declaration"), ERR_SP_CASE_NOT_FOUND(1339, new byte[]{'2', '0', '0', '0', '0'}, "Case not found for CASE statement"), ERR_FPARSER_TOO_BIG_FILE(1340, new byte[]{'H', 'Y', '0', '0', '0'}, "Configuration file '%s' is too big"), ERR_FPARSER_BAD_HEADER(1341, new byte[]{'H', 'Y', '0', '0', '0'}, "Malformed file type header in file '%s'"), - ERR_FPARSER_EOF_IN_COMMENT(1342, new byte[]{'H', 'Y', '0', '0', '0'}, "Unexpected end of file while parsing " + - "comment '%s'"), - ERR_FPARSER_ERROR_IN_PARAMETER(1343, new byte[]{'H', 'Y', '0', '0', '0'}, "Error while parsing parameter '%s' " + - "(line: '%s')"), - ERR_FPARSER_EOF_IN_UNKNOWN_PARAMETER(1344, new byte[]{'H', 'Y', '0', '0', '0'}, "Unexpected end of file while " + - "skipping unknown parameter '%s'"), - ERR_VIEW_NO_EXPLAIN(1345, new byte[]{'H', 'Y', '0', '0', '0'}, "EXPLAIN/SHOW can not be issued; lacking 
" + - "privileges for underlying table"), + ERR_FPARSER_EOF_IN_COMMENT(1342, new byte[]{'H', 'Y', '0', '0', '0'}, "Unexpected end of file while parsing " + + "comment '%s'"), + ERR_FPARSER_ERROR_IN_PARAMETER(1343, new byte[]{'H', 'Y', '0', '0', '0'}, "Error while parsing parameter '%s' " + + "(line: '%s')"), + ERR_FPARSER_EOF_IN_UNKNOWN_PARAMETER(1344, new byte[]{'H', 'Y', '0', '0', '0'}, "Unexpected end of file while " + + "skipping unknown parameter '%s'"), + ERR_VIEW_NO_EXPLAIN(1345, new byte[]{'H', 'Y', '0', '0', '0'}, "EXPLAIN/SHOW can not be issued; lacking " + + "privileges for underlying table"), ERR_FRM_UNKNOWN_TYPE(1346, new byte[]{'H', 'Y', '0', '0', '0'}, "File '%s' has unknown type '%s' in its header"), ERR_WRONG_OBJECT(1347, new byte[]{'H', 'Y', '0', '0', '0'}, "'%s.%s' is not %s"), ERR_NONUPDATEABLE_COLUMN(1348, new byte[]{'H', 'Y', '0', '0', '0'}, "Column '%s' is not updatable"), - ERR_VIEW_SELECT_DERIVED(1349, new byte[]{'H', 'Y', '0', '0', '0'}, "View's SELECT contains a subquery in the FROM" + - " clause"), + ERR_VIEW_SELECT_DERIVED(1349, new byte[]{'H', 'Y', '0', '0', '0'}, "View's SELECT contains a subquery in the FROM" + + " clause"), ERR_VIEW_SELECT_CLAUSE(1350, new byte[]{'H', 'Y', '0', '0', '0'}, "View's SELECT contains a '%s' clause"), - ERR_VIEW_SELECT_VARIABLE(1351, new byte[]{'H', 'Y', '0', '0', '0'}, "View's SELECT contains a variable or " + - "parameter"), - ERR_VIEW_SELECT_TMPTABLE(1352, new byte[]{'H', 'Y', '0', '0', '0'}, "View's SELECT refers to a temporary table " + - "'%s'"), - ERR_VIEW_WRONG_LIST(1353, new byte[]{'H', 'Y', '0', '0', '0'}, "View's SELECT and view's field list have " + - "different column counts"), - ERR_WARN_VIEW_MERGE(1354, new byte[]{'H', 'Y', '0', '0', '0'}, "View merge algorithm can't be used here for now " + - "(assumed undefined algorithm)"), - ERR_WARN_VIEW_WITHOUT_KEY(1355, new byte[]{'H', 'Y', '0', '0', '0'}, "View being updated does not have complete " + - "key of underlying table in it"), - ERR_VIEW_INVALID(1356, new byte[]{'H', 'Y', '0', '0', '0'}, "View '%s.%s' references invalid table(s) or column" + - "(s) or function(s) or definer/invoker of view lack rights to use them"), - ERR_SP_NO_DROP_SP(1357, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't drop or alter a %s from within another stored" + - " routine"), - ERR_SP_GOTO_IN_HNDLR(1358, new byte[]{'H', 'Y', '0', '0', '0'}, "GOTO is not allowed in a stored procedure " + - "handler"), + ERR_VIEW_SELECT_VARIABLE(1351, new byte[]{'H', 'Y', '0', '0', '0'}, "View's SELECT contains a variable or " + + "parameter"), + ERR_VIEW_SELECT_TMPTABLE(1352, new byte[]{'H', 'Y', '0', '0', '0'}, "View's SELECT refers to a temporary table " + + "'%s'"), + ERR_VIEW_WRONG_LIST(1353, new byte[]{'H', 'Y', '0', '0', '0'}, "View's SELECT and view's field list have " + + "different column counts"), + ERR_WARN_VIEW_MERGE(1354, new byte[]{'H', 'Y', '0', '0', '0'}, "View merge algorithm can't be used here for now " + + "(assumed undefined algorithm)"), + ERR_WARN_VIEW_WITHOUT_KEY(1355, new byte[]{'H', 'Y', '0', '0', '0'}, "View being updated does not have complete " + + "key of underlying table in it"), + ERR_VIEW_INVALID(1356, new byte[]{'H', 'Y', '0', '0', '0'}, "View '%s.%s' references invalid table(s) or column" + + "(s) or function(s) or definer/invoker of view lack rights to use them"), + ERR_SP_NO_DROP_SP(1357, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't drop or alter a %s from within another stored" + + " routine"), + ERR_SP_GOTO_IN_HNDLR(1358, new byte[]{'H', 'Y', '0', '0', '0'}, "GOTO is not 
allowed in a stored procedure " + + "handler"), ERR_TRG_ALREADY_EXISTS(1359, new byte[]{'H', 'Y', '0', '0', '0'}, "Trigger already exists"), ERR_TRG_DOES_NOT_EXIST(1360, new byte[]{'H', 'Y', '0', '0', '0'}, "Trigger does not exist"), - ERR_TRG_ON_VIEW_OR_TEMP_TABLE(1361, new byte[]{'H', 'Y', '0', '0', '0'}, "Trigger's '%s' is view or temporary " + - "table"), - ERR_TRG_CANT_CHANGE_ROW(1362, new byte[]{'H', 'Y', '0', '0', '0'}, "Updating of %s row is not allowed in " + - "%strigger"), + ERR_TRG_ON_VIEW_OR_TEMP_TABLE(1361, new byte[]{'H', 'Y', '0', '0', '0'}, "Trigger's '%s' is view or temporary " + + "table"), + ERR_TRG_CANT_CHANGE_ROW(1362, new byte[]{'H', 'Y', '0', '0', '0'}, "Updating of %s row is not allowed in " + + "%strigger"), ERR_TRG_NO_SUCH_ROW_IN_TRG(1363, new byte[]{'H', 'Y', '0', '0', '0'}, "There is no %s row in %s trigger"), ERR_NO_DEFAULT_FOR_FIELD(1364, new byte[]{'H', 'Y', '0', '0', '0'}, "Field '%s' doesn't have a default value"), ERR_DIVISION_BY_ZER(1365, new byte[]{'2', '2', '0', '1', '2'}, "Division by 0"), - ERR_TRUNCATED_WRONG_VALUE_FOR_FIELD(1366, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect %s value: '%s' for " + - "column '%s' at row %d"), + ERR_TRUNCATED_WRONG_VALUE_FOR_FIELD(1366, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect %s value: '%s' for " + + "column '%s' at row %d"), ERR_ILLEGAL_VALUE_FOR_TYPE(1367, new byte[]{'2', '2', '0', '0', '7'}, "Illegal %s '%s' value found during parsing"), ERR_VIEW_NONUPD_CHECK(1368, new byte[]{'H', 'Y', '0', '0', '0'}, "CHECK OPTION on non-updatable view '%s.%s'"), ERR_VIEW_CHECK_FAILED(1369, new byte[]{'H', 'Y', '0', '0', '0'}, "CHECK OPTION failed '%s.%s'"), - ERR_PROCACCESS_DENIED_ERROR(1370, new byte[]{'4', '2', '0', '0', '0'}, "%s command denied to user '%s'@'%s' for " + - "routine '%s'"), + ERR_PROCACCESS_DENIED_ERROR(1370, new byte[]{'4', '2', '0', '0', '0'}, "%s command denied to user '%s'@'%s' for " + + "routine '%s'"), ERR_RELAY_LOG_FAIL(1371, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed purging old relay logs: %s"), - ERR_PASSWD_LENGTH(1372, new byte[]{'H', 'Y', '0', '0', '0'}, "Password hash should be a %d-digit hexadecimal " + - "number"), + ERR_PASSWD_LENGTH(1372, new byte[]{'H', 'Y', '0', '0', '0'}, "Password hash should be a %d-digit hexadecimal " + + "number"), ERR_UNKNOWN_TARGET_BINLOG(1373, new byte[]{'H', 'Y', '0', '0', '0'}, "Target log not found in binlog index"), ERR_IO_ERR_LOG_INDEX_READ(1374, new byte[]{'H', 'Y', '0', '0', '0'}, "I/O error reading log index file"), - ERR_BINLOG_PURGE_PROHIBITED(1375, new byte[]{'H', 'Y', '0', '0', '0'}, "Server configuration does not permit " + - "binlog purge"), + ERR_BINLOG_PURGE_PROHIBITED(1375, new byte[]{'H', 'Y', '0', '0', '0'}, "Server configuration does not permit " + + "binlog purge"), ERR_FSEEK_FAIL(1376, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed on fseek()"), ERR_BINLOG_PURGE_FATAL_ERR(1377, new byte[]{'H', 'Y', '0', '0', '0'}, "Fatal error during log purge"), ERR_LOG_IN_USE(1378, new byte[]{'H', 'Y', '0', '0', '0'}, "A purgeable log is in use, will not purge"), ERR_LOG_PURGE_UNKNOWN_ERR(1379, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown error during log purge"), ERR_RELAY_LOG_INIT(1380, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed initializing relay log position: %s"), ERR_NO_BINARY_LOGGING(1381, new byte[]{'H', 'Y', '0', '0', '0'}, "You are not using binary logging"), - ERR_RESERVED_SYNTAX(1382, new byte[]{'H', 'Y', '0', '0', '0'}, "The '%s' syntax is reserved for purposes internal" + - " to the MariaDB server"), + ERR_RESERVED_SYNTAX(1382, new 
byte[]{'H', 'Y', '0', '0', '0'}, "The '%s' syntax is reserved for purposes internal" + + " to the MariaDB server"), ERR_WSAS_FAILED(1383, new byte[]{'H', 'Y', '0', '0', '0'}, "WSAStartup Failed"), - ERR_DIFF_GROUPS_PROC(1384, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't handle procedures with different groups " + - "yet"), + ERR_DIFF_GROUPS_PROC(1384, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't handle procedures with different groups " + + "yet"), ERR_NO_GROUP_FOR_PROC(1385, new byte[]{'H', 'Y', '0', '0', '0'}, "Select must have a group with this procedure"), ERR_ORDER_WITH_PROC(1386, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't use ORDER clause with this procedure"), - ERR_LOGGING_PROHIBIT_CHANGING_OF(1387, new byte[]{'H', 'Y', '0', '0', '0'}, "Binary logging and replication " + - "forbid changing the global server %s"), + ERR_LOGGING_PROHIBIT_CHANGING_OF(1387, new byte[]{'H', 'Y', '0', '0', '0'}, "Binary logging and replication " + + "forbid changing the global server %s"), ERR_NO_FILE_MAPPING(1388, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't map file: %s, errno: %d"), ERR_WRONG_MAGIC(1389, new byte[]{'H', 'Y', '0', '0', '0'}, "Wrong magic in %s"), ERR_PS_MANY_PARAM(1390, new byte[]{'H', 'Y', '0', '0', '0'}, "Prepared statement contains too many placeholders"), ERR_KEY_PART_0(1391, new byte[]{'H', 'Y', '0', '0', '0'}, "Key part '%s' length cannot be 0"), ERR_VIEW_CHECKSUM(1392, new byte[]{'H', 'Y', '0', '0', '0'}, "View text checksum failed"), - ERR_VIEW_MULTIUPDATE(1393, new byte[]{'H', 'Y', '0', '0', '0'}, "Can not modify more than one base table through " + - "a join view '%s.%s'"), - ERR_VIEW_NO_INSERT_FIELD_LIST(1394, new byte[]{'H', 'Y', '0', '0', '0'}, "Can not insert into join view '%s.%s' " + - "without fields list"), + ERR_VIEW_MULTIUPDATE(1393, new byte[]{'H', 'Y', '0', '0', '0'}, "Can not modify more than one base table through " + + "a join view '%s.%s'"), + ERR_VIEW_NO_INSERT_FIELD_LIST(1394, new byte[]{'H', 'Y', '0', '0', '0'}, "Can not insert into join view '%s.%s' " + + "without fields list"), ERR_VIEW_DELETE_MERGE_VIEW(1395, new byte[]{'H', 'Y', '0', '0', '0'}, "Can not delete from join view '%s.%s'"), ERR_CANNOT_USER(1396, new byte[]{'H', 'Y', '0', '0', '0'}, "Operation %s failed for %s"), ERR_XAER_NOTA(1397, new byte[]{'X', 'A', 'E', '0', '4'}, "XAER_NOTA: Unknown XID"), ERR_XAER_INVAL(1398, new byte[]{'X', 'A', 'E', '0', '5'}, "XAER_INVAL: Invalid arguments (or unsupported command)"), - ERR_XAER_RMFAIL(1399, new byte[]{'X', 'A', 'E', '0', '7'}, "XAER_RMFAIL: The command cannot be executed when " + - "global transaction is in the %s state"), - ERR_XAER_OUTSIDE(1400, new byte[]{'X', 'A', 'E', '0', '9'}, "XAER_OUTSIDE: Some work is done outside global " + - "transaction"), - ERR_XAER_RMERR(1401, new byte[]{'X', 'A', 'E', '0', '3'}, "XAER_RMERR: Fatal error occurred in the transaction " + - "branch - check your data for consistency"), + ERR_XAER_RMFAIL(1399, new byte[]{'X', 'A', 'E', '0', '7'}, "XAER_RMFAIL: The command cannot be executed when " + + "global transaction is in the %s state"), + ERR_XAER_OUTSIDE(1400, new byte[]{'X', 'A', 'E', '0', '9'}, "XAER_OUTSIDE: Some work is done outside global " + + "transaction"), + ERR_XAER_RMERR(1401, new byte[]{'X', 'A', 'E', '0', '3'}, "XAER_RMERR: Fatal error occurred in the transaction " + + "branch - check your data for consistency"), ERR_XA_RBROLLBACK(1402, new byte[]{'X', 'A', '1', '0', '0'}, "XA_RBROLLBACK: Transaction branch was rolled back"), - ERR_NONEXISTING_PROC_GRANT(1403, new byte[]{'4', '2', '0', '0', '0'}, "There 
is no such grant defined for user " + - "'%s' on host '%s' on routine '%s'"), - ERR_PROC_AUTO_GRANT_FAIL(1404, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to grant EXECUTE and ALTER ROUTINE " + - "privileges"), - ERR_PROC_AUTO_REVOKE_FAIL(1405, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to revoke all privileges to dropped " + - "routine"), + ERR_NONEXISTING_PROC_GRANT(1403, new byte[]{'4', '2', '0', '0', '0'}, "There is no such grant defined for user " + + "'%s' on host '%s' on routine '%s'"), + ERR_PROC_AUTO_GRANT_FAIL(1404, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to grant EXECUTE and ALTER ROUTINE " + + "privileges"), + ERR_PROC_AUTO_REVOKE_FAIL(1405, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to revoke all privileges to dropped " + + "routine"), ERR_DATA_TOO_LONG(1406, new byte[]{'2', '2', '0', '0', '1'}, "Data too long for column '%s' at row %d"), ERR_SP_BAD_SQLSTATE(1407, new byte[]{'4', '2', '0', '0', '0'}, "Bad SQLSTATE: '%s'"), - ERR_STARTUP(1408, new byte[]{'H', 'Y', '0', '0', '0'}, "%s: ready for connections. Version: '%s' socket: '%s' " + - "port: %d %s"), - ERR_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR(1409, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't load value from file with " + - "fixed size rows to variable"), - ERR_CANT_CREATE_USER_WITH_GRANT(1410, new byte[]{'4', '2', '0', '0', '0'}, "You are not allowed to create a user " + - "with GRANT"), + ERR_STARTUP(1408, new byte[]{'H', 'Y', '0', '0', '0'}, "%s: ready for connections. Version: '%s' socket: '%s' " + + "port: %d %s"), + ERR_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR(1409, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't load value from file with " + + "fixed size rows to variable"), + ERR_CANT_CREATE_USER_WITH_GRANT(1410, new byte[]{'4', '2', '0', '0', '0'}, "You are not allowed to create a user " + + "with GRANT"), ERR_WRONG_VALUE_FOR_TYPE(1411, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect %s value: '%s' for function %s"), - ERR_TABLE_DEF_CHANGED(1412, new byte[]{'H', 'Y', '0', '0', '0'}, "Table definition has changed, please retry " + - "transaction"), + ERR_TABLE_DEF_CHANGED(1412, new byte[]{'H', 'Y', '0', '0', '0'}, "Table definition has changed, please retry " + + "transaction"), ERR_SP_DUP_HANDLER(1413, new byte[]{'4', '2', '0', '0', '0'}, "Duplicate handler declared in the same block"), - ERR_SP_NOT_VAR_ARG(1414, new byte[]{'4', '2', '0', '0', '0'}, "OUT or INOUT argument %d for routine %s is not a " + - "variable or NEW pseudo-variable in BEFORE trigger"), + ERR_SP_NOT_VAR_ARG(1414, new byte[]{'4', '2', '0', '0', '0'}, "OUT or INOUT argument %d for routine %s is not a " + + "variable or NEW pseudo-variable in BEFORE trigger"), ERR_SP_NO_RETSET(1415, new byte[]{'0', 'A', '0', '0', '0'}, "Not allowed to return a result set from a %s"), - ERR_CANT_CREATE_GEOMETRY_OBJECT(1416, new byte[]{'2', '2', '0', '0', '3'}, "Cannot get geometry object from data " + - "you send to the GEOMETRY field"), - ERR_FAILED_ROUTINE_BREAK_BINLOG(1417, new byte[]{'H', 'Y', '0', '0', '0'}, "A routine failed and has neither NO " + - "SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables " + - "were " + - "updated, the binary log will miss their changes"), - ERR_BINLOG_UNSAFE_ROUTINE(1418, new byte[]{'H', 'Y', '0', '0', '0'}, "This function has none of DETERMINISTIC, NO" + - " SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the " + - "less safe" + - " log_bin_trust_function_creators variable)"), - ERR_BINLOG_CREATE_ROUTINE_NEED_SUPER(1419, new byte[]{'H', 
'Y', '0', '0', '0'}, "You do not have the SUPER " + - "privilege and binary logging is enabled (you *might* want to use the less safe " + - "log_bin_trust_function_creators " + - "variable)"), - ERR_EXEC_STMT_WITH_OPEN_CURSOR(1420, new byte[]{'H', 'Y', '0', '0', '0'}, "You can't execute a prepared statement" + - " which has an open cursor associated with it. Reset the statement to re-execute it."), + ERR_CANT_CREATE_GEOMETRY_OBJECT(1416, new byte[]{'2', '2', '0', '0', '3'}, "Cannot get geometry object from data " + + "you send to the GEOMETRY field"), + ERR_FAILED_ROUTINE_BREAK_BINLOG(1417, new byte[]{'H', 'Y', '0', '0', '0'}, "A routine failed and has neither NO " + + "SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables " + + "were " + + "updated, the binary log will miss their changes"), + ERR_BINLOG_UNSAFE_ROUTINE(1418, new byte[]{'H', 'Y', '0', '0', '0'}, "This function has none of DETERMINISTIC, NO" + + " SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the " + + "less safe" + + " log_bin_trust_function_creators variable)"), + ERR_BINLOG_CREATE_ROUTINE_NEED_SUPER(1419, new byte[]{'H', 'Y', '0', '0', '0'}, "You do not have the SUPER " + + "privilege and binary logging is enabled (you *might* want to use the less safe " + + "log_bin_trust_function_creators " + + "variable)"), + ERR_EXEC_STMT_WITH_OPEN_CURSOR(1420, new byte[]{'H', 'Y', '0', '0', '0'}, "You can't execute a prepared statement" + + " which has an open cursor associated with it. Reset the statement to re-execute it."), ERR_STMT_HAS_NO_OPEN_CURSOR(1421, new byte[]{'H', 'Y', '0', '0', '0'}, "The statement (%d) has no open cursor."), - ERR_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG(1422, new byte[]{'H', 'Y', '0', '0', '0'}, "Explicit or implicit commit is " + - "not allowed in stored function or trigger."), - ERR_NO_DEFAULT_FOR_VIEW_FIELD(1423, new byte[]{'H', 'Y', '0', '0', '0'}, "Field of view '%s.%s' underlying table " + - "doesn't have a default value"), - ERR_SP_NO_RECURSION(1424, new byte[]{'H', 'Y', '0', '0', '0'}, "Recursive stored functions and triggers are not " + - "allowed."), - ERR_TOO_BIG_SCALE(1425, new byte[]{'4', '2', '0', '0', '0'}, "Too big scale %d specified for column '%s'. Maximum" + - " is %d."), - ERR_TOO_BIG_PRECISION(1426, new byte[]{'4', '2', '0', '0', '0'}, "Too big precision %d specified for column '%s'." + - " Maximum is %d."), - ERR_M_BIGGER_THAN_D(1427, new byte[]{'4', '2', '0', '0', '0'}, "For float(M,D, double(M,D or decimal(M,D, M must " + - "be >= D (column '%s')."), - ERR_WRONG_LOCK_OF_SYSTEM_TABLE(1428, new byte[]{'H', 'Y', '0', '0', '0'}, "You can't combine write-locking of " + - "system tables with other tables or lock types"), - ERR_CONNECT_TO_FOREIGN_DATA_SOURCE(1429, new byte[]{'H', 'Y', '0', '0', '0'}, "Unable to connect to foreign data " + - "source: %s"), - ERR_QUERY_ON_FOREIGN_DATA_SOURCE(1430, new byte[]{'H', 'Y', '0', '0', '0'}, "There was a problem processing the " + - "query on the foreign data source. Data source error: %s"), - ERR_FOREIGN_DATA_SOURCE_DOESNT_EXIST(1431, new byte[]{'H', 'Y', '0', '0', '0'}, "The foreign data source you are " + - "trying to reference does not exist. Data source error: %s"), - ERR_FOREIGN_DATA_STRING_INVALID_CANT_CREATE(1432, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't create federated " + - "table. 
The data source connection string '%s' is not in the correct format"), - ERR_FOREIGN_DATA_STRING_INVALID(1433, new byte[]{'H', 'Y', '0', '0', '0'}, "The data source connection string " + - "'%s' is not in the correct format"), - ERR_CANT_CREATE_FEDERATED_TABLE(1434, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't create federated table. Foreign" + - " data src error: %s"), + ERR_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG(1422, new byte[]{'H', 'Y', '0', '0', '0'}, "Explicit or implicit commit is " + + "not allowed in stored function or trigger."), + ERR_NO_DEFAULT_FOR_VIEW_FIELD(1423, new byte[]{'H', 'Y', '0', '0', '0'}, "Field of view '%s.%s' underlying table " + + "doesn't have a default value"), + ERR_SP_NO_RECURSION(1424, new byte[]{'H', 'Y', '0', '0', '0'}, "Recursive stored functions and triggers are not " + + "allowed."), + ERR_TOO_BIG_SCALE(1425, new byte[]{'4', '2', '0', '0', '0'}, "Too big scale %d specified for column '%s'. Maximum" + + " is %d."), + ERR_TOO_BIG_PRECISION(1426, new byte[]{'4', '2', '0', '0', '0'}, "Too big precision %d specified for column '%s'." + + " Maximum is %d."), + ERR_M_BIGGER_THAN_D(1427, new byte[]{'4', '2', '0', '0', '0'}, "For float(M,D, double(M,D or decimal(M,D, M must " + + "be >= D (column '%s')."), + ERR_WRONG_LOCK_OF_SYSTEM_TABLE(1428, new byte[]{'H', 'Y', '0', '0', '0'}, "You can't combine write-locking of " + + "system tables with other tables or lock types"), + ERR_CONNECT_TO_FOREIGN_DATA_SOURCE(1429, new byte[]{'H', 'Y', '0', '0', '0'}, "Unable to connect to foreign data " + + "source: %s"), + ERR_QUERY_ON_FOREIGN_DATA_SOURCE(1430, new byte[]{'H', 'Y', '0', '0', '0'}, "There was a problem processing the " + + "query on the foreign data source. Data source error: %s"), + ERR_FOREIGN_DATA_SOURCE_DOESNT_EXIST(1431, new byte[]{'H', 'Y', '0', '0', '0'}, "The foreign data source you are " + + "trying to reference does not exist. Data source error: %s"), + ERR_FOREIGN_DATA_STRING_INVALID_CANT_CREATE(1432, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't create federated " + + "table. The data source connection string '%s' is not in the correct format"), + ERR_FOREIGN_DATA_STRING_INVALID(1433, new byte[]{'H', 'Y', '0', '0', '0'}, "The data source connection string " + + "'%s' is not in the correct format"), + ERR_CANT_CREATE_FEDERATED_TABLE(1434, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't create federated table. Foreign" + + " data src error: %s"), ERR_TRG_IN_WRONG_SCHEMA(1435, new byte[]{'H', 'Y', '0', '0', '0'}, "Trigger in wrong schema"), - ERR_STACK_OVERRUN_NEED_MORE(1436, new byte[]{'H', 'Y', '0', '0', '0'}, "Thread stack overrun: %d bytes used of a" + - " %d byte stack, and %d bytes needed. Use 'mysqld --thread_stack=#' to specify a bigger stack."), + ERR_STACK_OVERRUN_NEED_MORE(1436, new byte[]{'H', 'Y', '0', '0', '0'}, "Thread stack overrun: %d bytes used of a" + + " %d byte stack, and %d bytes needed. 
Use 'mysqld --thread_stack=#' to specify a bigger stack."), ERR_TOO_LONG_BODY(1437, new byte[]{'4', '2', '0', '0', '0'}, "Routine body for '%s' is too long"), ERR_WARN_CANT_DROP_DEFAULT_KEYCACHE(1438, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot drop default keycache"), - ERR_TOO_BIG_DISPLAYWIDTH(1439, new byte[]{'4', '2', '0', '0', '0'}, "Display width out of range for column '%s' " + - "(max = %d)"), + ERR_TOO_BIG_DISPLAYWIDTH(1439, new byte[]{'4', '2', '0', '0', '0'}, "Display width out of range for column '%s' " + + "(max = %d)"), ERR_XAER_DUPID(1440, new byte[]{'X', 'A', 'E', '0', '8'}, "XAER_DUPID: The XID already exists"), ERR_DATETIME_FUNCTION_OVERFLOW(1441, new byte[]{'2', '2', '0', '0', '8'}, "Datetime function: %s field overflow"), - ERR_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG(1442, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't update table '%s' in " + - "stored function/trigger because it is already used by statement which invoked this stored " + - "function/trigger."), - ERR_VIEW_PREVENT_UPDATE(1443, new byte[]{'H', 'Y', '0', '0', '0'}, "The definition of table '%s' prevents " + - "operation %s on table '%s'."), - ERR_PS_NO_RECURSION(1444, new byte[]{'H', 'Y', '0', '0', '0'}, "The prepared statement contains a stored routine " + - "call that refers to that same statement. It's not allowed to execute a prepared statement in such a " + - "recursive " + - "manner"), - ERR_SP_CANT_SET_AUTOCOMMIT(1445, new byte[]{'H', 'Y', '0', '0', '0'}, "Not allowed to set autocommit from a " + - "stored function or trigger"), + ERR_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG(1442, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't update table '%s' in " + + "stored function/trigger because it is already used by statement which invoked this stored " + + "function/trigger."), + ERR_VIEW_PREVENT_UPDATE(1443, new byte[]{'H', 'Y', '0', '0', '0'}, "The definition of table '%s' prevents " + + "operation %s on table '%s'."), + ERR_PS_NO_RECURSION(1444, new byte[]{'H', 'Y', '0', '0', '0'}, "The prepared statement contains a stored routine " + + "call that refers to that same statement. It's not allowed to execute a prepared statement in such a " + + "recursive " + + "manner"), + ERR_SP_CANT_SET_AUTOCOMMIT(1445, new byte[]{'H', 'Y', '0', '0', '0'}, "Not allowed to set autocommit from a " + + "stored function or trigger"), ERR_MALFORMED_DEFINER(1446, new byte[]{'H', 'Y', '0', '0', '0'}, "Definer is not fully qualified"), - ERR_VIEW_FRM_NO_USER(1447, new byte[]{'H', 'Y', '0', '0', '0'}, "View '%s'.'%s' has no definer information (old " + - "table format). Current user is used as definer. 
Please recreate the view!"), - ERR_VIEW_OTHER_USER(1448, new byte[]{'H', 'Y', '0', '0', '0'}, "You need the SUPER privilege for creation view " + - "with '%s'@'%s' definer"), - ERR_NO_SUCH_USER(1449, new byte[]{'H', 'Y', '0', '0', '0'}, "The user specified as a definer ('%s'@'%s') does not" + - " exist"), - ERR_FORBID_SCHEMA_CHANGE(1450, new byte[]{'H', 'Y', '0', '0', '0'}, "Changing schema from '%s' to '%s' is not " + - "allowed."), - ERR_ROW_IS_REFERENCED_2(1451, new byte[]{'2', '3', '0', '0', '0'}, "Cannot delete or update a parent row: a " + - "foreign key constraint fails (%s)"), - ERR_NO_REFERENCED_ROW_2(1452, new byte[]{'2', '3', '0', '0', '0'}, "Cannot add or update a child row: a foreign " + - "key constraint fails (%s)"), - ERR_SP_BAD_VAR_SHADOW(1453, new byte[]{'4', '2', '0', '0', '0'}, "Variable '%s' must be quoted with `...`, or " + - "renamed"), - ERR_TRG_NO_DEFINER(1454, new byte[]{'H', 'Y', '0', '0', '0'}, "No definer attribute for trigger '%s'.'%s'. The " + - "trigger will be activated under the authorization of the invoker, which may have insufficient privileges" + - ". " + - "Please recreate the trigger."), - ERR_OLD_FILE_FORMAT(1455, new byte[]{'H', 'Y', '0', '0', '0'}, "'%s' has an old format, you should re-create the " + - "'%s' object(s)"), - ERR_SP_RECURSION_LIMIT(1456, new byte[]{'H', 'Y', '0', '0', '0'}, "Recursive limit %d (as set by the " + - "max_sp_recursion_depth variable) was exceeded for routine %s"), - ERR_SP_PROC_TABLE_CORRUPT(1457, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to load routine %s. The table mysql" + - ".proc is missing, corrupt, or contains bad data (internal code %d)"), + ERR_VIEW_FRM_NO_USER(1447, new byte[]{'H', 'Y', '0', '0', '0'}, "View '%s'.'%s' has no definer information (old " + + "table format). Current user is used as definer. Please recreate the view!"), + ERR_VIEW_OTHER_USER(1448, new byte[]{'H', 'Y', '0', '0', '0'}, "You need the SUPER privilege for creation view " + + "with '%s'@'%s' definer"), + ERR_NO_SUCH_USER(1449, new byte[]{'H', 'Y', '0', '0', '0'}, "The user specified as a definer ('%s'@'%s') does not" + + " exist"), + ERR_FORBID_SCHEMA_CHANGE(1450, new byte[]{'H', 'Y', '0', '0', '0'}, "Changing schema from '%s' to '%s' is not " + + "allowed."), + ERR_ROW_IS_REFERENCED_2(1451, new byte[]{'2', '3', '0', '0', '0'}, "Cannot delete or update a parent row: a " + + "foreign key constraint fails (%s)"), + ERR_NO_REFERENCED_ROW_2(1452, new byte[]{'2', '3', '0', '0', '0'}, "Cannot add or update a child row: a foreign " + + "key constraint fails (%s)"), + ERR_SP_BAD_VAR_SHADOW(1453, new byte[]{'4', '2', '0', '0', '0'}, "Variable '%s' must be quoted with `...`, or " + + "renamed"), + ERR_TRG_NO_DEFINER(1454, new byte[]{'H', 'Y', '0', '0', '0'}, "No definer attribute for trigger '%s'.'%s'. The " + + "trigger will be activated under the authorization of the invoker, which may have insufficient privileges" + + ". " + + "Please recreate the trigger."), + ERR_OLD_FILE_FORMAT(1455, new byte[]{'H', 'Y', '0', '0', '0'}, "'%s' has an old format, you should re-create the " + + "'%s' object(s)"), + ERR_SP_RECURSION_LIMIT(1456, new byte[]{'H', 'Y', '0', '0', '0'}, "Recursive limit %d (as set by the " + + "max_sp_recursion_depth variable) was exceeded for routine %s"), + ERR_SP_PROC_TABLE_CORRUPT(1457, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to load routine %s. 
The table mysql" + + ".proc is missing, corrupt, or contains bad data (internal code %d)"), ERR_SP_WRONG_NAME(1458, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect routine name '%s'"), - ERR_TABLE_NEEDS_UPGRADE(1459, new byte[]{'H', 'Y', '0', '0', '0'}, "Table upgrade required. Please do \"REPAIR " + - "TABLE `%s`\" or dump/reload to fix it!"), + ERR_TABLE_NEEDS_UPGRADE(1459, new byte[]{'H', 'Y', '0', '0', '0'}, "Table upgrade required. Please do \"REPAIR " + + "TABLE `%s`\" or dump/reload to fix it!"), ERR_SP_NO_AGGREGATE(1460, new byte[]{'4', '2', '0', '0', '0'}, "AGGREGATE is not supported for stored functions"), - ERR_MAX_PREPARED_STMT_COUNT_REACHED(1461, new byte[]{'4', '2', '0', '0', '0'}, "Can't create more than " + - "max_prepared_stmt_count statements (current value: %d)"), + ERR_MAX_PREPARED_STMT_COUNT_REACHED(1461, new byte[]{'4', '2', '0', '0', '0'}, "Can't create more than " + + "max_prepared_stmt_count statements (current value: %d)"), ERR_VIEW_RECURSIVE(1462, new byte[]{'H', 'Y', '0', '0', '0'}, "`%s`.`%s` contains view recursion"), - ERR_NON_GROUPING_FIELD_USED(1463, new byte[]{'4', '2', '0', '0', '0'}, "Non-grouping field '%s' is used in %s " + - "clause"), - ERR_TABLE_CANT_HANDLE_SPKEYS(1464, new byte[]{'H', 'Y', '0', '0', '0'}, "The used table type doesn't support " + - "SPATIAL indexes"), - ERR_NO_TRIGGERS_ON_SYSTEM_SCHEMA(1465, new byte[]{'H', 'Y', '0', '0', '0'}, "Triggers can not be created on " + - "system tables"), + ERR_NON_GROUPING_FIELD_USED(1463, new byte[]{'4', '2', '0', '0', '0'}, "Non-grouping field '%s' is used in %s " + + "clause"), + ERR_TABLE_CANT_HANDLE_SPKEYS(1464, new byte[]{'H', 'Y', '0', '0', '0'}, "The used table type doesn't support " + + "SPATIAL indexes"), + ERR_NO_TRIGGERS_ON_SYSTEM_SCHEMA(1465, new byte[]{'H', 'Y', '0', '0', '0'}, "Triggers can not be created on " + + "system tables"), ERR_REMOVED_SPACES(1466, new byte[]{'H', 'Y', '0', '0', '0'}, "Leading spaces are removed from name '%s'"), - ERR_AUTOINC_READ_FAILED(1467, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to read auto-increment value from " + - "storage engine"), + ERR_AUTOINC_READ_FAILED(1467, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to read auto-increment value from " + + "storage engine"), ERR_USERNAME(1468, new byte[]{'H', 'Y', '0', '0', '0'}, "user name"), ERR_HOSTNAME(1469, new byte[]{'H', 'Y', '0', '0', '0'}, "host name"), - ERR_WRONG_STRING_LENGTH(1470, new byte[]{'H', 'Y', '0', '0', '0'}, "String '%s' is too long for %s (should be no " + - "longer than %d)"), - ERR_NON_INSERTABLE_TABLE(1471, new byte[]{'H', 'Y', '0', '0', '0'}, "The target table %s of the %s is not " + - "insertable-into"), - ERR_ADMIN_WRONG_MRG_TABLE(1472, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s' is differently defined or of " + - "non-MyISAM type or doesn't exist"), - ERR_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT(1473, new byte[]{'H', 'Y', '0', '0', '0'}, "Too high level of nesting " + - "for select"), + ERR_WRONG_STRING_LENGTH(1470, new byte[]{'H', 'Y', '0', '0', '0'}, "String '%s' is too long for %s (should be no " + + "longer than %d)"), + ERR_NON_INSERTABLE_TABLE(1471, new byte[]{'H', 'Y', '0', '0', '0'}, "The target table %s of the %s is not " + + "insertable-into"), + ERR_ADMIN_WRONG_MRG_TABLE(1472, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s' is differently defined or of " + + "non-MyISAM type or doesn't exist"), + ERR_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT(1473, new byte[]{'H', 'Y', '0', '0', '0'}, "Too high level of nesting " + + "for select"), ERR_NAME_BECOMES_EMPTY(1474, new 
byte[]{'H', 'Y', '0', '0', '0'}, "Name '%s' has become ''"), - ERR_AMBIGUOUS_FIELD_TERM(1475, new byte[]{'H', 'Y', '0', '0', '0'}, "First character of the FIELDS TERMINATED " + - "string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY"), - ERR_FOREIGN_SERVER_EXISTS(1476, new byte[]{'H', 'Y', '0', '0', '0'}, "The foreign server, %s, you are trying to " + - "create already exists."), - ERR_FOREIGN_SERVER_DOESNT_EXIST(1477, new byte[]{'H', 'Y', '0', '0', '0'}, "The foreign server name you are " + - "trying to reference does not exist. Data source error: %s"), - ERR_ILLEGAL_HA_CREATE_OPTION(1478, new byte[]{'H', 'Y', '0', '0', '0'}, "Table storage engine '%s' does not " + - "support the create option '%s'"), - ERR_PARTITION_REQUIRES_VALUES_ERROR(1479, new byte[]{'H', 'Y', '0', '0', '0'}, "Syntax error: %s PARTITIONING " + - "requires definition of VALUES %s for each partition"), - ERR_PARTITION_WRONG_VALUES_ERROR(1480, new byte[]{'H', 'Y', '0', '0', '0'}, "Only %s PARTITIONING can use VALUES " + - "%s in partition definition"), - ERR_PARTITION_MAXVALUE_ERROR(1481, new byte[]{'H', 'Y', '0', '0', '0'}, "MAXVALUE can only be used in last " + - "partition definition"), - ERR_PARTITION_SUBPARTITION_ERROR(1482, new byte[]{'H', 'Y', '0', '0', '0'}, "Subpartitions can only be hash " + - "partitions and by key"), - ERR_PARTITION_SUBPART_MIX_ERROR(1483, new byte[]{'H', 'Y', '0', '0', '0'}, "Must define subpartitions on all " + - "partitions if on one partition"), - ERR_PARTITION_WRONG_NO_PART_ERROR(1484, new byte[]{'H', 'Y', '0', '0', '0'}, "Wrong number of partitions defined," + - " mismatch with previous setting"), - ERR_PARTITION_WRONG_NO_SUBPART_ERROR(1485, new byte[]{'H', 'Y', '0', '0', '0'}, "Wrong number of subpartitions " + - "defined, mismatch with previous setting"), - ERR_CONST_EXPR_IN_PARTITION_FUNC_ERROR(1486, new byte[]{'H', 'Y', '0', '0', '0'}, "Constant/Random expression in " + - "(sub)partitioning function is not allowed"), - ERR_WRONG_EXPR_IN_PARTITION_FUNC_ERROR(1486, new byte[]{'H', 'Y', '0', '0', '0'}, "Constant, random or " + - "timezone-dependent expressions in (sub)partitioning function are not allowed"), - ERR_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR(1487, new byte[]{'H', 'Y', '0', '0', '0'}, "Expression in RANGE/LIST " + - "VALUES must be constant"), - ERR_FIELD_NOT_FOUND_PART_ERROR(1488, new byte[]{'H', 'Y', '0', '0', '0'}, "Field in list of fields for partition " + - "function not found in table"), - ERR_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR(1489, new byte[]{'H', 'Y', '0', '0', '0'}, "List of fields is only allowed " + - "in KEY partitions"), - ERR_INCONSISTENT_PARTITION_INFO_ERROR(1490, new byte[]{'H', 'Y', '0', '0', '0'}, "The partition info in the frm " + - "file is not consistent with what can be written into the frm file"), - ERR_PARTITION_FUNC_NOT_ALLOWED_ERROR(1491, new byte[]{'H', 'Y', '0', '0', '0'}, "The %s function returns the " + - "wrong type"), - ERR_PARTITIONS_MUST_BE_DEFINED_ERROR(1492, new byte[]{'H', 'Y', '0', '0', '0'}, "For %s partitions each partition" + - " must be defined"), - ERR_RANGE_NOT_INCREASING_ERROR(1493, new byte[]{'H', 'Y', '0', '0', '0'}, "VALUES LESS THAN value must be " + - "strictly increasing for each partition"), - ERR_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR(1494, new byte[]{'H', 'Y', '0', '0', '0'}, "VALUES value must be of same" + - " type as partition function"), - ERR_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR(1495, new byte[]{'H', 'Y', '0', '0', '0'}, "Multiple definition of same" + - " constant in list partitioning"), - 
ERR_PARTITION_ENTRY_ERROR(1496, new byte[]{'H', 'Y', '0', '0', '0'}, "Partitioning can not be used stand-alone in" + - " query"), - ERR_MIX_HANDLER_ERROR(1497, new byte[]{'H', 'Y', '0', '0', '0'}, "The mix of handlers in the partitions is not " + - "allowed in this version of MariaDB"), - ERR_PARTITION_NOT_DEFINED_ERROR(1498, new byte[]{'H', 'Y', '0', '0', '0'}, "For the partitioned engine it is " + - "necessary to define all %s"), - ERR_TOO_MANY_PARTITIONS_ERROR(1499, new byte[]{'H', 'Y', '0', '0', '0'}, "Too many partitions (including " + - "subpartitions) were defined"), - ERR_SUBPARTITION_ERROR(1500, new byte[]{'H', 'Y', '0', '0', '0'}, "It is only possible to mix RANGE/LIST " + - "partitioning with HASH/KEY partitioning for subpartitioning"), + ERR_AMBIGUOUS_FIELD_TERM(1475, new byte[]{'H', 'Y', '0', '0', '0'}, "First character of the FIELDS TERMINATED " + + "string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY"), + ERR_FOREIGN_SERVER_EXISTS(1476, new byte[]{'H', 'Y', '0', '0', '0'}, "The foreign server, %s, you are trying to " + + "create already exists."), + ERR_FOREIGN_SERVER_DOESNT_EXIST(1477, new byte[]{'H', 'Y', '0', '0', '0'}, "The foreign server name you are " + + "trying to reference does not exist. Data source error: %s"), + ERR_ILLEGAL_HA_CREATE_OPTION(1478, new byte[]{'H', 'Y', '0', '0', '0'}, "Table storage engine '%s' does not " + + "support the create option '%s'"), + ERR_PARTITION_REQUIRES_VALUES_ERROR(1479, new byte[]{'H', 'Y', '0', '0', '0'}, "Syntax error: %s PARTITIONING " + + "requires definition of VALUES %s for each partition"), + ERR_PARTITION_WRONG_VALUES_ERROR(1480, new byte[]{'H', 'Y', '0', '0', '0'}, "Only %s PARTITIONING can use VALUES " + + "%s in partition definition"), + ERR_PARTITION_MAXVALUE_ERROR(1481, new byte[]{'H', 'Y', '0', '0', '0'}, "MAXVALUE can only be used in last " + + "partition definition"), + ERR_PARTITION_SUBPARTITION_ERROR(1482, new byte[]{'H', 'Y', '0', '0', '0'}, "Subpartitions can only be hash " + + "partitions and by key"), + ERR_PARTITION_SUBPART_MIX_ERROR(1483, new byte[]{'H', 'Y', '0', '0', '0'}, "Must define subpartitions on all " + + "partitions if on one partition"), + ERR_PARTITION_WRONG_NO_PART_ERROR(1484, new byte[]{'H', 'Y', '0', '0', '0'}, "Wrong number of partitions defined," + + " mismatch with previous setting"), + ERR_PARTITION_WRONG_NO_SUBPART_ERROR(1485, new byte[]{'H', 'Y', '0', '0', '0'}, "Wrong number of subpartitions " + + "defined, mismatch with previous setting"), + ERR_CONST_EXPR_IN_PARTITION_FUNC_ERROR(1486, new byte[]{'H', 'Y', '0', '0', '0'}, "Constant/Random expression in " + + "(sub)partitioning function is not allowed"), + ERR_WRONG_EXPR_IN_PARTITION_FUNC_ERROR(1486, new byte[]{'H', 'Y', '0', '0', '0'}, "Constant, random or " + + "timezone-dependent expressions in (sub)partitioning function are not allowed"), + ERR_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR(1487, new byte[]{'H', 'Y', '0', '0', '0'}, "Expression in RANGE/LIST " + + "VALUES must be constant"), + ERR_FIELD_NOT_FOUND_PART_ERROR(1488, new byte[]{'H', 'Y', '0', '0', '0'}, "Field in list of fields for partition " + + "function not found in table"), + ERR_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR(1489, new byte[]{'H', 'Y', '0', '0', '0'}, "List of fields is only allowed " + + "in KEY partitions"), + ERR_INCONSISTENT_PARTITION_INFO_ERROR(1490, new byte[]{'H', 'Y', '0', '0', '0'}, "The partition info in the frm " + + "file is not consistent with what can be written into the frm file"), + ERR_PARTITION_FUNC_NOT_ALLOWED_ERROR(1491, new 
byte[]{'H', 'Y', '0', '0', '0'}, "The %s function returns the " + + "wrong type"), + ERR_PARTITIONS_MUST_BE_DEFINED_ERROR(1492, new byte[]{'H', 'Y', '0', '0', '0'}, "For %s partitions each partition" + + " must be defined"), + ERR_RANGE_NOT_INCREASING_ERROR(1493, new byte[]{'H', 'Y', '0', '0', '0'}, "VALUES LESS THAN value must be " + + "strictly increasing for each partition"), + ERR_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR(1494, new byte[]{'H', 'Y', '0', '0', '0'}, "VALUES value must be of same" + + " type as partition function"), + ERR_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR(1495, new byte[]{'H', 'Y', '0', '0', '0'}, "Multiple definition of same" + + " constant in list partitioning"), + ERR_PARTITION_ENTRY_ERROR(1496, new byte[]{'H', 'Y', '0', '0', '0'}, "Partitioning can not be used stand-alone in" + + " query"), + ERR_MIX_HANDLER_ERROR(1497, new byte[]{'H', 'Y', '0', '0', '0'}, "The mix of handlers in the partitions is not " + + "allowed in this version of MariaDB"), + ERR_PARTITION_NOT_DEFINED_ERROR(1498, new byte[]{'H', 'Y', '0', '0', '0'}, "For the partitioned engine it is " + + "necessary to define all %s"), + ERR_TOO_MANY_PARTITIONS_ERROR(1499, new byte[]{'H', 'Y', '0', '0', '0'}, "Too many partitions (including " + + "subpartitions) were defined"), + ERR_SUBPARTITION_ERROR(1500, new byte[]{'H', 'Y', '0', '0', '0'}, "It is only possible to mix RANGE/LIST " + + "partitioning with HASH/KEY partitioning for subpartitioning"), ERR_CANT_CREATE_HANDLER_FILE(1501, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to create specific handler file"), - ERR_BLOB_FIELD_IN_PART_FUNC_ERROR(1502, new byte[]{'H', 'Y', '0', '0', '0'}, "A BLOB field is not allowed in " + - "partition function"), - ERR_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF(1503, new byte[]{'H', 'Y', '0', '0', '0'}, "A %s must include all columns in" + - " the table's partitioning function"), + ERR_BLOB_FIELD_IN_PART_FUNC_ERROR(1502, new byte[]{'H', 'Y', '0', '0', '0'}, "A BLOB field is not allowed in " + + "partition function"), + ERR_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF(1503, new byte[]{'H', 'Y', '0', '0', '0'}, "A %s must include all columns in" + + " the table's partitioning function"), ERR_NO_PARTS_ERROR(1504, new byte[]{'H', 'Y', '0', '0', '0'}, "Number of %s = 0 is not an allowed value"), - ERR_PARTITION_MGMT_ON_NONPARTITIONED(1505, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition management on a not " + - "partitioned table is not possible"), - ERR_FOREIGN_KEY_ON_PARTITIONED(1506, new byte[]{'H', 'Y', '0', '0', '0'}, "Foreign key clause is not yet " + - "supported in conjunction with partitioning"), + ERR_PARTITION_MGMT_ON_NONPARTITIONED(1505, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition management on a not " + + "partitioned table is not possible"), + ERR_FOREIGN_KEY_ON_PARTITIONED(1506, new byte[]{'H', 'Y', '0', '0', '0'}, "Foreign key clause is not yet " + + "supported in conjunction with partitioning"), ERR_DROP_PARTITION_NON_EXISTENT(1507, new byte[]{'H', 'Y', '0', '0', '0'}, "Error in list of partitions to %s"), - ERR_DROP_LAST_PARTITION(1508, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot remove all partitions, use DROP TABLE " + - "instead"), - ERR_COALESCE_ONLY_ON_HASH_PARTITION(1509, new byte[]{'H', 'Y', '0', '0', '0'}, "COALESCE PARTITION can only be " + - "used on HASH/KEY partitions"), - ERR_REORG_HASH_ONLY_ON_SAME_N(1510, new byte[]{'H', 'Y', '0', '0', '0'}, "REORGANIZE PARTITION can only be used " + - "to reorganize partitions not to change their numbers"), - ERR_REORG_NO_PARAM_ERROR(1511, new byte[]{'H', 'Y', '0', '0', '0'}, 
"REORGANIZE PARTITION without parameters can " + - "only be used on auto-partitioned tables using HASH PARTITIONs"), - ERR_ONLY_ON_RANGE_LIST_PARTITION(1512, new byte[]{'H', 'Y', '0', '0', '0'}, "%s PARTITION can only be used on " + - "RANGE/LIST partitions"), - ERR_ADD_PARTITION_SUBPART_ERROR(1513, new byte[]{'H', 'Y', '0', '0', '0'}, "Trying to Add partition(s) with wrong" + - " number of subpartitions"), - ERR_ADD_PARTITION_NO_NEW_PARTITION(1514, new byte[]{'H', 'Y', '0', '0', '0'}, "At least one partition must be " + - "added"), - ERR_COALESCE_PARTITION_NO_PARTITION(1515, new byte[]{'H', 'Y', '0', '0', '0'}, "At least one partition must be " + - "coalesced"), - ERR_REORG_PARTITION_NOT_EXIST(1516, new byte[]{'H', 'Y', '0', '0', '0'}, "More partitions to reorganize than " + - "there are partitions"), + ERR_DROP_LAST_PARTITION(1508, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot remove all partitions, use DROP TABLE " + + "instead"), + ERR_COALESCE_ONLY_ON_HASH_PARTITION(1509, new byte[]{'H', 'Y', '0', '0', '0'}, "COALESCE PARTITION can only be " + + "used on HASH/KEY partitions"), + ERR_REORG_HASH_ONLY_ON_SAME_N(1510, new byte[]{'H', 'Y', '0', '0', '0'}, "REORGANIZE PARTITION can only be used " + + "to reorganize partitions not to change their numbers"), + ERR_REORG_NO_PARAM_ERROR(1511, new byte[]{'H', 'Y', '0', '0', '0'}, "REORGANIZE PARTITION without parameters can " + + "only be used on auto-partitioned tables using HASH PARTITIONs"), + ERR_ONLY_ON_RANGE_LIST_PARTITION(1512, new byte[]{'H', 'Y', '0', '0', '0'}, "%s PARTITION can only be used on " + + "RANGE/LIST partitions"), + ERR_ADD_PARTITION_SUBPART_ERROR(1513, new byte[]{'H', 'Y', '0', '0', '0'}, "Trying to Add partition(s) with wrong" + + " number of subpartitions"), + ERR_ADD_PARTITION_NO_NEW_PARTITION(1514, new byte[]{'H', 'Y', '0', '0', '0'}, "At least one partition must be " + + "added"), + ERR_COALESCE_PARTITION_NO_PARTITION(1515, new byte[]{'H', 'Y', '0', '0', '0'}, "At least one partition must be " + + "coalesced"), + ERR_REORG_PARTITION_NOT_EXIST(1516, new byte[]{'H', 'Y', '0', '0', '0'}, "More partitions to reorganize than " + + "there are partitions"), ERR_SAME_NAME_PARTITION(1517, new byte[]{'H', 'Y', '0', '0', '0'}, "Duplicate partition name %s"), - ERR_NO_BINLOG_ERROR(1518, new byte[]{'H', 'Y', '0', '0', '0'}, "It is not allowed to shut off binlog on this " + - "command"), - ERR_CONSECUTIVE_REORG_PARTITIONS(1519, new byte[]{'H', 'Y', '0', '0', '0'}, "When reorganizing a set of " + - "partitions they must be in consecutive order"), - ERR_REORG_OUTSIDE_RANGE(1520, new byte[]{'H', 'Y', '0', '0', '0'}, "Reorganize of range partitions cannot change " + - "total ranges except for last partition where it can extend the range"), - ERR_PARTITION_FUNCTION_FAILURE(1521, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition function not supported in " + - "this version for this handler"), - ERR_PART_STATE_ERROR(1522, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition state cannot be defined from " + - "CREATE/ALTER TABLE"), - ERR_LIMITED_PART_RANGE(1523, new byte[]{'H', 'Y', '0', '0', '0'}, "The %s handler only supports 32 bit integers " + - "in VALUES"), + ERR_NO_BINLOG_ERROR(1518, new byte[]{'H', 'Y', '0', '0', '0'}, "It is not allowed to shut off binlog on this " + + "command"), + ERR_CONSECUTIVE_REORG_PARTITIONS(1519, new byte[]{'H', 'Y', '0', '0', '0'}, "When reorganizing a set of " + + "partitions they must be in consecutive order"), + ERR_REORG_OUTSIDE_RANGE(1520, new byte[]{'H', 'Y', '0', '0', '0'}, "Reorganize of range partitions 
cannot change " + + "total ranges except for last partition where it can extend the range"), + ERR_PARTITION_FUNCTION_FAILURE(1521, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition function not supported in " + + "this version for this handler"), + ERR_PART_STATE_ERROR(1522, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition state cannot be defined from " + + "CREATE/ALTER TABLE"), + ERR_LIMITED_PART_RANGE(1523, new byte[]{'H', 'Y', '0', '0', '0'}, "The %s handler only supports 32 bit integers " + + "in VALUES"), ERR_PLUGIN_IS_NOT_LOADED(1524, new byte[]{'H', 'Y', '0', '0', '0'}, "Plugin '%s' is not loaded"), ERR_WRONG_VALUE(1525, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect %s value: '%s'"), ERR_NO_PARTITION_FOR_GIVEN_VALUE(1526, new byte[]{'H', 'Y', '0', '0', '0'}, "Table has no partition for value %s"), - ERR_FILEGROUP_OPTION_ONLY_ONCE(1527, new byte[]{'H', 'Y', '0', '0', '0'}, "It is not allowed to specify %s more " + - "than once"), + ERR_FILEGROUP_OPTION_ONLY_ONCE(1527, new byte[]{'H', 'Y', '0', '0', '0'}, "It is not allowed to specify %s more " + + "than once"), ERR_CREATE_FILEGROUP_FAILED(1528, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to create %s"), ERR_DROP_FILEGROUP_FAILED(1529, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to drop %s"), - ERR_TABLESPACE_AUTO_EXTEND_ERROR(1530, new byte[]{'H', 'Y', '0', '0', '0'}, "The handler doesn't support " + - "autoextend of tablespaces"), - ERR_WRONG_SIZE_NUMBER(1531, new byte[]{'H', 'Y', '0', '0', '0'}, "A size parameter was incorrectly specified, " + - "either number or on the form 10M"), - ERR_SIZE_OVERFLOW_ERROR(1532, new byte[]{'H', 'Y', '0', '0', '0'}, "The size number was correct but we don't " + - "allow the digit part to be more than 2 billion"), + ERR_TABLESPACE_AUTO_EXTEND_ERROR(1530, new byte[]{'H', 'Y', '0', '0', '0'}, "The handler doesn't support " + + "autoextend of tablespaces"), + ERR_WRONG_SIZE_NUMBER(1531, new byte[]{'H', 'Y', '0', '0', '0'}, "A size parameter was incorrectly specified, " + + "either number or on the form 10M"), + ERR_SIZE_OVERFLOW_ERROR(1532, new byte[]{'H', 'Y', '0', '0', '0'}, "The size number was correct but we don't " + + "allow the digit part to be more than 2 billion"), ERR_ALTER_FILEGROUP_FAILED(1533, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to alter: %s"), - ERR_BINLOG_ROW_LOGGING_FAILED(1534, new byte[]{'H', 'Y', '0', '0', '0'}, "Writing one row to the row-based binary" + - " log failed"), - ERR_BINLOG_ROW_WRONG_TABLE_DEF(1535, new byte[]{'H', 'Y', '0', '0', '0'}, "Table definition on master and slave " + - "does not match: %s"), - ERR_BINLOG_ROW_RBR_TO_SBR(1536, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave running with --log-slave-updates must" + - " use row-based binary logging to be able to replicate row-based binary log events"), + ERR_BINLOG_ROW_LOGGING_FAILED(1534, new byte[]{'H', 'Y', '0', '0', '0'}, "Writing one row to the row-based binary" + + " log failed"), + ERR_BINLOG_ROW_WRONG_TABLE_DEF(1535, new byte[]{'H', 'Y', '0', '0', '0'}, "Table definition on master and slave " + + "does not match: %s"), + ERR_BINLOG_ROW_RBR_TO_SBR(1536, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave running with --log-slave-updates must" + + " use row-based binary logging to be able to replicate row-based binary log events"), ERR_EVENT_ALREADY_EXISTS(1537, new byte[]{'H', 'Y', '0', '0', '0'}, "Event '%s' already exists"), - ERR_EVENT_STORE_FAILED(1538, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to store event %s. 
Error code %d from " + - "storage engine."), + ERR_EVENT_STORE_FAILED(1538, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to store event %s. Error code %d from " + + "storage engine."), ERR_EVENT_DOES_NOT_EXIST(1539, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown event '%s'"), ERR_EVENT_CANT_ALTER(1540, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to alter event '%s'"), ERR_EVENT_DROP_FAILED(1541, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to drop %s"), - ERR_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG(1542, new byte[]{'H', 'Y', '0', '0', '0'}, "INTERVAL is either not " + - "positive or too big"), + ERR_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG(1542, new byte[]{'H', 'Y', '0', '0', '0'}, "INTERVAL is either not " + + "positive or too big"), ERR_EVENT_ENDS_BEFORE_STARTS(1543, new byte[]{'H', 'Y', '0', '0', '0'}, "ENDS is either invalid or before STARTS"), - ERR_EVENT_EXEC_TIME_IN_THE_PAST(1544, new byte[]{'H', 'Y', '0', '0', '0'}, "Event execution time is in the past. " + - "Event has been disabled"), + ERR_EVENT_EXEC_TIME_IN_THE_PAST(1544, new byte[]{'H', 'Y', '0', '0', '0'}, "Event execution time is in the past. " + + "Event has been disabled"), ERR_EVENT_OPEN_TABLE_FAILED(1545, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to open mysql.event"), ERR_EVENT_NEITHER_M_EXPR_NOR_M_AT(1546, new byte[]{'H', 'Y', '0', '0', '0'}, "No datetime expression provided"), - ERR_COL_COUNT_DOESNT_MATCH_CORRUPTED(1547, new byte[]{'H', 'Y', '0', '0', '0'}, "Column count of mysql.%s is " + - "wrong. Expected %d, found %d. The table is probably corrupted"), - ERR_CANNOT_LOAD_FROM_TABLE(1548, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot load from mysql.%s. The table is " + - "probably corrupted"), + ERR_COL_COUNT_DOESNT_MATCH_CORRUPTED(1547, new byte[]{'H', 'Y', '0', '0', '0'}, "Column count of mysql.%s is " + + "wrong. Expected %d, found %d. The table is probably corrupted"), + ERR_CANNOT_LOAD_FROM_TABLE(1548, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot load from mysql.%s. The table is " + + "probably corrupted"), ERR_EVENT_CANNOT_DELETE(1549, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to delete the event from mysql.event"), ERR_EVENT_COMPILE_ERROR(1550, new byte[]{'H', 'Y', '0', '0', '0'}, "Error during compilation of event's body"), ERR_EVENT_SAME_NAME(1551, new byte[]{'H', 'Y', '0', '0', '0'}, "Same old and new event name"), ERR_EVENT_DATA_TOO_LONG(1552, new byte[]{'H', 'Y', '0', '0', '0'}, "Data for column '%s' too long"), - ERR_DROP_INDEX_FK(1553, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot drop index '%s': needed in a foreign key " + - "constraint"), - ERR_WARN_DEPRECATED_SYNTAX_WITH_VER(1554, new byte[]{'H', 'Y', '0', '0', '0'}, "The syntax '%s' is deprecated and" + - " will be removed in MariaDB %s. Please use %s instead"), - ERR_CANT_WRITE_LOCK_LOG_TABLE(1555, new byte[]{'H', 'Y', '0', '0', '0'}, "You can't write-lock a log table. Only " + - "read access is possible"), + ERR_DROP_INDEX_FK(1553, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot drop index '%s': needed in a foreign key " + + "constraint"), + ERR_WARN_DEPRECATED_SYNTAX_WITH_VER(1554, new byte[]{'H', 'Y', '0', '0', '0'}, "The syntax '%s' is deprecated and" + + " will be removed in MariaDB %s. Please use %s instead"), + ERR_CANT_WRITE_LOCK_LOG_TABLE(1555, new byte[]{'H', 'Y', '0', '0', '0'}, "You can't write-lock a log table. 
Only " + + "read access is possible"), ERR_CANT_LOCK_LOG_TABLE(1556, new byte[]{'H', 'Y', '0', '0', '0'}, "You can't use locks with log tables."), - ERR_FOREIGN_DUPLICATE_KEY(1557, new byte[]{'2', '3', '0', '0', '0'}, "Upholding foreign key constraints for table" + - " '%s', entry '%s', key %d would lead to a duplicate entry"), - ERR_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE(1558, new byte[]{'H', 'Y', '0', '0', '0'}, "Column count of mysql.%s is " + - "wrong. Expected %d, found %d. Created with MariaDB %d, now running %d. Please use mysql_upgrade to fix " + - "this " + - "error."), - ERR_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR(1559, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot switch out of the " + - "row-based binary log format when the session has open temporary tables"), - ERR_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT(1560, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot change the " + - "binary logging format inside a stored function or trigger"), + ERR_FOREIGN_DUPLICATE_KEY(1557, new byte[]{'2', '3', '0', '0', '0'}, "Upholding foreign key constraints for table" + + " '%s', entry '%s', key %d would lead to a duplicate entry"), + ERR_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE(1558, new byte[]{'H', 'Y', '0', '0', '0'}, "Column count of mysql.%s is " + + "wrong. Expected %d, found %d. Created with MariaDB %d, now running %d. Please use mysql_upgrade to fix " + + "this " + + "error."), + ERR_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR(1559, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot switch out of the " + + "row-based binary log format when the session has open temporary tables"), + ERR_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT(1560, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot change the " + + "binary logging format inside a stored function or trigger"), ERR_UNUSED_13(1561, new byte[]{}, "You should never see it"), - ERR_PARTITION_NO_TEMPORARY(1562, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot create temporary table with " + - "partitions"), - ERR_PARTITION_CONST_DOMAIN_ERROR(1563, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition constant is out of " + - "partition function domain"), - ERR_PARTITION_FUNCTION_IS_NOT_ALLOWED(1564, new byte[]{'H', 'Y', '0', '0', '0'}, "This partition function is not " + - "allowed"), + ERR_PARTITION_NO_TEMPORARY(1562, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot create temporary table with " + + "partitions"), + ERR_PARTITION_CONST_DOMAIN_ERROR(1563, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition constant is out of " + + "partition function domain"), + ERR_PARTITION_FUNCTION_IS_NOT_ALLOWED(1564, new byte[]{'H', 'Y', '0', '0', '0'}, "This partition function is not " + + "allowed"), ERR_DDL_LOG_ERROR(1565, new byte[]{'H', 'Y', '0', '0', '0'}, "Error in DDL log"), - ERR_NULL_IN_VALUES_LESS_THAN(1566, new byte[]{'H', 'Y', '0', '0', '0'}, "Not allowed to use NULL value in VALUES " + - "LESS THAN"), + ERR_NULL_IN_VALUES_LESS_THAN(1566, new byte[]{'H', 'Y', '0', '0', '0'}, "Not allowed to use NULL value in VALUES " + + "LESS THAN"), ERR_WRONG_PARTITION_NAME(1567, new byte[]{'H', 'Y', '0', '0', '0'}, "Incorrect partition name"), - ERR_CANT_CHANGE_TX_ISOLATION(1568, new byte[]{'2', '5', '0', '0', '1'}, "Transaction isolation level can't be " + - "changed while a transaction is in progress"), - ERR_DUP_ENTRY_AUTOINCREMENT_CASE(1569, new byte[]{'H', 'Y', '0', '0', '0'}, "ALTER TABLE causes auto_increment " + - "resequencing, resulting in duplicate entry '%s' for key '%s'"), + ERR_CANT_CHANGE_TX_ISOLATION(1568, new byte[]{'2', '5', '0', '0', '1'}, "Transaction isolation level can't 
be " + + "changed while a transaction is in progress"), + ERR_DUP_ENTRY_AUTOINCREMENT_CASE(1569, new byte[]{'H', 'Y', '0', '0', '0'}, "ALTER TABLE causes auto_increment " + + "resequencing, resulting in duplicate entry '%s' for key '%s'"), ERR_EVENT_MODIFY_QUEUE_ERROR(1570, new byte[]{'H', 'Y', '0', '0', '0'}, "Internal scheduler error %d"), - ERR_EVENT_SET_VAR_ERROR(1571, new byte[]{'H', 'Y', '0', '0', '0'}, "Error during starting/stopping of the " + - "scheduler. Error code %u"), + ERR_EVENT_SET_VAR_ERROR(1571, new byte[]{'H', 'Y', '0', '0', '0'}, "Error during starting/stopping of the " + + "scheduler. Error code %u"), ERR_PARTITION_MERGE_ERROR(1572, new byte[]{'H', 'Y', '0', '0', '0'}, "Engine cannot be used in partitioned tables"), ERR_CANT_ACTIVATE_LOG(1573, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot activate '%s' log"), - ERR_RBR_NOT_AVAILABLE(1574, new byte[]{'H', 'Y', '0', '0', '0'}, "The server was not built with row-based " + - "replication"), + ERR_RBR_NOT_AVAILABLE(1574, new byte[]{'H', 'Y', '0', '0', '0'}, "The server was not built with row-based " + + "replication"), ERR_BASE64_DECODE_ERROR(1575, new byte[]{'H', 'Y', '0', '0', '0'}, "Decoding of base64 string failed"), - ERR_EVENT_RECURSION_FORBIDDEN(1576, new byte[]{'H', 'Y', '0', '0', '0'}, "Recursion of EVENT DDL statements is " + - "forbidden when body is present"), - ERR_EVENTS_DB_ERROR(1577, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot proceed because system tables used by " + - "Event Scheduler were found damaged at server start"), + ERR_EVENT_RECURSION_FORBIDDEN(1576, new byte[]{'H', 'Y', '0', '0', '0'}, "Recursion of EVENT DDL statements is " + + "forbidden when body is present"), + ERR_EVENTS_DB_ERROR(1577, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot proceed because system tables used by " + + "Event Scheduler were found damaged at server start"), ERR_ONLY_INTEGERS_ALLOWED(1578, new byte[]{'H', 'Y', '0', '0', '0'}, "Only integers allowed as number here"), - ERR_UNSUPORTED_LOG_ENGINE(1579, new byte[]{'H', 'Y', '0', '0', '0'}, "This storage engine cannot be used for log " + - "tables"), - ERR_BAD_LOG_STATEMENT(1580, new byte[]{'H', 'Y', '0', '0', '0'}, "You cannot '%s' a log table if logging is " + - "enabled"), - ERR_CANT_RENAME_LOG_TABLE(1581, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot rename '%s'. When logging enabled, " + - "rename to/from log table must rename two tables: the log table to an archive table and another table " + - "back to " + - "'%s'"), - ERR_WRONG_PARAMCOUNT_TO_NATIVE_FCT(1582, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect parameter count in the " + - "call to native function '%s'"), - ERR_WRONG_PARAMETERS_TO_NATIVE_FCT(1583, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect parameters in the call " + - "to native function '%s'"), - ERR_WRONG_PARAMETERS_TO_STORED_FCT(1584, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect parameters in the call " + - "to stored function '%s'"), - ERR_NATIVE_FCT_NAME_COLLISION(1585, new byte[]{'H', 'Y', '0', '0', '0'}, "This function '%s' has the same name as" + - " a native function"), + ERR_UNSUPORTED_LOG_ENGINE(1579, new byte[]{'H', 'Y', '0', '0', '0'}, "This storage engine cannot be used for log " + + "tables"), + ERR_BAD_LOG_STATEMENT(1580, new byte[]{'H', 'Y', '0', '0', '0'}, "You cannot '%s' a log table if logging is " + + "enabled"), + ERR_CANT_RENAME_LOG_TABLE(1581, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot rename '%s'. 
When logging enabled, " + + "rename to/from log table must rename two tables: the log table to an archive table and another table " + + "back to " + + "'%s'"), + ERR_WRONG_PARAMCOUNT_TO_NATIVE_FCT(1582, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect parameter count in the " + + "call to native function '%s'"), + ERR_WRONG_PARAMETERS_TO_NATIVE_FCT(1583, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect parameters in the call " + + "to native function '%s'"), + ERR_WRONG_PARAMETERS_TO_STORED_FCT(1584, new byte[]{'4', '2', '0', '0', '0'}, "Incorrect parameters in the call " + + "to stored function '%s'"), + ERR_NATIVE_FCT_NAME_COLLISION(1585, new byte[]{'H', 'Y', '0', '0', '0'}, "This function '%s' has the same name as" + + " a native function"), ERR_DUP_ENTRY_WITH_KEY_NAME(1586, new byte[]{'2', '3', '0', '0', '0'}, "Duplicate entry '%s' for key '%s'"), - ERR_BINLOG_PURGE_EMFILE(1587, new byte[]{'H', 'Y', '0', '0', '0'}, "Too many files opened, please execute the " + - "command again"), - ERR_EVENT_CANNOT_CREATE_IN_THE_PAST(1588, new byte[]{'H', 'Y', '0', '0', '0'}, "Event execution time is in the " + - "past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."), - ERR_EVENT_CANNOT_ALTER_IN_THE_PAST(1589, new byte[]{'H', 'Y', '0', '0', '0'}, "Event execution time is in the " + - "past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."), + ERR_BINLOG_PURGE_EMFILE(1587, new byte[]{'H', 'Y', '0', '0', '0'}, "Too many files opened, please execute the " + + "command again"), + ERR_EVENT_CANNOT_CREATE_IN_THE_PAST(1588, new byte[]{'H', 'Y', '0', '0', '0'}, "Event execution time is in the " + + "past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."), + ERR_EVENT_CANNOT_ALTER_IN_THE_PAST(1589, new byte[]{'H', 'Y', '0', '0', '0'}, "Event execution time is in the " + + "past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."), ERR_SLAVE_INCIDENT(1590, new byte[]{'H', 'Y', '0', '0', '0'}, "The incident %s occurred on the master. Message: %s"), - ERR_NO_PARTITION_FOR_GIVEN_VALUE_SILENT(1591, new byte[]{'H', 'Y', '0', '0', '0'}, "Table has no partition for " + - "some existing values"), - ERR_BINLOG_UNSAFE_STATEMENT(1592, new byte[]{'H', 'Y', '0', '0', '0'}, "Unsafe statement written to the binary " + - "log using statement format since BINLOG_FORMAT = STATEMENT. %s"), + ERR_NO_PARTITION_FOR_GIVEN_VALUE_SILENT(1591, new byte[]{'H', 'Y', '0', '0', '0'}, "Table has no partition for " + + "some existing values"), + ERR_BINLOG_UNSAFE_STATEMENT(1592, new byte[]{'H', 'Y', '0', '0', '0'}, "Unsafe statement written to the binary " + + "log using statement format since BINLOG_FORMAT = STATEMENT. %s"), ERR_SLAVE_FATAL_ERROR(1593, new byte[]{'H', 'Y', '0', '0', '0'}, "Fatal error: %s"), ERR_SLAVE_RELAY_LOG_READ_FAILURE(1594, new byte[]{'H', 'Y', '0', '0', '0'}, "Relay log read failure: %s"), ERR_SLAVE_RELAY_LOG_WRITE_FAILURE(1595, new byte[]{'H', 'Y', '0', '0', '0'}, "Relay log write failure: %s"), ERR_SLAVE_CREATE_EVENT_FAILURE(1596, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to create %s"), ERR_SLAVE_MASTER_COM_FAILURE(1597, new byte[]{'H', 'Y', '0', '0', '0'}, "Master command %s failed: %s"), - ERR_BINLOG_LOGGING_IMPOSSIBLE(1598, new byte[]{'H', 'Y', '0', '0', '0'}, "Binary logging not possible. Message: " + - "%s"), + ERR_BINLOG_LOGGING_IMPOSSIBLE(1598, new byte[]{'H', 'Y', '0', '0', '0'}, "Binary logging not possible. 
Message: " + + "%s"), ERR_VIEW_NO_CREATION_CTX(1599, new byte[]{'H', 'Y', '0', '0', '0'}, "View `%s`.`%s` has no creation context"), - ERR_VIEW_INVALID_CREATION_CTX(1600, new byte[]{'H', 'Y', '0', '0', '0'}, "Creation context of view `%s`.`%s' is " + - "invalid"), - ERR_SR_INVALID_CREATION_CTX(1601, new byte[]{'H', 'Y', '0', '0', '0'}, "Creation context of stored routine `%s`" + - ".`%s` is invalid"), + ERR_VIEW_INVALID_CREATION_CTX(1600, new byte[]{'H', 'Y', '0', '0', '0'}, "Creation context of view `%s`.`%s' is " + + "invalid"), + ERR_SR_INVALID_CREATION_CTX(1601, new byte[]{'H', 'Y', '0', '0', '0'}, "Creation context of stored routine `%s`" + + ".`%s` is invalid"), ERR_TRG_CORRUPTED_FILE(1602, new byte[]{'H', 'Y', '0', '0', '0'}, "Corrupted TRG file for table `%s`.`%s`"), - ERR_TRG_NO_CREATION_CTX(1603, new byte[]{'H', 'Y', '0', '0', '0'}, "Triggers for table `%s`.`%s` have no creation" + - " context"), - ERR_TRG_INVALID_CREATION_CTX(1604, new byte[]{'H', 'Y', '0', '0', '0'}, "Trigger creation context of table `%s`" + - ".`%s` is invalid"), - ERR_EVENT_INVALID_CREATION_CTX(1605, new byte[]{'H', 'Y', '0', '0', '0'}, "Creation context of event `%s`.`%s` is" + - " invalid"), + ERR_TRG_NO_CREATION_CTX(1603, new byte[]{'H', 'Y', '0', '0', '0'}, "Triggers for table `%s`.`%s` have no creation" + + " context"), + ERR_TRG_INVALID_CREATION_CTX(1604, new byte[]{'H', 'Y', '0', '0', '0'}, "Trigger creation context of table `%s`" + + ".`%s` is invalid"), + ERR_EVENT_INVALID_CREATION_CTX(1605, new byte[]{'H', 'Y', '0', '0', '0'}, "Creation context of event `%s`.`%s` is" + + " invalid"), ERR_TRG_CANT_OPEN_TABLE(1606, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot open table for trigger `%s`.`%s`"), - ERR_CANT_CREATE_SROUTINE(1607, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot create stored routine `%s`. Check " + - "warnings"), + ERR_CANT_CREATE_SROUTINE(1607, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot create stored routine `%s`. 
Check " + + "warnings"), ERR_UNUSED_11(1608, new byte[]{}, "You should never see it"), - ERR_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT(1609, new byte[]{'H', 'Y', '0', '0', '0'}, "The BINLOG " + - "statement of type `%s` was not preceded by a format description BINLOG statement."), + ERR_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT(1609, new byte[]{'H', 'Y', '0', '0', '0'}, "The BINLOG " + + "statement of type `%s` was not preceded by a format description BINLOG statement."), ERR_SLAVE_CORRUPT_EVENT(1610, new byte[]{'H', 'Y', '0', '0', '0'}, "Corrupted replication event was detected"), - ERR_LOAD_DATA_INVALID_COLUMN(1611, new byte[]{'H', 'Y', '0', '0', '0'}, "Invalid column reference (%s) in LOAD " + - "DATA"), + ERR_LOAD_DATA_INVALID_COLUMN(1611, new byte[]{'H', 'Y', '0', '0', '0'}, "Invalid column reference (%s) in LOAD " + + "DATA"), ERR_LOG_PURGE_NO_FILE(1612, new byte[]{'H', 'Y', '0', '0', '0'}, "Being purged log %s was not found"), - ERR_XA_RBTIMEOUT(1613, new byte[]{'X', 'A', '1', '0', '6'}, "XA_RBTIMEOUT: Transaction branch was rolled back: " + - "took too long"), - ERR_XA_RBDEADLOCK(1614, new byte[]{'X', 'A', '1', '0', '2'}, "XA_RBDEADLOCK: Transaction branch was rolled back: " + - "deadlock was detected"), + ERR_XA_RBTIMEOUT(1613, new byte[]{'X', 'A', '1', '0', '6'}, "XA_RBTIMEOUT: Transaction branch was rolled back: " + + "took too long"), + ERR_XA_RBDEADLOCK(1614, new byte[]{'X', 'A', '1', '0', '2'}, "XA_RBDEADLOCK: Transaction branch was rolled back: " + + "deadlock was detected"), ERR_NEED_REPREPARE(1615, new byte[]{'H', 'Y', '0', '0', '0'}, "Prepared statement needs to be re-prepared"), ERR_DELAYED_NOT_SUPPORTED(1616, new byte[]{'H', 'Y', '0', '0', '0'}, "DELAYED option not supported for table '%s'"), WARN_NO_MASTER_INF(1617, new byte[]{'H', 'Y', '0', '0', '0'}, "The master info structure does not exist"), WARN_OPTION_IGNORED(1618, new byte[]{'H', 'Y', '0', '0', '0'}, "<%s> option ignored"), WARN_PLUGIN_DELETE_BUILTIN(1619, new byte[]{'H', 'Y', '0', '0', '0'}, "Built-in plugins cannot be deleted"), WARN_PLUGIN_BUSY(1620, new byte[]{'H', 'Y', '0', '0', '0'}, "Plugin is busy and will be uninstalled on shutdown"), - ERR_VARIABLE_IS_READONLY(1621, new byte[]{'H', 'Y', '0', '0', '0'}, "%s variable '%s' is read-only. Use SET %s to" + - " assign the value"), - ERR_WARN_ENGINE_TRANSACTION_ROLLBACK(1622, new byte[]{'H', 'Y', '0', '0', '0'}, "Storage engine %s does not " + - "support rollback for this statement. Transaction rolled back and must be restarted"), + ERR_VARIABLE_IS_READONLY(1621, new byte[]{'H', 'Y', '0', '0', '0'}, "%s variable '%s' is read-only. Use SET %s to" + + " assign the value"), + ERR_WARN_ENGINE_TRANSACTION_ROLLBACK(1622, new byte[]{'H', 'Y', '0', '0', '0'}, "Storage engine %s does not " + + "support rollback for this statement. 
Transaction rolled back and must be restarted"), ERR_SLAVE_HEARTBEAT_FAILURE(1623, new byte[]{'H', 'Y', '0', '0', '0'}, "Unexpected master's heartbeat data: %s"), - ERR_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE(1624, new byte[]{'H', 'Y', '0', '0', '0'}, "The requested value for the " + - "heartbeat period is either negative or exceeds the maximum allowed (%s seconds)."), + ERR_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE(1624, new byte[]{'H', 'Y', '0', '0', '0'}, "The requested value for the " + + "heartbeat period is either negative or exceeds the maximum allowed (%s seconds)."), ERR_UNUSED_14(1625, new byte[]{}, "You should never see it"), - ERR_CONFLICT_FN_PARSE_ERROR(1626, new byte[]{'H', 'Y', '0', '0', '0'}, "Error in parsing conflict function. " + - "Message: %s"), - ERR_EXCEPTIONS_WRITE_ERROR(1627, new byte[]{'H', 'Y', '0', '0', '0'}, "Write to exceptions table failed. Message:" + - " %s"), - ERR_TOO_LONG_TABLE_COMMENT(1628, new byte[]{'H', 'Y', '0', '0', '0'}, "Comment for table '%s' is too long (max = " + - "%d)"), - ERR_TOO_LONG_FIELD_COMMENT(1629, new byte[]{'H', 'Y', '0', '0', '0'}, "Comment for field '%s' is too long (max = " + - "%d)"), - ERR_FUNC_INEXISTENT_NAME_COLLISION(1630, new byte[]{'4', '2', '0', '0', '0'}, "FUNCTION %s does not exist. Check " + - "the 'Function Name Parsing and Resolution' section in the Reference Manual"), + ERR_CONFLICT_FN_PARSE_ERROR(1626, new byte[]{'H', 'Y', '0', '0', '0'}, "Error in parsing conflict function. " + + "Message: %s"), + ERR_EXCEPTIONS_WRITE_ERROR(1627, new byte[]{'H', 'Y', '0', '0', '0'}, "Write to exceptions table failed. Message:" + + " %s"), + ERR_TOO_LONG_TABLE_COMMENT(1628, new byte[]{'H', 'Y', '0', '0', '0'}, "Comment for table '%s' is too long (max = " + + "%d)"), + ERR_TOO_LONG_FIELD_COMMENT(1629, new byte[]{'H', 'Y', '0', '0', '0'}, "Comment for field '%s' is too long (max = " + + "%d)"), + ERR_FUNC_INEXISTENT_NAME_COLLISION(1630, new byte[]{'4', '2', '0', '0', '0'}, "FUNCTION %s does not exist. 
Check " + + "the 'Function Name Parsing and Resolution' section in the Reference Manual"), ERR_DATABASE_NAME(1631, new byte[]{'H', 'Y', '0', '0', '0'}, "Database"), ERR_TABLE_NAME(1632, new byte[]{'H', 'Y', '0', '0', '0'}, "Table"), ERR_PARTITION_NAME(1633, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition"), @@ -1016,8 +1016,8 @@ public enum ErrorCode { ERR_TEMPORARY_NAME(1635, new byte[]{'H', 'Y', '0', '0', '0'}, "Temporary"), ERR_RENAMED_NAME(1636, new byte[]{'H', 'Y', '0', '0', '0'}, "Renamed"), ERR_TOO_MANY_CONCURRENT_TRXS(1637, new byte[]{'H', 'Y', '0', '0', '0'}, "Too many active concurrent transactions"), - WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED(1638, new byte[]{'H', 'Y', '0', '0', '0'}, "Non-ASCII separator " + - "arguments are not fully supported"), + WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED(1638, new byte[]{'H', 'Y', '0', '0', '0'}, "Non-ASCII separator " + + "arguments are not fully supported"), ERR_DEBUG_SYNC_TIMEOUT(1639, new byte[]{'H', 'Y', '0', '0', '0'}, "debug sync point wait timed out"), ERR_DEBUG_SYNC_HIT_LIMIT(1640, new byte[]{'H', 'Y', '0', '0', '0'}, "debug sync point hit limit reached"), ERR_DUP_SIGNAL_SET(1641, new byte[]{'4', '2', '0', '0', '0'}, "Duplicate condition information item '%s'"), @@ -1025,507 +1025,507 @@ public enum ErrorCode { ERR_SIGNAL_NOT_FOUND(1643, new byte[]{'0', '2', '0', '0', '0'}, "Unhandled user-defined not found condition"), ERR_SIGNAL_EXCEPTION(1644, new byte[]{'H', 'Y', '0', '0', '0'}, "Unhandled user-defined exception condition"), ERR_RESIGNAL_WITHOUT_ACTIVE_HANDLER(1645, new byte[]{'0', 'K', '0', '0', '0'}, "RESIGNAL when handler not active"), - ERR_SIGNAL_BAD_CONDITION_TYPE(1646, new byte[]{'H', 'Y', '0', '0', '0'}, "SIGNAL/RESIGNAL can only use a " + - "CONDITION defined with SQLSTATE"), + ERR_SIGNAL_BAD_CONDITION_TYPE(1646, new byte[]{'H', 'Y', '0', '0', '0'}, "SIGNAL/RESIGNAL can only use a " + + "CONDITION defined with SQLSTATE"), WARN_COND_ITEM_TRUNCATED(1647, new byte[]{'H', 'Y', '0', '0', '0'}, "Data truncated for condition item '%s'"), ERR_COND_ITEM_TOO_LONG(1648, new byte[]{'H', 'Y', '0', '0', '0'}, "Data too long for condition item '%s'"), ERR_UNKNOWN_LOCALE(1649, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown locale: '%s'"), - ERR_SLAVE_IGNORE_SERVER_IDS(1650, new byte[]{'H', 'Y', '0', '0', '0'}, "The requested server id %d clashes with " + - "the slave startup option --replicate-same-server-id"), - ERR_QUERY_CACHE_DISABLED(1651, new byte[]{'H', 'Y', '0', '0', '0'}, "Query cache is disabled; restart the server " + - "with query_cache_type=1 to enable it"), + ERR_SLAVE_IGNORE_SERVER_IDS(1650, new byte[]{'H', 'Y', '0', '0', '0'}, "The requested server id %d clashes with " + + "the slave startup option --replicate-same-server-id"), + ERR_QUERY_CACHE_DISABLED(1651, new byte[]{'H', 'Y', '0', '0', '0'}, "Query cache is disabled; restart the server " + + "with query_cache_type=1 to enable it"), ERR_SAME_NAME_PARTITION_FIELD(1652, new byte[]{'H', 'Y', '0', '0', '0'}, "Duplicate partition field name '%s'"), - ERR_PARTITION_COLUMN_LIST_ERROR(1653, new byte[]{'H', 'Y', '0', '0', '0'}, "Inconsistency in usage of column " + - "lists for partitioning"), - ERR_WRONG_TYPE_COLUMN_VALUE_ERROR(1654, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition column values of " + - "incorrect type"), + ERR_PARTITION_COLUMN_LIST_ERROR(1653, new byte[]{'H', 'Y', '0', '0', '0'}, "Inconsistency in usage of column " + + "lists for partitioning"), + ERR_WRONG_TYPE_COLUMN_VALUE_ERROR(1654, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition column values of " + + 
"incorrect type"), ERR_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR(1655, new byte[]{'H', 'Y', '0', '0', '0'}, "Too many fields in '%s'"), ERR_MAXVALUE_IN_VALUES_IN(1656, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot use MAXVALUE as value in VALUES IN"), - ERR_TOO_MANY_VALUES_ERROR(1657, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot have more than one value for this " + - "type of %s partitioning"), - ERR_ROW_SINGLE_PARTITION_FIELD_ERROR(1658, new byte[]{'H', 'Y', '0', '0', '0'}, "Row expressions in VALUES IN " + - "only allowed for multi-field column partitioning"), - ERR_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD(1659, new byte[]{'H', 'Y', '0', '0', '0'}, "Field '%s' is of a not " + - "allowed type for this type of partitioning"), - ERR_PARTITION_FIELDS_TOO_LONG(1660, new byte[]{'H', 'Y', '0', '0', '0'}, "The total length of the partitioning " + - "fields is too large"), - ERR_BINLOG_ROW_ENGINE_AND_STMT_ENGINE(1661, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + - "impossible to write to binary log since both row-incapable engines and statement-incapable engines are " + - "involved" + - "."), - ERR_BINLOG_ROW_MODE_AND_STMT_ENGINE(1662, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + - "impossible to write to binary log since BINLOG_FORMAT = ROW and at least one table uses a storage engine" + - " " + - "limited to statement-based logging."), - ERR_BINLOG_UNSAFE_AND_STMT_ENGINE(1663, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + - "impossible to write to binary log since statement is unsafe, storage engine is limited to " + - "statement-based " + - "logging, and BINLOG_FORMAT = MIXED. %s"), - ERR_BINLOG_ROW_INJECTION_AND_STMT_ENGINE(1664, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + - "impossible to write to binary log since statement is in row format and at least one table uses a storage" + - " engine" + - " limited to statement-based logging."), - ERR_BINLOG_STMT_MODE_AND_ROW_ENGINE(1665, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + - "impossible to write to binary log since BINLOG_FORMAT = STATEMENT and at least one table uses a storage " + - "engine " + - "limited to row-based logging.%s"), - ERR_BINLOG_ROW_INJECTION_AND_STMT_MODE(1666, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + - "impossible to write to binary log since statement is in row format and BINLOG_FORMAT = STATEMENT."), - ERR_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE(1667, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute " + - "statement: impossible to write to binary log since more than one engine is involved and at least one " + - "engine is " + - "self-logging."), - ERR_BINLOG_UNSAFE_LIMIT(1668, new byte[]{'H', 'Y', '0', '0', '0'}, "The statement is unsafe because it uses a " + - "LIMIT clause. This is unsafe because the set of rows included cannot be predicted."), - ERR_BINLOG_UNSAFE_INSERT_DELAYED(1669, new byte[]{'H', 'Y', '0', '0', '0'}, "The statement is unsafe because it " + - "uses INSERT DELAYED. This is unsafe because the times when rows are inserted cannot be predicted."), - ERR_BINLOG_UNSAFE_SYSTEM_TABLE(1670, new byte[]{'H', 'Y', '0', '0', '0'}, "The statement is unsafe because it " + - "uses the general log, slow query log, or performance_schema table(s). 
This is unsafe because system " + - "tables may " + - "differ on slaves."), - ERR_BINLOG_UNSAFE_AUTOINC_COLUMNS(1671, new byte[]{'H', 'Y', '0', '0', '0'}, "Statement is unsafe because it " + - "invokes a trigger or a stored function that inserts into an AUTO_INCREMENT column. Inserted values " + - "cannot be " + - "logged correctly."), - ERR_BINLOG_UNSAFE_UDF(1672, new byte[]{'H', 'Y', '0', '0', '0'}, "Statement is unsafe because it uses a UDF which" + - " may not return the same value on the slave."), - ERR_BINLOG_UNSAFE_SYSTEM_VARIABLE(1673, new byte[]{'H', 'Y', '0', '0', '0'}, "Statement is unsafe because it uses" + - " a system variable that may have a different value on the slave."), - ERR_BINLOG_UNSAFE_SYSTEM_FUNCTION(1674, new byte[]{'H', 'Y', '0', '0', '0'}, "Statement is unsafe because it uses" + - " a system function that may return a different value on the slave."), - ERR_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS(1675, new byte[]{'H', 'Y', '0', '0', '0'}, "Statement is unsafe because it" + - " accesses a non-transactional table after accessing a transactional table within the same transaction."), + ERR_TOO_MANY_VALUES_ERROR(1657, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot have more than one value for this " + + "type of %s partitioning"), + ERR_ROW_SINGLE_PARTITION_FIELD_ERROR(1658, new byte[]{'H', 'Y', '0', '0', '0'}, "Row expressions in VALUES IN " + + "only allowed for multi-field column partitioning"), + ERR_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD(1659, new byte[]{'H', 'Y', '0', '0', '0'}, "Field '%s' is of a not " + + "allowed type for this type of partitioning"), + ERR_PARTITION_FIELDS_TOO_LONG(1660, new byte[]{'H', 'Y', '0', '0', '0'}, "The total length of the partitioning " + + "fields is too large"), + ERR_BINLOG_ROW_ENGINE_AND_STMT_ENGINE(1661, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + + "impossible to write to binary log since both row-incapable engines and statement-incapable engines are " + + "involved" + + "."), + ERR_BINLOG_ROW_MODE_AND_STMT_ENGINE(1662, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + + "impossible to write to binary log since BINLOG_FORMAT = ROW and at least one table uses a storage engine" + + " " + + "limited to statement-based logging."), + ERR_BINLOG_UNSAFE_AND_STMT_ENGINE(1663, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + + "impossible to write to binary log since statement is unsafe, storage engine is limited to " + + "statement-based " + + "logging, and BINLOG_FORMAT = MIXED. 
%s"), + ERR_BINLOG_ROW_INJECTION_AND_STMT_ENGINE(1664, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + + "impossible to write to binary log since statement is in row format and at least one table uses a storage" + + " engine" + + " limited to statement-based logging."), + ERR_BINLOG_STMT_MODE_AND_ROW_ENGINE(1665, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + + "impossible to write to binary log since BINLOG_FORMAT = STATEMENT and at least one table uses a storage " + + "engine " + + "limited to row-based logging.%s"), + ERR_BINLOG_ROW_INJECTION_AND_STMT_MODE(1666, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + + "impossible to write to binary log since statement is in row format and BINLOG_FORMAT = STATEMENT."), + ERR_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE(1667, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute " + + "statement: impossible to write to binary log since more than one engine is involved and at least one " + + "engine is " + + "self-logging."), + ERR_BINLOG_UNSAFE_LIMIT(1668, new byte[]{'H', 'Y', '0', '0', '0'}, "The statement is unsafe because it uses a " + + "LIMIT clause. This is unsafe because the set of rows included cannot be predicted."), + ERR_BINLOG_UNSAFE_INSERT_DELAYED(1669, new byte[]{'H', 'Y', '0', '0', '0'}, "The statement is unsafe because it " + + "uses INSERT DELAYED. This is unsafe because the times when rows are inserted cannot be predicted."), + ERR_BINLOG_UNSAFE_SYSTEM_TABLE(1670, new byte[]{'H', 'Y', '0', '0', '0'}, "The statement is unsafe because it " + + "uses the general log, slow query log, or performance_schema table(s). This is unsafe because system " + + "tables may " + + "differ on slaves."), + ERR_BINLOG_UNSAFE_AUTOINC_COLUMNS(1671, new byte[]{'H', 'Y', '0', '0', '0'}, "Statement is unsafe because it " + + "invokes a trigger or a stored function that inserts into an AUTO_INCREMENT column. 
Inserted values " + + "cannot be " + + "logged correctly."), + ERR_BINLOG_UNSAFE_UDF(1672, new byte[]{'H', 'Y', '0', '0', '0'}, "Statement is unsafe because it uses a UDF which" + + " may not return the same value on the slave."), + ERR_BINLOG_UNSAFE_SYSTEM_VARIABLE(1673, new byte[]{'H', 'Y', '0', '0', '0'}, "Statement is unsafe because it uses" + + " a system variable that may have a different value on the slave."), + ERR_BINLOG_UNSAFE_SYSTEM_FUNCTION(1674, new byte[]{'H', 'Y', '0', '0', '0'}, "Statement is unsafe because it uses" + + " a system function that may return a different value on the slave."), + ERR_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS(1675, new byte[]{'H', 'Y', '0', '0', '0'}, "Statement is unsafe because it" + + " accesses a non-transactional table after accessing a transactional table within the same transaction."), ERR_MESSAGE_AND_STATEMENT(1676, new byte[]{'H', 'Y', '0', '0', '0'}, "%s Statement: %s"), - ERR_SLAVE_CONVERSION_FAILED(1677, new byte[]{'H', 'Y', '0', '0', '0'}, "Column %d of table '%s.%s' cannot be " + - "converted from type '%s' to type '%s'"), - ERR_SLAVE_CANT_CREATE_CONVERSION(1678, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't create conversion table for " + - "table '%s.%s'"), - ERR_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT(1679, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot modify " + - "@@session.binlog_format inside a transaction"), + ERR_SLAVE_CONVERSION_FAILED(1677, new byte[]{'H', 'Y', '0', '0', '0'}, "Column %d of table '%s.%s' cannot be " + + "converted from type '%s' to type '%s'"), + ERR_SLAVE_CANT_CREATE_CONVERSION(1678, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't create conversion table for " + + "table '%s.%s'"), + ERR_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT(1679, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot modify " + + "@@session.binlog_format inside a transaction"), ERR_PATH_LENGTH(1680, new byte[]{'H', 'Y', '0', '0', '0'}, "The path specified for %s is too long."), - ERR_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT(1681, new byte[]{'H', 'Y', '0', '0', '0'}, "'%s' is deprecated and will" + - " be removed in a future release."), - ERR_WRONG_NATIVE_TABLE_STRUCTURE(1682, new byte[]{'H', 'Y', '0', '0', '0'}, "Native table '%s'.'%s' has the wrong" + - " structure"), + ERR_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT(1681, new byte[]{'H', 'Y', '0', '0', '0'}, "'%s' is deprecated and will" + + " be removed in a future release."), + ERR_WRONG_NATIVE_TABLE_STRUCTURE(1682, new byte[]{'H', 'Y', '0', '0', '0'}, "Native table '%s'.'%s' has the wrong" + + " structure"), ERR_WRONG_PERFSCHEMA_USAGE(1683, new byte[]{'H', 'Y', '0', '0', '0'}, "Invalid performance_schema usage."), - ERR_WARN_I_S_SKIPPED_TABLE(1684, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s'.'%s' was skipped since its " + - "definition is being modified by concurrent DDL statement"), - ERR_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT(1685, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot modify " + - "@@session.binlog_direct_non_transactional_updates inside a transaction"), - ERR_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT(1686, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot change the " + - "binlog direct flag inside a stored function or trigger"), - ERR_SPATIAL_MUST_HAVE_GEOM_COL(1687, new byte[]{'4', '2', '0', '0', '0'}, "A SPATIAL index may only contain a " + - "geometrical type column"), - ERR_TOO_LONG_INDEX_COMMENT(1688, new byte[]{'H', 'Y', '0', '0', '0'}, "Comment for index '%s' is too long (max = " + - "%d)"), - ERR_LOCK_ABORTED(1689, new byte[]{'H', 'Y', '0', '0', '0'}, "Wait on 
a lock was aborted due to a pending " + - "exclusive lock"), + ERR_WARN_I_S_SKIPPED_TABLE(1684, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s'.'%s' was skipped since its " + + "definition is being modified by concurrent DDL statement"), + ERR_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT(1685, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot modify " + + "@@session.binlog_direct_non_transactional_updates inside a transaction"), + ERR_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT(1686, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot change the " + + "binlog direct flag inside a stored function or trigger"), + ERR_SPATIAL_MUST_HAVE_GEOM_COL(1687, new byte[]{'4', '2', '0', '0', '0'}, "A SPATIAL index may only contain a " + + "geometrical type column"), + ERR_TOO_LONG_INDEX_COMMENT(1688, new byte[]{'H', 'Y', '0', '0', '0'}, "Comment for index '%s' is too long (max = " + + "%d)"), + ERR_LOCK_ABORTED(1689, new byte[]{'H', 'Y', '0', '0', '0'}, "Wait on a lock was aborted due to a pending " + + "exclusive lock"), ERR_DATA_OUT_OF_RANGE(1690, new byte[]{'2', '2', '0', '0', '3'}, "%s value is out of range in '%s'"), - ERR_WRONG_SPVAR_TYPE_IN_LIMIT(1691, new byte[]{'H', 'Y', '0', '0', '0'}, "A variable of a non-integer based type " + - "in LIMIT clause"), - ERR_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE(1692, new byte[]{'H', 'Y', '0', '0', '0'}, "Mixing " + - "self-logging and non-self-logging engines in a statement is unsafe."), - ERR_BINLOG_UNSAFE_MIXED_STATEMENT(1693, new byte[]{'H', 'Y', '0', '0', '0'}, "Statement accesses nontransactional" + - " table as well as transactional or temporary table, and writes to any of them."), - ERR_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN(1694, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot modify " + - "@@session.sql_log_bin inside a transaction"), - ERR_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN(1695, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot change the " + - "sql_log_bin inside a stored function or trigger"), + ERR_WRONG_SPVAR_TYPE_IN_LIMIT(1691, new byte[]{'H', 'Y', '0', '0', '0'}, "A variable of a non-integer based type " + + "in LIMIT clause"), + ERR_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE(1692, new byte[]{'H', 'Y', '0', '0', '0'}, "Mixing " + + "self-logging and non-self-logging engines in a statement is unsafe."), + ERR_BINLOG_UNSAFE_MIXED_STATEMENT(1693, new byte[]{'H', 'Y', '0', '0', '0'}, "Statement accesses nontransactional" + + " table as well as transactional or temporary table, and writes to any of them."), + ERR_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN(1694, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot modify " + + "@@session.sql_log_bin inside a transaction"), + ERR_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN(1695, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot change the " + + "sql_log_bin inside a stored function or trigger"), ERR_FAILED_READ_FROM_PAR_FILE(1696, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to read from the .par file"), - ERR_VALUES_IS_NOT_INT_TYPE_ERROR(1697, new byte[]{'H', 'Y', '0', '0', '0'}, "VALUES value for partition '%s' must" + - " have type INT"), + ERR_VALUES_IS_NOT_INT_TYPE_ERROR(1697, new byte[]{'H', 'Y', '0', '0', '0'}, "VALUES value for partition '%s' must" + + " have type INT"), ERR_ACCESS_DENIED_NO_PASSWORD_ERROR(1698, new byte[]{'2', '8', '0', '0', '0'}, "Access denied for user '%s'@'%s'"), - ERR_SET_PASSWORD_AUTH_PLUGIN(1699, new byte[]{'H', 'Y', '0', '0', '0'}, "SET PASSWORD has no significance for " + - "users authenticating via plugins"), - 
ERR_GRANT_PLUGIN_USER_EXISTS(1700, new byte[]{'H', 'Y', '0', '0', '0'}, "GRANT with IDENTIFIED WITH is illegal " + - "because the user %-.*s already exists"), - ERR_TRUNCATE_ILLEGAL_FK(1701, new byte[]{'4', '2', '0', '0', '0'}, "Cannot truncate a table referenced in a " + - "foreign key constraint (%s)"), - ERR_PLUGIN_IS_PERMANENT(1702, new byte[]{'H', 'Y', '0', '0', '0'}, "Plugin '%s' is force_plus_permanent and can " + - "not be unloaded"), - ERR_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN(1703, new byte[]{'H', 'Y', '0', '0', '0'}, "The requested value for " + - "the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will" + - " " + - "effectively be disabled."), - ERR_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX(1704, new byte[]{'H', 'Y', '0', '0', '0'}, "The requested value for " + - "the heartbeat period exceeds the value of slave_net_timeout seconds. A sensible value for the period " + - "should be " + - "less than the timeout."), - ERR_STMT_CACHE_FULL(1705, new byte[]{'H', 'Y', '0', '0', '0'}, "Multi-row statements required more than " + - "'max_binlog_stmt_cache_size' bytes of storage; increase this mysqld variable and try again"), - ERR_MULTI_UPDATE_KEY_CONFLICT(1706, new byte[]{'H', 'Y', '0', '0', '0'}, "Primary key/partition key update is not" + - " allowed since the table is updated both as '%s' and '%s'."), - ERR_TABLE_NEEDS_REBUILD(1707, new byte[]{'H', 'Y', '0', '0', '0'}, "Table rebuild required. Please do \"ALTER " + - "TABLE `%s` FORCE\" or dump/reload to fix it!"), - WARN_OPTION_BELOW_LIMIT(1708, new byte[]{'H', 'Y', '0', '0', '0'}, "The value of '%s' should be no less than the " + - "value of '%s'"), - ERR_INDEX_COLUMN_TOO_LONG(1709, new byte[]{'H', 'Y', '0', '0', '0'}, "Index column size too large. The maximum " + - "column size is %d bytes."), + ERR_SET_PASSWORD_AUTH_PLUGIN(1699, new byte[]{'H', 'Y', '0', '0', '0'}, "SET PASSWORD has no significance for " + + "users authenticating via plugins"), + ERR_GRANT_PLUGIN_USER_EXISTS(1700, new byte[]{'H', 'Y', '0', '0', '0'}, "GRANT with IDENTIFIED WITH is illegal " + + "because the user %-.*s already exists"), + ERR_TRUNCATE_ILLEGAL_FK(1701, new byte[]{'4', '2', '0', '0', '0'}, "Cannot truncate a table referenced in a " + + "foreign key constraint (%s)"), + ERR_PLUGIN_IS_PERMANENT(1702, new byte[]{'H', 'Y', '0', '0', '0'}, "Plugin '%s' is force_plus_permanent and can " + + "not be unloaded"), + ERR_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN(1703, new byte[]{'H', 'Y', '0', '0', '0'}, "The requested value for " + + "the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will" + + " " + + "effectively be disabled."), + ERR_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX(1704, new byte[]{'H', 'Y', '0', '0', '0'}, "The requested value for " + + "the heartbeat period exceeds the value of slave_net_timeout seconds. A sensible value for the period " + + "should be " + + "less than the timeout."), + ERR_STMT_CACHE_FULL(1705, new byte[]{'H', 'Y', '0', '0', '0'}, "Multi-row statements required more than " + + "'max_binlog_stmt_cache_size' bytes of storage; increase this mysqld variable and try again"), + ERR_MULTI_UPDATE_KEY_CONFLICT(1706, new byte[]{'H', 'Y', '0', '0', '0'}, "Primary key/partition key update is not" + + " allowed since the table is updated both as '%s' and '%s'."), + ERR_TABLE_NEEDS_REBUILD(1707, new byte[]{'H', 'Y', '0', '0', '0'}, "Table rebuild required. 
Please do \"ALTER " + + "TABLE `%s` FORCE\" or dump/reload to fix it!"), + WARN_OPTION_BELOW_LIMIT(1708, new byte[]{'H', 'Y', '0', '0', '0'}, "The value of '%s' should be no less than the " + + "value of '%s'"), + ERR_INDEX_COLUMN_TOO_LONG(1709, new byte[]{'H', 'Y', '0', '0', '0'}, "Index column size too large. The maximum " + + "column size is %d bytes."), ERR_ERROR_IN_TRIGGER_BODY(1710, new byte[]{'H', 'Y', '0', '0', '0'}, "Trigger '%s' has an error in its body: '%s'"), - ERR_ERROR_IN_UNKNOWN_TRIGGER_BODY(1711, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown trigger has an error in its" + - " body: '%s'"), + ERR_ERROR_IN_UNKNOWN_TRIGGER_BODY(1711, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown trigger has an error in its" + + " body: '%s'"), ERR_INDEX_CORRUPT(1712, new byte[]{'H', 'Y', '0', '0', '0'}, "Index %s is corrupted"), ERR_UNDO_RECORD_TOO_BIG(1713, new byte[]{'H', 'Y', '0', '0', '0'}, "Undo log record is too big."), - ERR_BINLOG_UNSAFE_INSERT_IGNORE_SELECT(1714, new byte[]{'H', 'Y', '0', '0', '0'}, "INSERT IGNORE... SELECT is " + - "unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are " + - "ignored. " + - "This order cannot be predicted and may differ on master and the slave."), - ERR_BINLOG_UNSAFE_INSERT_SELECT_UPDATE(1715, new byte[]{'H', 'Y', '0', '0', '0'}, "INSERT... SELECT... ON " + - "DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines " + - "which (if" + - " any) rows are updated. This order cannot be predicted and may differ on master and the slave."), - ERR_BINLOG_UNSAFE_REPLACE_SELECT(1716, new byte[]{'H', 'Y', '0', '0', '0'}, "REPLACE... SELECT is unsafe because " + - "the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This " + - "order " + - "cannot be predicted and may differ on master and the slave."), - ERR_BINLOG_UNSAFE_CREATE_IGNORE_SELECT(1717, new byte[]{'H', 'Y', '0', '0', '0'}, "CREATE... IGNORE SELECT is " + - "unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are " + - "ignored. " + - "This order cannot be predicted and may differ on master and the slave."), - ERR_BINLOG_UNSAFE_CREATE_REPLACE_SELECT(1718, new byte[]{'H', 'Y', '0', '0', '0'}, "CREATE... REPLACE SELECT is " + - "unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are " + - "replaced." + - " This order cannot be predicted and may differ on master and the slave."), - ERR_BINLOG_UNSAFE_UPDATE_IGNORE(1719, new byte[]{'H', 'Y', '0', '0', '0'}, "UPDATE IGNORE is unsafe because the " + - "order in which rows are updated determines which (if any) rows are ignored. This order cannot be " + - "predicted and " + - "may differ on master and the slave."), + ERR_BINLOG_UNSAFE_INSERT_IGNORE_SELECT(1714, new byte[]{'H', 'Y', '0', '0', '0'}, "INSERT IGNORE... SELECT is " + + "unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are " + + "ignored. " + + "This order cannot be predicted and may differ on master and the slave."), + ERR_BINLOG_UNSAFE_INSERT_SELECT_UPDATE(1715, new byte[]{'H', 'Y', '0', '0', '0'}, "INSERT... SELECT... ON " + + "DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines " + + "which (if" + + " any) rows are updated. 
This order cannot be predicted and may differ on master and the slave."), + ERR_BINLOG_UNSAFE_REPLACE_SELECT(1716, new byte[]{'H', 'Y', '0', '0', '0'}, "REPLACE... SELECT is unsafe because " + + "the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This " + + "order " + + "cannot be predicted and may differ on master and the slave."), + ERR_BINLOG_UNSAFE_CREATE_IGNORE_SELECT(1717, new byte[]{'H', 'Y', '0', '0', '0'}, "CREATE... IGNORE SELECT is " + + "unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are " + + "ignored. " + + "This order cannot be predicted and may differ on master and the slave."), + ERR_BINLOG_UNSAFE_CREATE_REPLACE_SELECT(1718, new byte[]{'H', 'Y', '0', '0', '0'}, "CREATE... REPLACE SELECT is " + + "unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are " + + "replaced." + + " This order cannot be predicted and may differ on master and the slave."), + ERR_BINLOG_UNSAFE_UPDATE_IGNORE(1719, new byte[]{'H', 'Y', '0', '0', '0'}, "UPDATE IGNORE is unsafe because the " + + "order in which rows are updated determines which (if any) rows are ignored. This order cannot be " + + "predicted and " + + "may differ on master and the slave."), ERR_UNUSED_15(1720, new byte[]{}, "You should never see it"), ERR_UNUSED_16(1721, new byte[]{}, "You should never see it"), - ERR_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT(1722, new byte[]{'H', 'Y', '0', '0', '0'}, "Statements writing to a table " + - "with an auto-increment column after selecting from another table are unsafe because the order in which " + - "rows are" + - " retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ " + - "on " + - "master and the slave."), - ERR_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC(1723, new byte[]{'H', 'Y', '0', '0', '0'}, "CREATE TABLE... SELECT... on " + - "a table with an auto-increment column is unsafe because the order in which rows are retrieved by the " + - "SELECT " + - "determines which (if any) rows are inserted. This order cannot be predicted and may differ on master and" + - " the " + - "slave."), - ERR_BINLOG_UNSAFE_INSERT_TWO_KEYS(1724, new byte[]{'H', 'Y', '0', '0', '0'}, "INSERT... ON DUPLICATE KEY UPDATE " + - "on a table with more than one UNIQUE KEY is unsafe"), + ERR_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT(1722, new byte[]{'H', 'Y', '0', '0', '0'}, "Statements writing to a table " + + "with an auto-increment column after selecting from another table are unsafe because the order in which " + + "rows are" + + " retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ " + + "on " + + "master and the slave."), + ERR_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC(1723, new byte[]{'H', 'Y', '0', '0', '0'}, "CREATE TABLE... SELECT... on " + + "a table with an auto-increment column is unsafe because the order in which rows are retrieved by the " + + "SELECT " + + "determines which (if any) rows are inserted. This order cannot be predicted and may differ on master and" + + " the " + + "slave."), + ERR_BINLOG_UNSAFE_INSERT_TWO_KEYS(1724, new byte[]{'H', 'Y', '0', '0', '0'}, "INSERT... 
ON DUPLICATE KEY UPDATE " + + "on a table with more than one UNIQUE KEY is unsafe"), ERR_TABLE_IN_FK_CHECK(1725, new byte[]{'H', 'Y', '0', '0', '0'}, "Table is being used in foreign key check."), - ERR_UNSUPPORTED_ENGINE(1726, new byte[]{'H', 'Y', '0', '0', '0'}, "Storage engine '%s' does not support system " + - "tables. [%s.%s]"), - ERR_BINLOG_UNSAFE_AUTOINC_NOT_FIRST(1727, new byte[]{'H', 'Y', '0', '0', '0'}, "INSERT into autoincrement field " + - "which is not the first part in the composed primary key is unsafe."), - ERR_CANNOT_LOAD_FROM_TABLE_V2(1728, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot load from %s.%s. The table is " + - "probably corrupted"), - ERR_MASTER_DELAY_VALUE_OUT_OF_RANGE(1729, new byte[]{'H', 'Y', '0', '0', '0'}, "The requested value %s for the " + - "master delay exceeds the maximum %u"), - ERR_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT(1730, new byte[]{'H', 'Y', '0', '0', '0'}, "Only " + - "Format_description_log_event and row events are allowed in BINLOG statements (but %s was provided"), - ERR_PARTITION_EXCHANGE_DIFFERENT_OPTION(1731, new byte[]{'H', 'Y', '0', '0', '0'}, "Non matching attribute '%s' " + - "between partition and table"), - ERR_PARTITION_EXCHANGE_PART_TABLE(1732, new byte[]{'H', 'Y', '0', '0', '0'}, "Table to exchange with partition is" + - " partitioned: '%s'"), - ERR_PARTITION_EXCHANGE_TEMP_TABLE(1733, new byte[]{'H', 'Y', '0', '0', '0'}, "Table to exchange with partition is" + - " temporary: '%s'"), - ERR_PARTITION_INSTEAD_OF_SUBPARTITION(1734, new byte[]{'H', 'Y', '0', '0', '0'}, "Subpartitioned table, use " + - "subpartition instead of partition"), + ERR_UNSUPPORTED_ENGINE(1726, new byte[]{'H', 'Y', '0', '0', '0'}, "Storage engine '%s' does not support system " + + "tables. [%s.%s]"), + ERR_BINLOG_UNSAFE_AUTOINC_NOT_FIRST(1727, new byte[]{'H', 'Y', '0', '0', '0'}, "INSERT into autoincrement field " + + "which is not the first part in the composed primary key is unsafe."), + ERR_CANNOT_LOAD_FROM_TABLE_V2(1728, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot load from %s.%s. 
The table is " + + "probably corrupted"), + ERR_MASTER_DELAY_VALUE_OUT_OF_RANGE(1729, new byte[]{'H', 'Y', '0', '0', '0'}, "The requested value %s for the " + + "master delay exceeds the maximum %u"), + ERR_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT(1730, new byte[]{'H', 'Y', '0', '0', '0'}, "Only " + + "Format_description_log_event and row events are allowed in BINLOG statements (but %s was provided"), + ERR_PARTITION_EXCHANGE_DIFFERENT_OPTION(1731, new byte[]{'H', 'Y', '0', '0', '0'}, "Non matching attribute '%s' " + + "between partition and table"), + ERR_PARTITION_EXCHANGE_PART_TABLE(1732, new byte[]{'H', 'Y', '0', '0', '0'}, "Table to exchange with partition is" + + " partitioned: '%s'"), + ERR_PARTITION_EXCHANGE_TEMP_TABLE(1733, new byte[]{'H', 'Y', '0', '0', '0'}, "Table to exchange with partition is" + + " temporary: '%s'"), + ERR_PARTITION_INSTEAD_OF_SUBPARTITION(1734, new byte[]{'H', 'Y', '0', '0', '0'}, "Subpartitioned table, use " + + "subpartition instead of partition"), ERR_UNKNOWN_PARTITION(1735, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown partition '%s' in table '%s'"), ERR_TABLES_DIFFERENT_METADATA(1736, new byte[]{'H', 'Y', '0', '0', '0'}, "Tables have different definitions"), - ERR_ROW_DOES_NOT_MATCH_PARTITION(1737, new byte[]{'H', 'Y', '0', '0', '0'}, "Found a row that does not match the " + - "partition"), - ERR_BINLOG_CACHE_SIZE_GREATER_THAN_MAX(1738, new byte[]{'H', 'Y', '0', '0', '0'}, "Option binlog_cache_size (%d)" + - " is greater than max_binlog_cache_size (%d); setting binlog_cache_size equal to max_binlog_cache_size."), - ERR_WARN_INDEX_NOT_APPLICABLE(1739, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot use %s access on index '%s' due " + - "to type or collation conversion on field '%s'"), - ERR_PARTITION_EXCHANGE_FOREIGN_KEY(1740, new byte[]{'H', 'Y', '0', '0', '0'}, "Table to exchange with partition " + - "has foreign key references: '%s'"), + ERR_ROW_DOES_NOT_MATCH_PARTITION(1737, new byte[]{'H', 'Y', '0', '0', '0'}, "Found a row that does not match the " + + "partition"), + ERR_BINLOG_CACHE_SIZE_GREATER_THAN_MAX(1738, new byte[]{'H', 'Y', '0', '0', '0'}, "Option binlog_cache_size (%d)" + + " is greater than max_binlog_cache_size (%d); setting binlog_cache_size equal to max_binlog_cache_size."), + ERR_WARN_INDEX_NOT_APPLICABLE(1739, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot use %s access on index '%s' due " + + "to type or collation conversion on field '%s'"), + ERR_PARTITION_EXCHANGE_FOREIGN_KEY(1740, new byte[]{'H', 'Y', '0', '0', '0'}, "Table to exchange with partition " + + "has foreign key references: '%s'"), ERR_NO_SUCH_KEY_VALUE(1741, new byte[]{'H', 'Y', '0', '0', '0'}, "Key value '%s' was not found in table '%s.%s'"), ERR_RPL_INFO_DATA_TOO_LONG(1742, new byte[]{'H', 'Y', '0', '0', '0'}, "Data for column '%s' too long"), - ERR_NETWORK_READ_EVENT_CHECKSUM_FAILURE(1743, new byte[]{'H', 'Y', '0', '0', '0'}, "Replication event checksum " + - "verification failed while reading from network."), - ERR_BINLOG_READ_EVENT_CHECKSUM_FAILURE(1744, new byte[]{'H', 'Y', '0', '0', '0'}, "Replication event checksum " + - "verification failed while reading from a log file."), - ERR_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX(1745, new byte[]{'H', 'Y', '0', '0', '0'}, "Option " + - "binlog_stmt_cache_size (%d) is greater than max_binlog_stmt_cache_size (%d); setting " + - "binlog_stmt_cache_size " + - "equal to max_binlog_stmt_cache_size."), - ERR_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT(1746, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't update table '%s' " + - 
"while '%s' is being created."), - ERR_PARTITION_CLAUSE_ON_NONPARTITIONED(1747, new byte[]{'H', 'Y', '0', '0', '0'}, "PARTITION () clause on non " + - "partitioned table"), - ERR_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET(1748, new byte[]{'H', 'Y', '0', '0', '0'}, "Found a row not matching " + - "the given partition set"), + ERR_NETWORK_READ_EVENT_CHECKSUM_FAILURE(1743, new byte[]{'H', 'Y', '0', '0', '0'}, "Replication event checksum " + + "verification failed while reading from network."), + ERR_BINLOG_READ_EVENT_CHECKSUM_FAILURE(1744, new byte[]{'H', 'Y', '0', '0', '0'}, "Replication event checksum " + + "verification failed while reading from a log file."), + ERR_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX(1745, new byte[]{'H', 'Y', '0', '0', '0'}, "Option " + + "binlog_stmt_cache_size (%d) is greater than max_binlog_stmt_cache_size (%d); setting " + + "binlog_stmt_cache_size " + + "equal to max_binlog_stmt_cache_size."), + ERR_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT(1746, new byte[]{'H', 'Y', '0', '0', '0'}, "Can't update table '%s' " + + "while '%s' is being created."), + ERR_PARTITION_CLAUSE_ON_NONPARTITIONED(1747, new byte[]{'H', 'Y', '0', '0', '0'}, "PARTITION () clause on non " + + "partitioned table"), + ERR_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET(1748, new byte[]{'H', 'Y', '0', '0', '0'}, "Found a row not matching " + + "the given partition set"), ERR_NO_SUCH_PARTITION(1749, new byte[]{'H', 'Y', '0', '0', '0'}, "partition '%s' doesn't exist"), - ERR_CHANGE_RPL_INFO_REPOSITORY_FAILURE(1750, new byte[]{'H', 'Y', '0', '0', '0'}, "Failure while changing the " + - "type of replication repository: %s."), - ERR_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE(1751, new byte[]{'H', 'Y', '0', '0', '0'}, "The " + - "creation of some temporary tables could not be rolled back."), - ERR_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE(1752, new byte[]{'H', 'Y', '0', '0', '0'}, "Some " + - "temporary tables were dropped, but these operations could not be rolled back."), - ERR_MTS_FEATURE_IS_NOT_SUPPORTED(1753, new byte[]{'H', 'Y', '0', '0', '0'}, "%s is not supported in " + - "multi-threaded slave mode. %s"), - ERR_MTS_UPDATED_DBS_GREATER_MAX(1754, new byte[]{'H', 'Y', '0', '0', '0'}, "The number of modified databases " + - "exceeds the maximum %d; the database names will not be included in the replication event metadata."), - ERR_MTS_CANT_PARALLEL(1755, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute the current event group in the " + - "parallel mode. Encountered event %s, relay-log name %s, position %s which prevents execution of this " + - "event " + - "group in parallel mode. Reason: %s."), + ERR_CHANGE_RPL_INFO_REPOSITORY_FAILURE(1750, new byte[]{'H', 'Y', '0', '0', '0'}, "Failure while changing the " + + "type of replication repository: %s."), + ERR_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE(1751, new byte[]{'H', 'Y', '0', '0', '0'}, "The " + + "creation of some temporary tables could not be rolled back."), + ERR_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE(1752, new byte[]{'H', 'Y', '0', '0', '0'}, "Some " + + "temporary tables were dropped, but these operations could not be rolled back."), + ERR_MTS_FEATURE_IS_NOT_SUPPORTED(1753, new byte[]{'H', 'Y', '0', '0', '0'}, "%s is not supported in " + + "multi-threaded slave mode. 
%s"), + ERR_MTS_UPDATED_DBS_GREATER_MAX(1754, new byte[]{'H', 'Y', '0', '0', '0'}, "The number of modified databases " + + "exceeds the maximum %d; the database names will not be included in the replication event metadata."), + ERR_MTS_CANT_PARALLEL(1755, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute the current event group in the " + + "parallel mode. Encountered event %s, relay-log name %s, position %s which prevents execution of this " + + "event " + + "group in parallel mode. Reason: %s."), ERR_MTS_INCONSISTENT_DATA(1756, new byte[]{'H', 'Y', '0', '0', '0'}, "%s"), - ERR_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING(1757, new byte[]{'H', 'Y', '0', '0', '0'}, "FULLTEXT index is not " + - "supported for partitioned tables."), + ERR_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING(1757, new byte[]{'H', 'Y', '0', '0', '0'}, "FULLTEXT index is not " + + "supported for partitioned tables."), ERR_DA_INVALID_CONDITION_NUMBER(1758, new byte[]{'3', '5', '0', '0', '0'}, "Invalid condition number"), - ERR_INSECURE_PLAIN_TEXT(1759, new byte[]{'H', 'Y', '0', '0', '0'}, "Sending passwords in plain text without " + - "SSL/TLS is extremely insecure."), - ERR_INSECURE_CHANGE_MASTER(1760, new byte[]{'H', 'Y', '0', '0', '0'}, "Storing MySQL user name or password " + - "information in the master info repository is not secure and is therefore not recommended. Please " + - "consider using" + - " the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL " + - "Manual " + - "for more information."), - ERR_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO(1761, new byte[]{'2', '3', '0', '0', '0'}, "Foreign key constraint for " + - "table '%s', record '%s' would lead to a duplicate entry in table '%s', key '%s'"), - ERR_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO(1762, new byte[]{'2', '3', '0', '0', '0'}, "Foreign key constraint " + - "for table '%s', record '%s' would lead to a duplicate entry in a child table"), - ERR_SQLTHREAD_WITH_SECURE_SLAVE(1763, new byte[]{'H', 'Y', '0', '0', '0'}, "Setting authentication options is not" + - " possible when only the Slave SQL Thread is being started."), - ERR_TABLE_HAS_NO_FT(1764, new byte[]{'H', 'Y', '0', '0', '0'}, "The table does not have FULLTEXT index to support" + - " this query"), - ERR_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER(1765, new byte[]{'H', 'Y', '0', '0', '0'}, "The system variable %s " + - "cannot be set in stored functions or triggers."), - ERR_VARIABLE_NOT_SETTABLE_IN_TRANSACTION(1766, new byte[]{'H', 'Y', '0', '0', '0'}, "The system variable %s " + - "cannot be set when there is an ongoing transaction."), - ERR_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST(1767, new byte[]{'H', 'Y', '0', '0', '0'}, "The system variable @@SESSION" + - ".GTID_NEXT has the value %s, which is not listed in @@SESSION.GTID_NEXT_LIST."), + ERR_INSECURE_PLAIN_TEXT(1759, new byte[]{'H', 'Y', '0', '0', '0'}, "Sending passwords in plain text without " + + "SSL/TLS is extremely insecure."), + ERR_INSECURE_CHANGE_MASTER(1760, new byte[]{'H', 'Y', '0', '0', '0'}, "Storing MySQL user name or password " + + "information in the master info repository is not secure and is therefore not recommended. 
Please " + + "consider using" + + " the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL " + + "Manual " + + "for more information."), + ERR_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO(1761, new byte[]{'2', '3', '0', '0', '0'}, "Foreign key constraint for " + + "table '%s', record '%s' would lead to a duplicate entry in table '%s', key '%s'"), + ERR_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO(1762, new byte[]{'2', '3', '0', '0', '0'}, "Foreign key constraint " + + "for table '%s', record '%s' would lead to a duplicate entry in a child table"), + ERR_SQLTHREAD_WITH_SECURE_SLAVE(1763, new byte[]{'H', 'Y', '0', '0', '0'}, "Setting authentication options is not" + + " possible when only the Slave SQL Thread is being started."), + ERR_TABLE_HAS_NO_FT(1764, new byte[]{'H', 'Y', '0', '0', '0'}, "The table does not have FULLTEXT index to support" + + " this query"), + ERR_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER(1765, new byte[]{'H', 'Y', '0', '0', '0'}, "The system variable %s " + + "cannot be set in stored functions or triggers."), + ERR_VARIABLE_NOT_SETTABLE_IN_TRANSACTION(1766, new byte[]{'H', 'Y', '0', '0', '0'}, "The system variable %s " + + "cannot be set when there is an ongoing transaction."), + ERR_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST(1767, new byte[]{'H', 'Y', '0', '0', '0'}, "The system variable @@SESSION" + + ".GTID_NEXT has the value %s, which is not listed in @@SESSION.GTID_NEXT_LIST."), ERR_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION_WHEN_GTID_NEXT_LIST_IS_NULL(1768, new byte[]{'H', 'Y', '0', '0', '0'}, "The system variable @@SESSION.GTID_NEXT cannot change inside a transaction."), - ERR_SET_STATEMENT_CANNOT_INVOKE_FUNCTION(1769, new byte[]{'H', 'Y', '0', '0', '0'}, "The statement 'SET %s' " + - "cannot invoke a stored function."), - ERR_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL(1770, new byte[]{'H', 'Y', '0', '0', '0'}, "The " + - "system variable @@SESSION.GTID_NEXT cannot be 'AUTOMATIC' when @@SESSION.GTID_NEXT_LIST is non-NULL."), - ERR_SKIPPING_LOGGED_TRANSACTION(1771, new byte[]{'H', 'Y', '0', '0', '0'}, "Skipping transaction %s because it " + - "has already been executed and logged."), - ERR_MALFORMED_GTID_SET_SPECIFICATION(1772, new byte[]{'H', 'Y', '0', '0', '0'}, "Malformed GTID set specification" + - " '%s'."), + ERR_SET_STATEMENT_CANNOT_INVOKE_FUNCTION(1769, new byte[]{'H', 'Y', '0', '0', '0'}, "The statement 'SET %s' " + + "cannot invoke a stored function."), + ERR_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL(1770, new byte[]{'H', 'Y', '0', '0', '0'}, "The " + + "system variable @@SESSION.GTID_NEXT cannot be 'AUTOMATIC' when @@SESSION.GTID_NEXT_LIST is non-NULL."), + ERR_SKIPPING_LOGGED_TRANSACTION(1771, new byte[]{'H', 'Y', '0', '0', '0'}, "Skipping transaction %s because it " + + "has already been executed and logged."), + ERR_MALFORMED_GTID_SET_SPECIFICATION(1772, new byte[]{'H', 'Y', '0', '0', '0'}, "Malformed GTID set specification" + + " '%s'."), ERR_MALFORMED_GTID_SET_ENCODING(1773, new byte[]{'H', 'Y', '0', '0', '0'}, "Malformed GTID set encoding."), ERR_MALFORMED_GTID_SPECIFICATION(1774, new byte[]{'H', 'Y', '0', '0', '0'}, "Malformed GTID specification '%s'."), - ERR_GNO_EXHAUSTED(1775, new byte[]{'H', 'Y', '0', '0', '0'}, "Impossible to generate Global Transaction " + - "Identifier: the integer component reached the maximal value. 
Restart the server with a new server_uuid."), - ERR_BAD_SLAVE_AUTO_POSITION(1776, new byte[]{'H', 'Y', '0', '0', '0'}, "Parameters MASTER_LOG_FILE, " + - "MASTER_LOG_POS, RELAY_LOG_FILE and RELAY_LOG_POS cannot be set when MASTER_AUTO_POSITION is active."), - ERR_AUTO_POSITION_REQUIRES_GTID_MODE_ON(1777, new byte[]{'H', 'Y', '0', '0', '0'}, "CHANGE MASTER TO " + - "MASTER_AUTO_POSITION = 1 can only be executed when @@GLOBAL.GTID_MODE = ON."), - ERR_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET(1778, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot " + - "execute statements with implicit commit inside a transaction when @@SESSION.GTID_NEXT != AUTOMATIC."), + ERR_GNO_EXHAUSTED(1775, new byte[]{'H', 'Y', '0', '0', '0'}, "Impossible to generate Global Transaction " + + "Identifier: the integer component reached the maximal value. Restart the server with a new server_uuid."), + ERR_BAD_SLAVE_AUTO_POSITION(1776, new byte[]{'H', 'Y', '0', '0', '0'}, "Parameters MASTER_LOG_FILE, " + + "MASTER_LOG_POS, RELAY_LOG_FILE and RELAY_LOG_POS cannot be set when MASTER_AUTO_POSITION is active."), + ERR_AUTO_POSITION_REQUIRES_GTID_MODE_ON(1777, new byte[]{'H', 'Y', '0', '0', '0'}, "CHANGE MASTER TO " + + "MASTER_AUTO_POSITION = 1 can only be executed when @@GLOBAL.GTID_MODE = ON."), + ERR_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET(1778, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot " + + "execute statements with implicit commit inside a transaction when @@SESSION.GTID_NEXT != AUTOMATIC."), ERR_GTID_MODE_2_OR_3_REQUIRES_DISABLE_GTID_UNSAFE_STATEMENTS_ON(1779, new byte[]{'H', 'Y', '0', '0', '0'}, "GTID_MODE = ON or GTID_MODE = UPGRADE_STEP_2 requires DISABLE_GTID_UNSAFE_STATEMENTS = 1."), - ERR_GTID_MODE_2_OR_3_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON(1779, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL" + - ".GTID_MODE = ON or UPGRADE_STEP_2 requires @@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1."), - ERR_GTID_MODE_REQUIRES_BINLOG(1780, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL.GTID_MODE = ON or " + - "UPGRADE_STEP_1 or UPGRADE_STEP_2 requires --log-bin and --log-slave-updates."), - ERR_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF(1781, new byte[]{'H', 'Y', '0', '0', '0'}, "@@SESSION" + - ".GTID_NEXT cannot be set to UUID:NUMBER when @@GLOBAL.GTID_MODE = OFF."), - ERR_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON(1782, new byte[]{'H', 'Y', '0', '0', '0'}, "@@SESSION" + - ".GTID_NEXT cannot be set to ANONYMOUS when @@GLOBAL.GTID_MODE = ON."), + ERR_GTID_MODE_2_OR_3_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON(1779, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL" + + ".GTID_MODE = ON or UPGRADE_STEP_2 requires @@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1."), + ERR_GTID_MODE_REQUIRES_BINLOG(1780, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL.GTID_MODE = ON or " + + "UPGRADE_STEP_1 or UPGRADE_STEP_2 requires --log-bin and --log-slave-updates."), + ERR_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF(1781, new byte[]{'H', 'Y', '0', '0', '0'}, "@@SESSION" + + ".GTID_NEXT cannot be set to UUID:NUMBER when @@GLOBAL.GTID_MODE = OFF."), + ERR_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON(1782, new byte[]{'H', 'Y', '0', '0', '0'}, "@@SESSION" + + ".GTID_NEXT cannot be set to ANONYMOUS when @@GLOBAL.GTID_MODE = ON."), ERR_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF(1783, new byte[]{'H', 'Y', '0', '0', '0'}, "@@SESSION.GTID_NEXT_LIST cannot be set to a non-NULL value when @@GLOBAL.GTID_MODE = OFF."), - ERR_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF(1784, new byte[]{'H', 'Y', '0', '0', '0'}, "Found a 
Gtid_log_event or " + - "Previous_gtids_log_event when @@GLOBAL.GTID_MODE = OFF."), - ERR_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE(1785, new byte[]{'H', 'Y', '0', '0', '0'}, "When @@GLOBAL" + - ".ENFORCE_GTID_CONSISTENCY = 1, updates to non-transactional tables can only be done in either " + - "autocommitted " + - "statements or single-statement transactions, and never in the same statement as updates to transactional" + - " " + - "tables."), - ERR_GTID_UNSAFE_CREATE_SELECT(1786, new byte[]{'H', 'Y', '0', '0', '0'}, "CREATE TABLE ... SELECT is forbidden " + - "when @@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1."), - ERR_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION(1787, new byte[]{'H', 'Y', '0', '0', '0'}, "When " + - "@@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE " + - "can be " + - "executed in a non-transactional context only, and require that AUTOCOMMIT = 1."), - ERR_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME(1788, new byte[]{'H', 'Y', '0', '0', '0'}, "The value of " + - "@@GLOBAL.GTID_MODE can only change one step at a time: OFF <-> UPGRADE_STEP_1 <-> UPGRADE_STEP_2 <-> ON." + - " Also " + - "note that this value must be stepped up or down simultaneously on all servers; see the Manual for " + - "instructions" + - "."), - ERR_MASTER_HAS_PURGED_REQUIRED_GTIDS(1789, new byte[]{'H', 'Y', '0', '0', '0'}, "The slave is connecting using " + - "CHANGE MASTER TO MASTER_AUTO_POSITION = 1, but the master has purged binary logs containing GTIDs that " + - "the " + - "slave requires."), - ERR_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID(1790, new byte[]{'H', 'Y', '0', '0', '0'}, "@@SESSION.GTID_NEXT cannot be" + - " changed by a client that owns a GTID. The client owns %s. Ownership is released on COMMIT or ROLLBACK."), + ERR_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF(1784, new byte[]{'H', 'Y', '0', '0', '0'}, "Found a Gtid_log_event or " + + "Previous_gtids_log_event when @@GLOBAL.GTID_MODE = OFF."), + ERR_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE(1785, new byte[]{'H', 'Y', '0', '0', '0'}, "When @@GLOBAL" + + ".ENFORCE_GTID_CONSISTENCY = 1, updates to non-transactional tables can only be done in either " + + "autocommitted " + + "statements or single-statement transactions, and never in the same statement as updates to transactional" + + " " + + "tables."), + ERR_GTID_UNSAFE_CREATE_SELECT(1786, new byte[]{'H', 'Y', '0', '0', '0'}, "CREATE TABLE ... SELECT is forbidden " + + "when @@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1."), + ERR_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION(1787, new byte[]{'H', 'Y', '0', '0', '0'}, "When " + + "@@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE " + + "can be " + + "executed in a non-transactional context only, and require that AUTOCOMMIT = 1."), + ERR_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME(1788, new byte[]{'H', 'Y', '0', '0', '0'}, "The value of " + + "@@GLOBAL.GTID_MODE can only change one step at a time: OFF <-> UPGRADE_STEP_1 <-> UPGRADE_STEP_2 <-> ON." 
+ + " Also " + + "note that this value must be stepped up or down simultaneously on all servers; see the Manual for " + + "instructions" + + "."), + ERR_MASTER_HAS_PURGED_REQUIRED_GTIDS(1789, new byte[]{'H', 'Y', '0', '0', '0'}, "The slave is connecting using " + + "CHANGE MASTER TO MASTER_AUTO_POSITION = 1, but the master has purged binary logs containing GTIDs that " + + "the " + + "slave requires."), + ERR_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID(1790, new byte[]{'H', 'Y', '0', '0', '0'}, "@@SESSION.GTID_NEXT cannot be" + + " changed by a client that owns a GTID. The client owns %s. Ownership is released on COMMIT or ROLLBACK."), ERR_UNKNOWN_EXPLAIN_FORMAT(1791, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown EXPLAIN format name: '%s'"), - ERR_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION(1792, new byte[]{'2', '5', '0', '0', '6'}, "Cannot execute statement in" + - " a READ ONLY transaction."), - ERR_TOO_LONG_TABLE_PARTITION_COMMENT(1793, new byte[]{'H', 'Y', '0', '0', '0'}, "Comment for table partition '%s'" + - " is too long (max = %d"), - ERR_SLAVE_CONFIGURATION(1794, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave is not configured or failed to " + - "initialize properly. You must at least set --server-id to enable either a master or a slave. Additional " + - "error " + - "messages can be found in the MySQL error log."), - ERR_INNODB_FT_LIMIT(1795, new byte[]{'H', 'Y', '0', '0', '0'}, "InnoDB presently supports one FULLTEXT index " + - "creation at a time"), - ERR_INNODB_NO_FT_TEMP_TABLE(1796, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot create FULLTEXT index on temporary" + - " InnoDB table"), - ERR_INNODB_FT_WRONG_DOCID_COLUMN(1797, new byte[]{'H', 'Y', '0', '0', '0'}, "Column '%s' is of wrong type for an " + - "InnoDB FULLTEXT index"), - ERR_INNODB_FT_WRONG_DOCID_INDEX(1798, new byte[]{'H', 'Y', '0', '0', '0'}, "Index '%s' is of wrong type for an " + - "InnoDB FULLTEXT index"), - ERR_INNODB_ONLINE_LOG_TOO_BIG(1799, new byte[]{'H', 'Y', '0', '0', '0'}, "Creating index '%s' required more than " + - "'innodb_online_alter_log_max_size' bytes of modification log. Please try again."), + ERR_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION(1792, new byte[]{'2', '5', '0', '0', '6'}, "Cannot execute statement in" + + " a READ ONLY transaction."), + ERR_TOO_LONG_TABLE_PARTITION_COMMENT(1793, new byte[]{'H', 'Y', '0', '0', '0'}, "Comment for table partition '%s'" + + " is too long (max = %d"), + ERR_SLAVE_CONFIGURATION(1794, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave is not configured or failed to " + + "initialize properly. You must at least set --server-id to enable either a master or a slave. Additional " + + "error " + + "messages can be found in the MySQL error log."), + ERR_INNODB_FT_LIMIT(1795, new byte[]{'H', 'Y', '0', '0', '0'}, "InnoDB presently supports one FULLTEXT index " + + "creation at a time"), + ERR_INNODB_NO_FT_TEMP_TABLE(1796, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot create FULLTEXT index on temporary" + + " InnoDB table"), + ERR_INNODB_FT_WRONG_DOCID_COLUMN(1797, new byte[]{'H', 'Y', '0', '0', '0'}, "Column '%s' is of wrong type for an " + + "InnoDB FULLTEXT index"), + ERR_INNODB_FT_WRONG_DOCID_INDEX(1798, new byte[]{'H', 'Y', '0', '0', '0'}, "Index '%s' is of wrong type for an " + + "InnoDB FULLTEXT index"), + ERR_INNODB_ONLINE_LOG_TOO_BIG(1799, new byte[]{'H', 'Y', '0', '0', '0'}, "Creating index '%s' required more than " + + "'innodb_online_alter_log_max_size' bytes of modification log. 
Please try again."), ERR_UNKNOWN_ALTER_ALGORITHM(1800, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown ALGORITHM '%s'"), ERR_UNKNOWN_ALTER_LOCK(1801, new byte[]{'H', 'Y', '0', '0', '0'}, "Unknown LOCK type '%s'"), - ERR_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS(1802, new byte[]{'H', 'Y', '0', '0', '0'}, "CHANGE MASTER cannot be " + - "executed when the slave was stopped with an error or killed in MTS mode. Consider using RESET SLAVE or " + - "START " + - "SLAVE UNTIL."), - ERR_MTS_RECOVERY_FAILURE(1803, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot recover after SLAVE errored out in " + - "parallel execution mode. Additional error messages can be found in the MySQL error log."), - ERR_MTS_RESET_WORKERS(1804, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot clean up worker info tables. Additional " + - "error messages can be found in the MySQL error log."), - ERR_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2(1805, new byte[]{'H', 'Y', '0', '0', '0'}, "Column count of %s.%s is " + - "wrong. Expected %d, found %d. The table is probably corrupted"), - ERR_SLAVE_SILENT_RETRY_TRANSACTION(1806, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave must silently retry current " + - "transaction"), - ERR_DISCARD_FK_CHECKS_RUNNING(1807, new byte[]{'H', 'Y', '0', '0', '0'}, "There is a foreign key check running on" + - " table '%s'. Cannot discard the table."), + ERR_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS(1802, new byte[]{'H', 'Y', '0', '0', '0'}, "CHANGE MASTER cannot be " + + "executed when the slave was stopped with an error or killed in MTS mode. Consider using RESET SLAVE or " + + "START " + + "SLAVE UNTIL."), + ERR_MTS_RECOVERY_FAILURE(1803, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot recover after SLAVE errored out in " + + "parallel execution mode. Additional error messages can be found in the MySQL error log."), + ERR_MTS_RESET_WORKERS(1804, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot clean up worker info tables. Additional " + + "error messages can be found in the MySQL error log."), + ERR_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2(1805, new byte[]{'H', 'Y', '0', '0', '0'}, "Column count of %s.%s is " + + "wrong. Expected %d, found %d. The table is probably corrupted"), + ERR_SLAVE_SILENT_RETRY_TRANSACTION(1806, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave must silently retry current " + + "transaction"), + ERR_DISCARD_FK_CHECKS_RUNNING(1807, new byte[]{'H', 'Y', '0', '0', '0'}, "There is a foreign key check running on" + + " table '%s'. Cannot discard the table."), ERR_TABLE_SCHEMA_MISMATCH(1808, new byte[]{'H', 'Y', '0', '0', '0'}, "Schema mismatch (%s"), ERR_TABLE_IN_SYSTEM_TABLESPACE(1809, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s' in system tablespace"), ERR_IO_READ_ERROR(1810, new byte[]{'H', 'Y', '0', '0', '0'}, "IO Read error: (%d, %s) %s"), ERR_IO_WRITE_ERROR(1811, new byte[]{'H', 'Y', '0', '0', '0'}, "IO Write error: (%d, %s) %s"), ERR_TABLESPACE_MISSING(1812, new byte[]{'H', 'Y', '0', '0', '0'}, "Tablespace is missing for table '%s'"), - ERR_TABLESPACE_EXISTS(1813, new byte[]{'H', 'Y', '0', '0', '0'}, "Tablespace for table '%s' exists. Please " + - "DISCARD the tablespace before IMPORT."), + ERR_TABLESPACE_EXISTS(1813, new byte[]{'H', 'Y', '0', '0', '0'}, "Tablespace for table '%s' exists. 
Please " + + "DISCARD the tablespace before IMPORT."), ERR_TABLESPACE_DISCARDED(1814, new byte[]{'H', 'Y', '0', '0', '0'}, "Tablespace has been discarded for table '%s'"), ERR_INTERNAL_ERROR(1815, new byte[]{'H', 'Y', '0', '0', '0'}, "Internal error: %s"), - ERR_INNODB_IMPORT_ERROR(1816, new byte[]{'H', 'Y', '0', '0', '0'}, "ALTER TABLE '%s' IMPORT TABLESPACE failed " + - "with error %d : '%s'"), + ERR_INNODB_IMPORT_ERROR(1816, new byte[]{'H', 'Y', '0', '0', '0'}, "ALTER TABLE '%s' IMPORT TABLESPACE failed " + + "with error %d : '%s'"), ERR_INNODB_INDEX_CORRUPT(1817, new byte[]{'H', 'Y', '0', '0', '0'}, "Index corrupt: %s"), - ERR_INVALID_YEAR_COLUMN_LENGTH(1818, new byte[]{'H', 'Y', '0', '0', '0'}, "YEAR(%d) column type is deprecated. " + - "Creating YEAR(4) column instead."), - ERR_NOT_VALID_PASSWORD(1819, new byte[]{'H', 'Y', '0', '0', '0'}, "Your password does not satisfy the current " + - "policy requirements"), - ERR_MUST_CHANGE_PASSWORD(1820, new byte[]{'H', 'Y', '0', '0', '0'}, "You must SET PASSWORD before executing this " + - "statement"), - ERR_FK_NO_INDEX_CHILD(1821, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to add the foreign key constaint. " + - "Missing index for constraint '%s' in the foreign table '%s'"), - ERR_FK_NO_INDEX_PARENT(1822, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to add the foreign key constaint. " + - "Missing index for constraint '%s' in the referenced table '%s'"), - ERR_FK_FAIL_ADD_SYSTEM(1823, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to add the foreign key constraint '%s' " + - "to system tables"), + ERR_INVALID_YEAR_COLUMN_LENGTH(1818, new byte[]{'H', 'Y', '0', '0', '0'}, "YEAR(%d) column type is deprecated. " + + "Creating YEAR(4) column instead."), + ERR_NOT_VALID_PASSWORD(1819, new byte[]{'H', 'Y', '0', '0', '0'}, "Your password does not satisfy the current " + + "policy requirements"), + ERR_MUST_CHANGE_PASSWORD(1820, new byte[]{'H', 'Y', '0', '0', '0'}, "You must SET PASSWORD before executing this " + + "statement"), + ERR_FK_NO_INDEX_CHILD(1821, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to add the foreign key constaint. " + + "Missing index for constraint '%s' in the foreign table '%s'"), + ERR_FK_NO_INDEX_PARENT(1822, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to add the foreign key constaint. " + + "Missing index for constraint '%s' in the referenced table '%s'"), + ERR_FK_FAIL_ADD_SYSTEM(1823, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to add the foreign key constraint '%s' " + + "to system tables"), ERR_FK_CANNOT_OPEN_PARENT(1824, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to open the referenced table '%s'"), - ERR_FK_INCORRECT_OPTION(1825, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to add the foreign key constraint on " + - "table '%s'. Incorrect options in FOREIGN KEY constraint '%s'"), + ERR_FK_INCORRECT_OPTION(1825, new byte[]{'H', 'Y', '0', '0', '0'}, "Failed to add the foreign key constraint on " + + "table '%s'. Incorrect options in FOREIGN KEY constraint '%s'"), ERR_FK_DUP_NAME(1826, new byte[]{'H', 'Y', '0', '0', '0'}, "Duplicate foreign key constraint name '%s'"), - ERR_PASSWORD_FORMAT(1827, new byte[]{'H', 'Y', '0', '0', '0'}, "The password hash doesn't have the expected " + - "format. 
Check if the correct password algorithm is being used with the PASSWORD() function."), - ERR_FK_COLUMN_CANNOT_DROP(1828, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot drop column '%s': needed in a " + - "foreign key constraint '%s'"), - ERR_FK_COLUMN_CANNOT_DROP_CHILD(1829, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot drop column '%s': needed in a " + - "foreign key constraint '%s' of table '%s'"), - ERR_FK_COLUMN_NOT_NULL(1830, new byte[]{'H', 'Y', '0', '0', '0'}, "Column '%s' cannot be NOT NULL: needed in a " + - "foreign key constraint '%s' SET NULL"), - ERR_DUP_INDEX(1831, new byte[]{'H', 'Y', '0', '0', '0'}, "Duplicate index '%s' defined on the table '%s.%s'. This" + - " is deprecated and will be disallowed in a future release."), - ERR_FK_COLUMN_CANNOT_CHANGE(1832, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot change column '%s': used in a " + - "foreign key constraint '%s'"), - ERR_FK_COLUMN_CANNOT_CHANGE_CHILD(1833, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot change column '%s': used in " + - "a foreign key constraint '%s' of table '%s'"), - ERR_FK_CANNOT_DELETE_PARENT(1834, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot delete rows from table which is " + - "parent in a foreign key constraint '%s' of table '%s'"), + ERR_PASSWORD_FORMAT(1827, new byte[]{'H', 'Y', '0', '0', '0'}, "The password hash doesn't have the expected " + + "format. Check if the correct password algorithm is being used with the PASSWORD() function."), + ERR_FK_COLUMN_CANNOT_DROP(1828, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot drop column '%s': needed in a " + + "foreign key constraint '%s'"), + ERR_FK_COLUMN_CANNOT_DROP_CHILD(1829, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot drop column '%s': needed in a " + + "foreign key constraint '%s' of table '%s'"), + ERR_FK_COLUMN_NOT_NULL(1830, new byte[]{'H', 'Y', '0', '0', '0'}, "Column '%s' cannot be NOT NULL: needed in a " + + "foreign key constraint '%s' SET NULL"), + ERR_DUP_INDEX(1831, new byte[]{'H', 'Y', '0', '0', '0'}, "Duplicate index '%s' defined on the table '%s.%s'. This" + + " is deprecated and will be disallowed in a future release."), + ERR_FK_COLUMN_CANNOT_CHANGE(1832, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot change column '%s': used in a " + + "foreign key constraint '%s'"), + ERR_FK_COLUMN_CANNOT_CHANGE_CHILD(1833, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot change column '%s': used in " + + "a foreign key constraint '%s' of table '%s'"), + ERR_FK_CANNOT_DELETE_PARENT(1834, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot delete rows from table which is " + + "parent in a foreign key constraint '%s' of table '%s'"), ERR_MALFORMED_PACKET(1835, new byte[]{'H', 'Y', '0', '0', '0'}, "Malformed communication packet."), ERR_READ_ONLY_MODE(1836, new byte[]{'H', 'Y', '0', '0', '0'}, "Running in read-only mode"), - ERR_GTID_NEXT_TYPE_UNDEFINED_GROUP(1837, new byte[]{'H', 'Y', '0', '0', '0'}, "When @@SESSION.GTID_NEXT is set to" + - " a GTID, you must explicitly set it to a different value after a COMMIT or ROLLBACK. Please check " + - "GTID_NEXT " + - "variable manual page for detailed explanation. 
Current @@SESSION.GTID_NEXT is '%s'."), - ERR_VARIABLE_NOT_SETTABLE_IN_SP(1838, new byte[]{'H', 'Y', '0', '0', '0'}, "The system variable %s cannot be set " + - "in stored procedures."), - ERR_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF(1839, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL.GTID_PURGED " + - "can only be set when @@GLOBAL.GTID_MODE = ON."), - ERR_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY(1840, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL" + - ".GTID_PURGED can only be set when @@GLOBAL.GTID_EXECUTED is empty."), - ERR_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY(1841, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL" + - ".GTID_PURGED can only be set when there are no ongoing transactions (not even in other clients)."), - ERR_GTID_PURGED_WAS_CHANGED(1842, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL.GTID_PURGED was changed from " + - "'%s' to '%s'."), - ERR_GTID_EXECUTED_WAS_CHANGED(1843, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL.GTID_EXECUTED was changed from" + - " '%s' to '%s'."), - ERR_BINLOG_STMT_MODE_AND_NO_REPL_TABLES(1844, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + - "impossible to write to binary log since BINLOG_FORMAT = STATEMENT, and both replicated and non " + - "replicated " + - "tables are written to."), - ERR_ALTER_OPERATION_NOT_SUPPORTED(1845, new byte[]{'0', 'A', '0', '0', '0'}, "%s is not supported for this " + - "operation. Try %s."), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON(1846, new byte[]{'0', 'A', '0', '0', '0'}, "%s is not supported. Reason:" + - " %s. Try %s."), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY(1847, new byte[]{'H', 'Y', '0', '0', '0'}, "COPY algorithm requires" + - " a lock"), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION(1848, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition specific" + - " operations do not yet support LOCK/ALGORITHM"), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME(1849, new byte[]{'H', 'Y', '0', '0', '0'}, "Columns " + - "participating in a foreign key are renamed"), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE(1850, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot change " + - "column type INPLACE"), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK(1851, new byte[]{'H', 'Y', '0', '0', '0'}, "Adding foreign keys" + - " needs foreign_key_checks=OFF"), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE(1852, new byte[]{'H', 'Y', '0', '0', '0'}, "Creating unique " + - "indexes with IGNORE requires COPY algorithm to remove duplicate rows"), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK(1853, new byte[]{'H', 'Y', '0', '0', '0'}, "Dropping a primary key " + - "is not allowed without also adding a new primary key"), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC(1854, new byte[]{'H', 'Y', '0', '0', '0'}, "Adding an " + - "auto-increment column requires a lock"), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS(1855, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot replace " + - "hidden FTS_DOC_ID with a user-visible one"), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS(1856, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot drop or " + - "rename FTS_DOC_ID"), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS(1857, new byte[]{'H', 'Y', '0', '0', '0'}, "Fulltext index creation " + - "requires a lock"), + ERR_GTID_NEXT_TYPE_UNDEFINED_GROUP(1837, new byte[]{'H', 'Y', '0', '0', '0'}, "When @@SESSION.GTID_NEXT is set to" + + " a GTID, you must explicitly set it to a different value after a COMMIT or ROLLBACK. 
Please check " + + "GTID_NEXT " + + "variable manual page for detailed explanation. Current @@SESSION.GTID_NEXT is '%s'."), + ERR_VARIABLE_NOT_SETTABLE_IN_SP(1838, new byte[]{'H', 'Y', '0', '0', '0'}, "The system variable %s cannot be set " + + "in stored procedures."), + ERR_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF(1839, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL.GTID_PURGED " + + "can only be set when @@GLOBAL.GTID_MODE = ON."), + ERR_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY(1840, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL" + + ".GTID_PURGED can only be set when @@GLOBAL.GTID_EXECUTED is empty."), + ERR_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY(1841, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL" + + ".GTID_PURGED can only be set when there are no ongoing transactions (not even in other clients)."), + ERR_GTID_PURGED_WAS_CHANGED(1842, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL.GTID_PURGED was changed from " + + "'%s' to '%s'."), + ERR_GTID_EXECUTED_WAS_CHANGED(1843, new byte[]{'H', 'Y', '0', '0', '0'}, "@@GLOBAL.GTID_EXECUTED was changed from" + + " '%s' to '%s'."), + ERR_BINLOG_STMT_MODE_AND_NO_REPL_TABLES(1844, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot execute statement: " + + "impossible to write to binary log since BINLOG_FORMAT = STATEMENT, and both replicated and non " + + "replicated " + + "tables are written to."), + ERR_ALTER_OPERATION_NOT_SUPPORTED(1845, new byte[]{'0', 'A', '0', '0', '0'}, "%s is not supported for this " + + "operation. Try %s."), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON(1846, new byte[]{'0', 'A', '0', '0', '0'}, "%s is not supported. Reason:" + + " %s. Try %s."), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY(1847, new byte[]{'H', 'Y', '0', '0', '0'}, "COPY algorithm requires" + + " a lock"), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION(1848, new byte[]{'H', 'Y', '0', '0', '0'}, "Partition specific" + + " operations do not yet support LOCK/ALGORITHM"), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME(1849, new byte[]{'H', 'Y', '0', '0', '0'}, "Columns " + + "participating in a foreign key are renamed"), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE(1850, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot change " + + "column type INPLACE"), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK(1851, new byte[]{'H', 'Y', '0', '0', '0'}, "Adding foreign keys" + + " needs foreign_key_checks=OFF"), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE(1852, new byte[]{'H', 'Y', '0', '0', '0'}, "Creating unique " + + "indexes with IGNORE requires COPY algorithm to remove duplicate rows"), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK(1853, new byte[]{'H', 'Y', '0', '0', '0'}, "Dropping a primary key " + + "is not allowed without also adding a new primary key"), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC(1854, new byte[]{'H', 'Y', '0', '0', '0'}, "Adding an " + + "auto-increment column requires a lock"), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS(1855, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot replace " + + "hidden FTS_DOC_ID with a user-visible one"), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS(1856, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot drop or " + + "rename FTS_DOC_ID"), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS(1857, new byte[]{'H', 'Y', '0', '0', '0'}, "Fulltext index creation " + + "requires a lock"), ERR_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE(1858, new byte[]{'H', 'Y', '0', '0', '0'}, - "sql_slave_skip_counter can not be set when the server is running 
with @@GLOBAL.GTID_MODE = ON. Instead, " + - "for each" + - " transaction that you want to skip, generate an empty transaction with the same GTID as the " + - "transaction"), + "sql_slave_skip_counter can not be set when the server is running with @@GLOBAL.GTID_MODE = ON. Instead, " + + "for each" + + " transaction that you want to skip, generate an empty transaction with the same GTID as the " + + "transaction"), ERR_DUP_UNKNOWN_IN_INDEX(1859, new byte[]{'2', '3', '0', '0', '0'}, "Duplicate entry for key '%s'"), - ERR_IDENT_CAUSES_TOO_LONG_PATH(1860, new byte[]{'H', 'Y', '0', '0', '0'}, "Long database name and identifier for " + - "object resulted in path length exceeding %d characters. Path: '%s'."), - ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL(1861, new byte[]{'H', 'Y', '0', '0', '0'}, "cannot silently " + - "convert NULL values, as required in this SQL_MODE"), - ERR_MUST_CHANGE_PASSWORD_LOGIN(1862, new byte[]{'H', 'Y', '0', '0', '0'}, "Your password has expired. To log in " + - "you must change it using a client that supports expired passwords."), + ERR_IDENT_CAUSES_TOO_LONG_PATH(1860, new byte[]{'H', 'Y', '0', '0', '0'}, "Long database name and identifier for " + + "object resulted in path length exceeding %d characters. Path: '%s'."), + ERR_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL(1861, new byte[]{'H', 'Y', '0', '0', '0'}, "cannot silently " + + "convert NULL values, as required in this SQL_MODE"), + ERR_MUST_CHANGE_PASSWORD_LOGIN(1862, new byte[]{'H', 'Y', '0', '0', '0'}, "Your password has expired. To log in " + + "you must change it using a client that supports expired passwords."), ERR_ROW_IN_WRONG_PARTITION(1863, new byte[]{'H', 'Y', '0', '0', '0'}, "Found a row in wrong partition %s"), - ERR_MTS_EVENT_BIGGER_PENDING_JOBS_SIZE_MAX(1864, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot schedule event %s, " + - "relay-log name %s, position %s to Worker thread because its size %d exceeds %d of " + - "slave_pending_jobs_size_max" + - "."), - ERR_INNODB_NO_FT_USES_PARSER(1865, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot CREATE FULLTEXT INDEX WITH PARSER" + - " on InnoDB table"), - ERR_BINLOG_LOGICAL_CORRUPTION(1866, new byte[]{'H', 'Y', '0', '0', '0'}, "The binary log file '%s' is logically " + - "corrupted: %s"), - ERR_WARN_PURGE_LOG_IN_USE(1867, new byte[]{'H', 'Y', '0', '0', '0'}, "file %s was not purged because it was being" + - " read by %d thread(s), purged only %d out of %d files."), - ERR_WARN_PURGE_LOG_IS_ACTIVE(1868, new byte[]{'H', 'Y', '0', '0', '0'}, "file %s was not purged because it is the" + - " active log file."), - ERR_AUTO_INCREMENT_CONFLICT(1869, new byte[]{'H', 'Y', '0', '0', '0'}, "Auto-increment value in UPDATE conflicts " + - "with internally generated values"), - WARN_ON_BLOCKHOLE_IN_RBR(1870, new byte[]{'H', 'Y', '0', '0', '0'}, "Row events are not logged for %s statements " + - "that modify BLACKHOLE tables in row format. Table(s): '%s'"), - ERR_SLAVE_MI_INIT_REPOSITORY(1871, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave failed to initialize master info " + - "structure from the repository"), - ERR_SLAVE_RLI_INIT_REPOSITORY(1872, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave failed to initialize relay log " + - "info structure from the repository"), - ERR_ACCESS_DENIED_CHANGE_USER_ERROR(1873, new byte[]{'2', '8', '0', '0', '0'}, "Access denied trying to change to" + - " user '%s'@'%s' (using password: %s). 
Disconnecting."), + ERR_MTS_EVENT_BIGGER_PENDING_JOBS_SIZE_MAX(1864, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot schedule event %s, " + + "relay-log name %s, position %s to Worker thread because its size %d exceeds %d of " + + "slave_pending_jobs_size_max" + + "."), + ERR_INNODB_NO_FT_USES_PARSER(1865, new byte[]{'H', 'Y', '0', '0', '0'}, "Cannot CREATE FULLTEXT INDEX WITH PARSER" + + " on InnoDB table"), + ERR_BINLOG_LOGICAL_CORRUPTION(1866, new byte[]{'H', 'Y', '0', '0', '0'}, "The binary log file '%s' is logically " + + "corrupted: %s"), + ERR_WARN_PURGE_LOG_IN_USE(1867, new byte[]{'H', 'Y', '0', '0', '0'}, "file %s was not purged because it was being" + + " read by %d thread(s), purged only %d out of %d files."), + ERR_WARN_PURGE_LOG_IS_ACTIVE(1868, new byte[]{'H', 'Y', '0', '0', '0'}, "file %s was not purged because it is the" + + " active log file."), + ERR_AUTO_INCREMENT_CONFLICT(1869, new byte[]{'H', 'Y', '0', '0', '0'}, "Auto-increment value in UPDATE conflicts " + + "with internally generated values"), + WARN_ON_BLOCKHOLE_IN_RBR(1870, new byte[]{'H', 'Y', '0', '0', '0'}, "Row events are not logged for %s statements " + + "that modify BLACKHOLE tables in row format. Table(s): '%s'"), + ERR_SLAVE_MI_INIT_REPOSITORY(1871, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave failed to initialize master info " + + "structure from the repository"), + ERR_SLAVE_RLI_INIT_REPOSITORY(1872, new byte[]{'H', 'Y', '0', '0', '0'}, "Slave failed to initialize relay log " + + "info structure from the repository"), + ERR_ACCESS_DENIED_CHANGE_USER_ERROR(1873, new byte[]{'2', '8', '0', '0', '0'}, "Access denied trying to change to" + + " user '%s'@'%s' (using password: %s). Disconnecting."), ERR_INNODB_READ_ONLY(1874, new byte[]{'H', 'Y', '0', '0', '0'}, "InnoDB is in read only mode."), - ERR_STOP_SLAVE_SQL_THREAD_TIMEOUT(1875, new byte[]{'H', 'Y', '0', '0', '0'}, "STOP SLAVE command execution is " + - "incomplete: Slave SQL thread got the stop signal, thread is busy, SQL thread will stop once the current " + - "task is" + - " complete."), - ERR_STOP_SLAVE_IO_THREAD_TIMEOUT(1876, new byte[]{'H', 'Y', '0', '0', '0'}, "STOP SLAVE command execution is " + - "incomplete: Slave IO thread got the stop signal, thread is busy, IO thread will stop once the current " + - "task is " + - "complete."), - ERR_TABLE_CORRUPT(1877, new byte[]{'H', 'Y', '0', '0', '0'}, "Operation cannot be performed. The table '%s.%s' is" + - " missing, corrupt or contains bad data."), + ERR_STOP_SLAVE_SQL_THREAD_TIMEOUT(1875, new byte[]{'H', 'Y', '0', '0', '0'}, "STOP SLAVE command execution is " + + "incomplete: Slave SQL thread got the stop signal, thread is busy, SQL thread will stop once the current " + + "task is" + + " complete."), + ERR_STOP_SLAVE_IO_THREAD_TIMEOUT(1876, new byte[]{'H', 'Y', '0', '0', '0'}, "STOP SLAVE command execution is " + + "incomplete: Slave IO thread got the stop signal, thread is busy, IO thread will stop once the current " + + "task is " + + "complete."), + ERR_TABLE_CORRUPT(1877, new byte[]{'H', 'Y', '0', '0', '0'}, "Operation cannot be performed. 
The table '%s.%s' is" + + " missing, corrupt or contains bad data."), ERR_TEMP_FILE_WRITE_FAILURE(1878, new byte[]{'H', 'Y', '0', '0', '0'}, "Temporary file write failure."), - ERR_INNODB_FT_AUX_NOT_HEX_ID(1879, new byte[]{'H', 'Y', '0', '0', '0'}, "Upgrade index name failed, please use " + - "create index(alter table) algorithm copy to rebuild index."), + ERR_INNODB_FT_AUX_NOT_HEX_ID(1879, new byte[]{'H', 'Y', '0', '0', '0'}, "Upgrade index name failed, please use " + + "create index(alter table) algorithm copy to rebuild index."), ERR_LAST_MYSQL_ERROR_MESSAGE(1880, new byte[]{}, ""), // Following is Palo's error code, which start from 5000 ERR_NOT_OLAP_TABLE(5000, new byte[]{'H', 'Y', '0', '0', '0'}, "Table '%s' is not a OLAP table"), @@ -1668,8 +1668,8 @@ public enum ErrorCode { ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_EMPTY(5076, new byte[]{'4', '2', '0', '0', '0'}, "Dynamic reserved history periods is empty."), ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_INVALID(5077, new byte[]{'4', '2', '0', '0', '0'}, - "Invalid \" %s \" value %s. It must be like \"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is " + - "DAY/WEEK/MONTH or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR."), + "Invalid \" %s \" value %s. It must be like \"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is " + + "DAY/WEEK/MONTH or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR."), ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_START_ENDS_LENGTH_NOT_EQUAL(5078, new byte[]{'4', '2', '0', '0', '0'}, "RESERVED_HISTORY_PERIODS must have pairs of date value. The input %s is not valid."), @@ -1682,8 +1682,8 @@ public enum ErrorCode { ERR_ILLEGAL_COLUMN_REFERENCE_ERROR(5082, new byte[]{'4', '2', '0', '0', '1'}, "Illegal column/field reference '%s' of semi-/anti-join"), ERR_EMPTY_PARTITION_IN_TABLE(5083, new byte[]{'4', '2', '0', '0', '0'}, - "data cannot be inserted into table with empty partition. " + - "Use `SHOW PARTITIONS FROM %s` to see the currently partitions of this table. "), + "data cannot be inserted into table with empty partition. " + + "Use `SHOW PARTITIONS FROM %s` to see the currently partitions of this table. 
"), ERROR_SQL_AND_LIMITATIONS_SET_IN_ONE_RULE(5084, new byte[]{'4', '2', '0', '0', '0'}, "sql/sqlHash and partition_num/tablet_num/cardinality cannot be set in one rule.") ; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/LabelAlreadyUsedException.java b/fe/fe-core/src/main/java/org/apache/doris/common/LabelAlreadyUsedException.java index d723c886a4a9bd..7c3e7e31c72090 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/LabelAlreadyUsedException.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/LabelAlreadyUsedException.java @@ -36,20 +36,20 @@ public LabelAlreadyUsedException(String label) { public LabelAlreadyUsedException(TransactionState txn) { super("Label [" + txn.getLabel() + "] has already been used, relate to txn [" + txn.getTransactionId() + "]"); switch (txn.getTransactionStatus()) { - case UNKNOWN: - case PREPARE: - jobStatus = "RUNNING"; - break; - case PRECOMMITTED: - jobStatus = "PRECOMMITTED"; - break; - case COMMITTED: - case VISIBLE: - jobStatus = "FINISHED"; - break; - default: - Preconditions.checkState(false, txn.getTransactionStatus()); - break; + case UNKNOWN: + case PREPARE: + jobStatus = "RUNNING"; + break; + case PRECOMMITTED: + jobStatus = "PRECOMMITTED"; + break; + case COMMITTED: + case VISIBLE: + jobStatus = "FINISHED"; + break; + default: + Preconditions.checkState(false, txn.getTransactionStatus()); + break; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/Log4jConfig.java b/fe/fe-core/src/main/java/org/apache/doris/common/Log4jConfig.java index 0a56d9603e6ec8..87105d56b96d85 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/Log4jConfig.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/Log4jConfig.java @@ -37,73 +37,73 @@ public class Log4jConfig extends XmlConfiguration { private static final long serialVersionUID = 1L; - private static String xmlConfTemplate = "\n" + - "\n\n" + - "\n" + - " \n" + - " " + - " \n" + - " %d{yyyy-MM-dd HH:mm:ss,SSS} %p (%t|%tid) [%C{1}.%M():%L] %m%n\n" + - " \n" + - " " + - " \n" + - " \n" + - " %d{yyyy-MM-dd HH:mm:ss,SSS} %p (%t|%tid) [%C{1}.%M():%L] %m%n\n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " %d{yyyy-MM-dd HH:mm:ss,SSS} %p (%t|%tid) [%C{1}.%M():%L] %m%n\n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " %d{yyyy-MM-dd HH:mm:ss,SSS} [%c{1}] %m%n\n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - ""; + private static String xmlConfTemplate = "\n" + + "\n\n" + + "\n" + + " \n" + + " " + + " \n" + + " %d{yyyy-MM-dd HH:mm:ss,SSS} %p (%t|%tid) [%C{1}.%M():%L] %m%n\n" + + " \n" + + " " + + " \n" + + " \n" + + " %d{yyyy-MM-dd HH:mm:ss,SSS} %p (%t|%tid) [%C{1}.%M():%L] %m%n\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " %d{yyyy-MM-dd HH:mm:ss,SSS} %p (%t|%tid) [%C{1}.%M():%L] %m%n\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " %d{yyyy-MM-dd HH:mm:ss,SSS} [%c{1}] %m%n\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " 
\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + ""; private static StrSubstitutor strSub; private static String sysLogLevel; @@ -130,10 +130,10 @@ private static void reconfig() throws IOException { String sysRollNum = String.valueOf(Config.sys_log_roll_num); String sysDeleteAge = String.valueOf(Config.sys_log_delete_age); - if (!(sysLogLevel.equalsIgnoreCase("INFO") || - sysLogLevel.equalsIgnoreCase("WARN") || - sysLogLevel.equalsIgnoreCase("ERROR") || - sysLogLevel.equalsIgnoreCase("FATAL"))) { + if (!(sysLogLevel.equalsIgnoreCase("INFO") + || sysLogLevel.equalsIgnoreCase("WARN") + || sysLogLevel.equalsIgnoreCase("ERROR") + || sysLogLevel.equalsIgnoreCase("FATAL"))) { throw new IOException("sys_log_level config error"); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/MarkedCountDownLatch.java b/fe/fe-core/src/main/java/org/apache/doris/common/MarkedCountDownLatch.java index 53aa426329db81..ba407e7f3e1adc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/MarkedCountDownLatch.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/MarkedCountDownLatch.java @@ -61,7 +61,7 @@ public synchronized void countDownToZero(Status status) { if (st.ok()) { st = status; } - while(getCount() > 0) { + while (getCount() > 0) { super.countDown(); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/MetaFooter.java b/fe/fe-core/src/main/java/org/apache/doris/common/MetaFooter.java index 946cd983b87616..1e9dcace461d9b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/MetaFooter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/MetaFooter.java @@ -56,7 +56,7 @@ public class MetaFooter { public List metaIndices; public static MetaFooter read(File imageFile) throws IOException { - try(RandomAccessFile raf = new RandomAccessFile(imageFile, "r")) { + try (RandomAccessFile raf = new RandomAccessFile(imageFile, "r")) { long fileLength = raf.length(); long footerLengthIndex = fileLength - FOOTER_LENGTH_SIZE - MetaMagicNumber.MAGIC_STR.length(); raf.seek(footerLengthIndex); @@ -86,7 +86,7 @@ public static MetaFooter read(File imageFile) throws IOException { } public static void write(File imageFile, List metaIndices, long checksum) throws IOException { - try(RandomAccessFile raf = new RandomAccessFile(imageFile, "rw")) { + try (RandomAccessFile raf = new RandomAccessFile(imageFile, "rw")) { long startIndex = raf.length(); raf.seek(startIndex); raf.writeLong(checksum); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/MetaHeader.java b/fe/fe-core/src/main/java/org/apache/doris/common/MetaHeader.java index 479b36e21063a3..f96bfc7f9e2583 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/MetaHeader.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/MetaHeader.java @@ -51,7 +51,7 @@ public class MetaHeader { public MetaJsonHeader metaJsonHeader; public static MetaHeader read(File imageFile) throws IOException { - try(RandomAccessFile raf = new RandomAccessFile(imageFile, "r")) { + try (RandomAccessFile raf = new RandomAccessFile(imageFile, "r")) { raf.seek(0); MetaMagicNumber magicNumber = MetaMagicNumber.read(raf); if (!Arrays.equals(MetaMagicNumber.MAGIC, magicNumber.getBytes())) { @@ -61,8 +61,9 @@ public static MetaHeader read(File imageFile) throws IOException { } MetaJsonHeader metaJsonHeader = MetaJsonHeader.read(raf); if (!MetaJsonHeader.IMAGE_VERSION.equalsIgnoreCase(metaJsonHeader.imageVersion)) { - String errMsg = 
"Image file " + imageFile.getPath() + " format version mismatch. " + - "Expected version is "+ MetaJsonHeader.IMAGE_VERSION +", actual is" + metaJsonHeader.imageVersion; + String errMsg = "Image file " + imageFile.getPath() + " format version mismatch. " + + "Expected version is " + MetaJsonHeader.IMAGE_VERSION + + ", actual is" + metaJsonHeader.imageVersion; // different versions are incompatible throw new IOException(errMsg); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/MetaReader.java b/fe/fe-core/src/main/java/org/apache/doris/common/MetaReader.java index 47b149f506462c..f285b4d0a5b0ff 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/MetaReader.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/MetaReader.java @@ -90,7 +90,7 @@ public static void read(File imageFile, Catalog catalog) throws IOException, Ddl checksum = catalog.loadBrokers(dis, checksum); checksum = catalog.loadResources(dis, checksum); checksum = catalog.loadExportJob(dis, checksum); - checksum = catalog.loadSyncJobs(dis,checksum); + checksum = catalog.loadSyncJobs(dis, checksum); checksum = catalog.loadBackupHandler(dis, checksum); checksum = catalog.loadPaloAuth(dis, checksum); // global transaction must be replayed before load jobs v2 diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/MetaWriter.java b/fe/fe-core/src/main/java/org/apache/doris/common/MetaWriter.java index 0f1adbe5715805..553a022cf9fff6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/MetaWriter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/MetaWriter.java @@ -85,7 +85,7 @@ public void setDelegate(CountingDataOutputStream dos, List indices) { } public long doWork(String name, WriteMethod method) throws IOException { - if(delegate == null){ + if (delegate == null) { return method.write(); } return delegate.doWork(name, method); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServer.java b/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServer.java index 78cd12bdf65daa..40ee36606a17f5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServer.java @@ -91,28 +91,29 @@ public ThriftServerType getType() { private void createSimpleServer() throws TTransportException { TServer.Args args = new TServer.Args(new TServerSocket(port)).protocolFactory( - new TBinaryProtocol.Factory()).processor(processor); + new TBinaryProtocol.Factory()).processor(processor); server = new TSimpleServer(args); } private void createThreadedServer() throws TTransportException { - TThreadedSelectorServer.Args args = - new TThreadedSelectorServer.Args(new TNonblockingServerSocket(port, Config.thrift_client_timeout_ms)).protocolFactory( - new TBinaryProtocol.Factory()).processor(processor); + TThreadedSelectorServer.Args args = new TThreadedSelectorServer.Args( + new TNonblockingServerSocket(port, Config.thrift_client_timeout_ms)).protocolFactory( + new TBinaryProtocol.Factory()).processor(processor); ThreadPoolExecutor threadPoolExecutor = ThreadPoolManager.newDaemonCacheThreadPool(Config.thrift_server_max_worker_threads, "thrift-server-pool", true); args.executorService(threadPoolExecutor); server = new TThreadedSelectorServer(args); } private void createThreadPoolServer() throws TTransportException { - TServerSocket.ServerSocketTransportArgs socketTransportArgs = new TServerSocket.ServerSocketTransportArgs() - .bindAddr(new InetSocketAddress(port)) - 
.clientTimeout(Config.thrift_client_timeout_ms) - .backlog(Config.thrift_backlog_num); + TServerSocket.ServerSocketTransportArgs socketTransportArgs = + new TServerSocket.ServerSocketTransportArgs() + .bindAddr(new InetSocketAddress(port)) + .clientTimeout(Config.thrift_client_timeout_ms) + .backlog(Config.thrift_backlog_num); TThreadPoolServer.Args serverArgs = - new TThreadPoolServer.Args(new TServerSocket(socketTransportArgs)).protocolFactory( - new TBinaryProtocol.Factory()).processor(processor); + new TThreadPoolServer.Args(new TServerSocket(socketTransportArgs)).protocolFactory( + new TBinaryProtocol.Factory()).processor(processor); ThreadPoolExecutor threadPoolExecutor = ThreadPoolManager.newDaemonCacheThreadPool(Config.thrift_server_max_worker_threads, "thrift-server-pool", true); serverArgs.executorService(threadPoolExecutor); server = new TThreadPoolServer(serverArgs); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/TreeNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/TreeNode.java index 22de70a1e385c1..855a7788c97478 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/TreeNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/TreeNode.java @@ -41,17 +41,28 @@ public void addChild(NodeType n) { children.add(n); } - public void addChildren(List n) { + public void addChildren(List n) { children.addAll(n); } - public boolean hasChild(int i) { return children.size() > i; } - public void setChild(int index, NodeType n) { children.set(index, n); } - public ArrayList getChildren() { return children; } - public void clearChildren() { children.clear(); } + public boolean hasChild(int i) { + return children.size() > i; + } + + public void setChild(int index, NodeType n) { + children.set(index, n); + } + + public ArrayList getChildren() { + return children; + } + + public void clearChildren() { + children.clear(); + } - public void removeNode(int i){ - if (children != null && i>=0 && i< children.size()) { + public void removeNode(int i) { + if (children != null && i >= 0 && i < children.size()) { children.remove(i); } } @@ -62,7 +73,7 @@ public void removeNode(int i){ */ public int numNodes() { int numNodes = 1; - for (NodeType child: children) { + for (NodeType child : children) { numNodes += child.numNodes(); } return numNodes; @@ -84,7 +95,7 @@ public , D extends C> void collect( matches.add((D) this); return; } - for (NodeType child: children) { + for (NodeType child : children) { child.collect(predicate, matches); } } @@ -100,7 +111,7 @@ public , D extends C> void collect( matches.add((D) this); return; } - for (NodeType child: children) { + for (NodeType child : children) { child.collect(cl, matches); } } @@ -115,7 +126,7 @@ public , D extends C> void collectAll( if (predicate.apply((C) this)) { matches.add((D) this); } - for (NodeType child: children) { + for (NodeType child : children) { child.collectAll(predicate, matches); } } @@ -126,7 +137,7 @@ public , D extends C> void collectAll( */ public static , D extends C> void collect( Collection nodeList, Predicate predicate, Collection matches) { - for (C node: nodeList) { + for (C node : nodeList) { node.collect(predicate, matches); } } @@ -137,7 +148,7 @@ public static , D extends C> void collect( */ public static , D extends C> void collect( Collection nodeList, Class cl, Collection matches) { - for (C node: nodeList) { + for (C node : nodeList) { node.collect(cl, matches); } } @@ -162,7 +173,7 @@ public > boolean contains( if (predicate.apply((C) this)) { return true; } - for (NodeType 
child: children) { + for (NodeType child : children) { if (child.contains(predicate)) { return true; } @@ -176,7 +187,7 @@ public > boolean contains( */ public static , D extends C> boolean contains( Collection nodeList, Predicate predicate) { - for (C node: nodeList) { + for (C node : nodeList) { if (node.contains(predicate)) { return true; } @@ -189,7 +200,7 @@ public static , D extends C> boolean contains( */ public static > boolean contains( List nodeList, Class cl) { - for (C node: nodeList) { + for (C node : nodeList) { if (node.contains(cl)) { return true; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/parquet/ParquetReader.java b/fe/fe-core/src/main/java/org/apache/doris/common/parquet/ParquetReader.java index 867b3de71412fb..e9ac9a7433f313 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/parquet/ParquetReader.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/parquet/ParquetReader.java @@ -112,7 +112,6 @@ private void parseGroup(Group g, List line) { for (int field = 0; field < fieldCount; field++) { int valueCount = g.getFieldRepetitionCount(field); Type fieldType = g.getType().getType(field); - String fieldName = fieldType.getName(); if (valueCount == 1) { line.add(g.getValueToString(field, 0)); } else if (valueCount > 1) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BdbjeDatabaseProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BdbjeDatabaseProcDir.java index a002fb46eba755..2d3bf51d1b6bf2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BdbjeDatabaseProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BdbjeDatabaseProcDir.java @@ -33,7 +33,7 @@ public class BdbjeDatabaseProcDir implements ProcDirInterface { private String dbName; - public BdbjeDatabaseProcDir(String dbName){ + public BdbjeDatabaseProcDir(String dbName) { this.dbName = dbName; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryInfoProvider.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryInfoProvider.java index 427ef2a9359f3d..3fa789989a2aed 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryInfoProvider.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryInfoProvider.java @@ -101,7 +101,7 @@ public Collection getInstanceStatistics(QueryStatisticsItem private Map collectInstanceProfile(RuntimeProfile queryProfile) { final Map instanceProfiles = Maps.newHashMap(); for (RuntimeProfile fragmentProfile : queryProfile.getChildMap().values()) { - for (Map.Entry entry: fragmentProfile.getChildMap().entrySet()) { + for (Map.Entry entry : fragmentProfile.getChildMap().entrySet()) { Preconditions.checkState(instanceProfiles.put(parseInstanceId(entry.getKey()), entry.getValue()) == null); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryStatementsProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryStatementsProcNode.java index 477f3eb8bb613d..d757de2067beae 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryStatementsProcNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryStatementsProcNode.java @@ -46,9 +46,6 @@ public ProcResult fetchResult() throws AnalysisException { result.setNames(TITLE_NAMES.asList()); final List> sortedRowData = Lists.newArrayList(); - final CurrentQueryInfoProvider provider = new CurrentQueryInfoProvider(); - final Map statisticsMap - = 
provider.getQueryStatistics(statistic.values()); for (QueryStatisticsItem item : statistic.values()) { final List values = Lists.newArrayList(); values.add(item.getQueryId()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/EsPartitionsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/EsPartitionsProcDir.java index b08444bd7deb41..145905d226bc21 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/EsPartitionsProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/EsPartitionsProcDir.java @@ -90,7 +90,8 @@ public ProcResult fetchResult() throws AnalysisException { colNames.add(column.getName()); } partitionInfo.add(joiner.join(colNames)); // partition key - partitionInfo.add(rangePartitionInfo.getItem(esShardPartitions.getPartitionId()).getItems().toString());// range + partitionInfo.add( + rangePartitionInfo.getItem(esShardPartitions.getPartitionId()).getItems().toString()); // range partitionInfo.add("-"); // dis partitionInfo.add(esShardPartitions.getShardRoutings().size()); // shards partitionInfo.add(1); // replica num diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ExportProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ExportProcNode.java index 3bb816e21b8fc1..4c469a3039d304 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ExportProcNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ExportProcNode.java @@ -57,7 +57,8 @@ public ProcResult fetchResult() throws AnalysisException { BaseProcResult result = new BaseProcResult(); result.setNames(TITLE_NAMES); - List> jobInfos = exportMgr.getExportJobInfosByIdOrState(db.getId(), 0, "",false, null, null, LIMIT); + List> jobInfos = exportMgr.getExportJobInfosByIdOrState( + db.getId(), 0, "", false, null, null, LIMIT); result.setRows(jobInfos); return result; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java index 0c89960467ec1e..cd6c768936196f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java @@ -101,7 +101,7 @@ public boolean filter(String columnName, Comparable element, Map f rightVal = ((DateLiteral) subExpr.getChild(1)).getLongValue(); } else { leftVal = Long.parseLong(element.toString()); - rightVal = ((IntLiteral)subExpr.getChild(1)).getLongValue(); + rightVal = ((IntLiteral) subExpr.getChild(1)).getLongValue(); } switch (binaryPredicate.getOp()) { case EQ: @@ -121,7 +121,7 @@ public boolean filter(String columnName, Comparable element, Map f Preconditions.checkState(false, "No defined binary operator."); } } else { - return like((String)element, ((StringLiteral) subExpr.getChild(1)).getValue()); + return like((String) element, ((StringLiteral) subExpr.getChild(1)).getValue()); } return true; } @@ -177,7 +177,7 @@ public ProcResult fetchResultByFilter(Map filterMap, List filterPartitionInfos.size()) { endIndex = filterPartitionInfos.size(); } - filterPartitionInfos = filterPartitionInfos.subList(beginIndex,endIndex); + filterPartitionInfos = filterPartitionInfos.subList(beginIndex, endIndex); } return getBasicProcResult(filterPartitionInfos); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/RollupProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/RollupProcDir.java index c10eeb44188ea2..28318a87f02444 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/common/proc/RollupProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/RollupProcDir.java @@ -88,7 +88,7 @@ public ProcResult fetchResultByFilter(HashMap filter, ArrayList> jobInfos = Lists.newArrayList(); //where - if (filter == null || filter.size() == 0){ + if (filter == null || filter.size() == 0) { jobInfos = rollupJobInfos; } else { jobInfos = Lists.newArrayList(); @@ -126,7 +126,7 @@ public ProcResult fetchResultByFilter(HashMap filter, ArrayList jobInfos.size()) { endIndex = jobInfos.size(); } - jobInfos = jobInfos.subList(beginIndex,endIndex); + jobInfos = jobInfos.subList(beginIndex, endIndex); } BaseProcResult result = new BaseProcResult(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/SchemaChangeProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/SchemaChangeProcDir.java index 8e91cbd31c65ea..04b04414415905 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/SchemaChangeProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/SchemaChangeProcDir.java @@ -105,14 +105,14 @@ public ProcResult fetchResultByFilter(HashMap filter, ArrayList> jobInfos; - if (filter == null || filter.size() == 0){ + if (filter == null || filter.size() == 0) { jobInfos = schemaChangeJobInfos; } else { jobInfos = Lists.newArrayList(); for (List infoStr : schemaChangeJobInfos) { if (infoStr.size() != TITLE_NAMES.size()) { LOG.warn("SchemaChangeJobInfos.size() " + schemaChangeJobInfos.size() - + " not equal TITLE_NAMES.size() " + TITLE_NAMES.size()); + + " not equal TITLE_NAMES.size() " + TITLE_NAMES.size()); continue; } boolean isNeed = true; @@ -143,7 +143,7 @@ public ProcResult fetchResultByFilter(HashMap filter, ArrayList jobInfos.size()) { endIndex = jobInfos.size(); } - jobInfos = jobInfos.subList(beginIndex,endIndex); + jobInfos = jobInfos.subList(beginIndex, endIndex); } BaseProcResult result = new BaseProcResult(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/StatisticProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/StatisticProcNode.java index e83186466f7e70..dc1efa62431d85 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/StatisticProcNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/StatisticProcNode.java @@ -23,11 +23,9 @@ import org.apache.doris.catalog.MaterializedIndex.IndexExtState; import org.apache.doris.catalog.OlapTable; import org.apache.doris.catalog.Partition; -import org.apache.doris.catalog.ReplicaAllocation; import org.apache.doris.catalog.Table.TableType; import org.apache.doris.catalog.Tablet; import org.apache.doris.common.AnalysisException; -import org.apache.doris.system.SystemInfoService; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; @@ -90,14 +88,12 @@ static class DBStatistic { this.db = db; this.dbNum = 1; - SystemInfoService infoService = Catalog.getCurrentSystemInfo(); db.getTables().stream().filter(t -> t != null && t.getType() == TableType.OLAP).forEach(t -> { ++tableNum; OlapTable olapTable = (OlapTable) t; olapTable.readLock(); try { for (Partition partition : olapTable.getAllPartitions()) { - ReplicaAllocation replicaAlloc = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()); ++partitionNum; for (MaterializedIndex materializedIndex : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { ++indexNum; diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java index bd8ecdb846ce4e..403cda9120c3e8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java @@ -173,8 +173,8 @@ static class DBTabletStatistic { this.cloningNum = cloningTabletIds.size(); db.getTables().stream().filter(t -> t != null && t.getType() == Table.TableType.OLAP).forEach(t -> { OlapTable olapTable = (OlapTable) t; - ColocateTableIndex.GroupId groupId = colocateTableIndex.isColocateTable(olapTable.getId()) ? - colocateTableIndex.getGroup(olapTable.getId()) : null; + ColocateTableIndex.GroupId groupId = colocateTableIndex.isColocateTable(olapTable.getId()) + ? colocateTableIndex.getGroup(olapTable.getId()) : null; olapTable.readLock(); try { for (Partition partition : olapTable.getAllPartitions()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/TrashProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/TrashProcDir.java index 9b239009312c58..f371c37b92fce7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/TrashProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/TrashProcDir.java @@ -80,7 +80,6 @@ public static void getTrashInfo(List backends, List> infos Long trashUsedCapacityB = null; boolean ok = false; try { - long start = System.currentTimeMillis(); address = new TNetworkAddress(backend.getHost(), backend.getBePort()); client = ClientPool.backendPool.borrowObject(address); trashUsedCapacityB = client.getTrashUsedCapacity(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/profile/MultiProfileTreeBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/common/profile/MultiProfileTreeBuilder.java index 204113fa944080..a8d800e4cf6448 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/profile/MultiProfileTreeBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/profile/MultiProfileTreeBuilder.java @@ -112,7 +112,7 @@ public List> getSubTaskInfo() { } public List> getInstanceList(String executionId, String fragmentId) - throws AnalysisException { + throws AnalysisException { ProfileTreeBuilder singleBuilder = getExecutionProfileTreeBuilder(executionId); return singleBuilder.getInstanceList(fragmentId); } @@ -128,7 +128,8 @@ public ProfileTreeNode getFragmentTreeRoot(String executionId) throws AnalysisEx return singleBuilder.getFragmentTreeRoot(); } - public List getFragmentInstances(String executionId) throws AnalysisException{ + public List getFragmentInstances(String executionId) + throws AnalysisException { return getExecutionProfileTreeBuilder(executionId).getFragmentsInstances(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeBuilder.java index b193c114a9b212..c56ae75ed823c0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeBuilder.java @@ -275,8 +275,8 @@ private ProfileTreeNode buildTreeNode(RuntimeProfile profile, ProfileTreeNode ro node.setParentNode(root); } - if ((node.name.equals(PROFILE_NAME_EXCHANGE_NODE) || - node.name.equals(PROFILE_NAME_VEXCHANGE_NODE)) && instanceId == null) { + if ((node.name.equals(PROFILE_NAME_EXCHANGE_NODE) + || 
node.name.equals(PROFILE_NAME_VEXCHANGE_NODE)) && instanceId == null) { exchangeNodes.add(node); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java index 089a1186c1f227..843863b034b93c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java @@ -126,7 +126,7 @@ public static void parseFile(String path, BrokerDesc brokerDesc, List MEGABYTE ) { + } else if (value > MEGABYTE) { unit = "MB"; doubleValue /= MEGABYTE; } else if (value > KILOBYTE) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/DigitalVersion.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/DigitalVersion.java index 531937f08561b9..e7c72da2ed9b37 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/DigitalVersion.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/DigitalVersion.java @@ -120,10 +120,10 @@ public boolean equals(Object o) { return false; } DigitalVersion version = (DigitalVersion) o; - return id == version.id && - major == version.major && - minor == version.minor && - revision == version.revision; + return id == version.id + && major == version.major + && minor == version.minor + && revision == version.revision; } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java index 370d4a755c33b8..b632fb26c7c6a0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java @@ -312,15 +312,17 @@ private static void checkReservedHistoryPeriodValidate(String reservedHistoryPer String formattedLowerBound = sdf.format(sdf.parse(range.lowerEndpoint().toString())); String formattedUpperBound = sdf.format(sdf.parse(range.upperEndpoint().toString())); if (!range.lowerEndpoint().toString().equals(formattedLowerBound) || !range.upperEndpoint().toString().equals(formattedUpperBound)) { - throw new DdlException("Invalid " + DynamicPartitionProperty.RESERVED_HISTORY_PERIODS + - " value. It must be correct DATE value \"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH " + - "or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR."); + throw new DdlException("Invalid " + DynamicPartitionProperty.RESERVED_HISTORY_PERIODS + + " value. It must be correct DATE value \"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\"" + + " while time_unit is DAY/WEEK/MONTH or" + + " \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR."); } } } catch (ParseException e) { - throw new DdlException("Invalid " + DynamicPartitionProperty.RESERVED_HISTORY_PERIODS + - " value. It must be like \"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH " + - "or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR."); + throw new DdlException("Invalid " + DynamicPartitionProperty.RESERVED_HISTORY_PERIODS + + " value. 
It must be like \"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\"" + + " while time_unit is DAY/WEEK/MONTH " + + "or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR."); } } } @@ -366,16 +368,16 @@ public static boolean checkInputDynamicPartitionProperties(Map p String historyPartitionNum = properties.get(DynamicPartitionProperty.HISTORY_PARTITION_NUM); String reservedHistoryPeriods = properties.get(DynamicPartitionProperty.RESERVED_HISTORY_PERIODS); - if (!(Strings.isNullOrEmpty(enable) && - Strings.isNullOrEmpty(timeUnit) && - Strings.isNullOrEmpty(timeZone) && - Strings.isNullOrEmpty(prefix) && - Strings.isNullOrEmpty(start) && - Strings.isNullOrEmpty(end) && - Strings.isNullOrEmpty(buckets) && - Strings.isNullOrEmpty(createHistoryPartition) && - Strings.isNullOrEmpty(historyPartitionNum) && - Strings.isNullOrEmpty(reservedHistoryPeriods))) { + if (!(Strings.isNullOrEmpty(enable) + && Strings.isNullOrEmpty(timeUnit) + && Strings.isNullOrEmpty(timeZone) + && Strings.isNullOrEmpty(prefix) + && Strings.isNullOrEmpty(start) + && Strings.isNullOrEmpty(end) + && Strings.isNullOrEmpty(buckets) + && Strings.isNullOrEmpty(createHistoryPartition) + && Strings.isNullOrEmpty(historyPartitionNum) + && Strings.isNullOrEmpty(reservedHistoryPeriods))) { if (Strings.isNullOrEmpty(enable)) { properties.put(DynamicPartitionProperty.ENABLE, "true"); } @@ -571,17 +573,17 @@ public static Map analyzeDynamicPartition(Map pr public static void checkAlterAllowed(OlapTable olapTable) throws DdlException { TableProperty tableProperty = olapTable.getTableProperty(); - if (tableProperty != null && tableProperty.getDynamicPartitionProperty() != null && - tableProperty.getDynamicPartitionProperty().isExist() && - tableProperty.getDynamicPartitionProperty().getEnable()) { - throw new DdlException("Cannot add/drop partition on a Dynamic Partition Table, " + - "Use command `ALTER TABLE tbl_name SET (\"dynamic_partition.enable\" = \"false\")` firstly."); + if (tableProperty != null && tableProperty.getDynamicPartitionProperty() != null + && tableProperty.getDynamicPartitionProperty().isExist() + && tableProperty.getDynamicPartitionProperty().getEnable()) { + throw new DdlException("Cannot add/drop partition on a Dynamic Partition Table, " + + "Use command `ALTER TABLE tbl_name SET (\"dynamic_partition.enable\" = \"false\")` firstly."); } } public static boolean isDynamicPartitionTable(Table table) { - if (!(table instanceof OlapTable) || - !(((OlapTable) table).getPartitionInfo().getType().equals(PartitionType.RANGE))) { + if (!(table instanceof OlapTable) + || !(((OlapTable) table).getPartitionInfo().getType().equals(PartitionType.RANGE))) { return false; } RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) ((OlapTable) table).getPartitionInfo(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/KafkaUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/KafkaUtil.java index 3bdbd7e0a4f933..b6cd819afc9e1b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/KafkaUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/KafkaUtil.java @@ -60,13 +60,9 @@ public static List getAllKafkaPartitions(String brokerList, String topi .setKafkaInfo(InternalService.PKafkaLoadInfo.newBuilder() .setBrokers(brokerList) .setTopic(topic) - .addAllProperties( - convertedCustomProperties.entrySet().stream().map( - e -> InternalService.PStringPair.newBuilder() - .setKey(e.getKey()) - .setVal(e.getValue()) - .build() - ).collect(Collectors.toList()) + 
.addAllProperties(convertedCustomProperties.entrySet().stream() + .map(e -> InternalService.PStringPair.newBuilder().setKey(e.getKey()) + .setVal(e.getValue()).build()).collect(Collectors.toList()) ) ) ).build(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/ListUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/ListUtil.java index cfe5a293bd5dd1..e203377b98207c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/ListUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/ListUtil.java @@ -132,8 +132,8 @@ public static void checkListsConflict(List list1, List> getQueryWithType(ProfileType type) { while (reverse.hasNext()) { String queryId = (String) reverse.next(); ProfileElement profileElement = queryIdToProfileMap.get(queryId); - if (profileElement == null){ + if (profileElement == null) { continue; } Map infoStrings = profileElement.infoStrings; @@ -267,11 +267,12 @@ public List> getLoadJobTaskList(String jobId) throws AnalysisExcept return builder.getSubTaskInfo(); } - public List getFragmentsAndInstances(String queryId) throws AnalysisException{ + public List getFragmentsAndInstances(String queryId) + throws AnalysisException { return getMultiProfileTreeBuilder(queryId).getFragmentInstances(queryId); } - private MultiProfileTreeBuilder getMultiProfileTreeBuilder(String jobId) throws AnalysisException{ + private MultiProfileTreeBuilder getMultiProfileTreeBuilder(String jobId) throws AnalysisException { readLock.lock(); try { ProfileElement element = queryIdToProfileMap.get(jobId); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java index 4a27d9ca5ec65b..20aac35971c2e7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java @@ -190,15 +190,15 @@ public static DataProperty analyzeDataProperty(Map properties, D // check remote_storage_resource and remote_storage_cooldown_time if ((!hasRemoteCooldown && hasRemoteStorageResource) || (hasRemoteCooldown && !hasRemoteStorageResource)) { - throw new AnalysisException("Invalid data property, " + - "`remote_storage_resource` and `remote_storage_cooldown_time` must be used together."); + throw new AnalysisException("Invalid data property, " + + "`remote_storage_resource` and `remote_storage_cooldown_time` must be used together."); } if (hasRemoteStorageResource && hasRemoteCooldown) { // check remote resource Resource resource = Catalog.getCurrentCatalog().getResourceMgr().getResource(remoteStorageResourceName); if (resource == null) { - throw new AnalysisException("Invalid data property, " + - "`remote_storage_resource` [" + remoteStorageResourceName + "] dose not exist."); + throw new AnalysisException("Invalid data property, " + + "`remote_storage_resource` [" + remoteStorageResourceName + "] does not exist."); } // check remote storage cool down timestamp if (remoteCooldownTimeStamp <= currentTimeMs) { @@ -370,9 +370,9 @@ public static Set analyzeBloomFilterColumns(Map properti found = true; break; } else { - throw new AnalysisException("Bloom filter index only used in columns of" + - " UNIQUE_KEYS/DUP_KEYS table or key columns of AGG_KEYS table." + - " invalid column: " + bfColumn); + throw new AnalysisException("Bloom filter index only used in columns of" + + " UNIQUE_KEYS/DUP_KEYS table or key columns of AGG_KEYS table." 
+ + " invalid column: " + bfColumn); } } } @@ -492,7 +492,7 @@ public static String analyzeType(Map properties) throws Analysis return type; } - public static Type analyzeSequenceType(Map properties, KeysType keysType) throws AnalysisException{ + public static Type analyzeSequenceType(Map properties, KeysType keysType) throws AnalysisException { String typeStr = null; String propertyName = PROPERTIES_FUNCTION_COLUMN + "." + PROPERTIES_SEQUENCE_TYPE; if (properties != null && properties.containsKey(propertyName)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/RangeUtils.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/RangeUtils.java index bf36a05cbbc59f..0a40cec59b1604 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/RangeUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/RangeUtils.java @@ -38,7 +38,7 @@ public class RangeUtils { public static final Comparator> RANGE_MAP_ENTRY_COMPARATOR = - Comparator.comparing(o -> (((RangePartitionItem)o.getValue()).getItems()).lowerEndpoint()); + Comparator.comparing(o -> (((RangePartitionItem) o.getValue()).getItems()).lowerEndpoint()); public static final Comparator RANGE_COMPARATOR = Comparator.comparing(o -> ((RangePartitionItem) o).getItems().lowerEndpoint()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/ReflectionUtils.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/ReflectionUtils.java index f51ae5f674bcca..0f341f6397be68 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/ReflectionUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/ReflectionUtils.java @@ -85,14 +85,14 @@ public static void printThreadInfo(PrintWriter stream, String title) { stream.println("Process Thread Dump: " + title); stream.println(threadIds.length + " active threads"); - for (long tid: threadIds) { + for (long tid : threadIds) { ThreadInfo info = threadBean.getThreadInfo(tid, stackDepth); if (info == null) { stream.println(" Inactive"); continue; } - stream.println("Thread " + - getTaskName(info.getThreadId(), info.getThreadName()) + ":"); + stream.println("Thread " + + getTaskName(info.getThreadId(), info.getThreadName()) + ":"); Thread.State state = info.getThreadState(); stream.println(" State: " + state); stream.println(" Blocked count: " + info.getBlockedCount()); @@ -105,11 +105,11 @@ public static void printThreadInfo(PrintWriter stream, String title) { stream.println(" Waiting on " + info.getLockName()); } else if (state == Thread.State.BLOCKED) { stream.println(" Blocked on " + info.getLockName()); - stream.println(" Blocked by " + - getTaskName(info.getLockOwnerId(), info.getLockOwnerName())); + stream.println(" Blocked by " + + getTaskName(info.getLockOwnerId(), info.getLockOwnerName())); } stream.println(" Stack:"); - for (StackTraceElement frame: info.getStackTrace()) { + for (StackTraceElement frame : info.getStackTrace()) { stream.println(" " + frame.toString()); } } @@ -151,7 +151,7 @@ public static void logThreadInfo(Logger log, String title, long minInterval) { * @return the correctly typed Class of the given object. 
*/ public static Class getClass(T o) { - return (Class)o.getClass(); + return (Class) o.getClass(); } // methods to support testing diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java index ba9305cbfaf634..ff4a476856c1aa 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java @@ -423,7 +423,8 @@ public String saveToFile(long dbId, String catalog, String fileName) throws DdlE outputStream.close(); if (!checkMd5(file, smallFile.md5)) { - throw new DdlException("write file " + fileName +" failed. md5 is invalid. expected: " + smallFile.md5); + throw new DdlException("write file " + fileName + + " failed. md5 is invalid. expected: " + smallFile.md5); } } catch (IOException e) { LOG.warn("failed to write file: {}", fileName, e); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/SqlBlockUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/SqlBlockUtil.java index 2a140b2e2edba0..50c60386b8b4cd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/SqlBlockUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/SqlBlockUtil.java @@ -32,7 +32,7 @@ public class SqlBlockUtil { public static final Long LONG_MINUS_ONE = -1L; - public static void checkSqlAndSqlHashSetBoth(String sql, String sqlHash) throws AnalysisException{ + public static void checkSqlAndSqlHashSetBoth(String sql, String sqlHash) throws AnalysisException { if (!STRING_DEFAULT.equals(sql) && !STRING_DEFAULT.equals(sqlHash)) { throw new AnalysisException("Only sql or sqlHash can be configured"); } @@ -75,10 +75,13 @@ public static Boolean isSqlBlockLimitationsNull(Long partitionNum, Long tabletNu // alter operation not allowed to change other properties that not set public static void checkAlterValidate(SqlBlockRule sqlBlockRule) throws AnalysisException { if (!STRING_DEFAULT.equals(sqlBlockRule.getSql())) { - if (!STRING_DEFAULT.equals(sqlBlockRule.getSqlHash()) && StringUtils.isNotEmpty(sqlBlockRule.getSqlHash())) { + if (!STRING_DEFAULT.equals(sqlBlockRule.getSqlHash()) + && StringUtils.isNotEmpty(sqlBlockRule.getSqlHash())) { throw new AnalysisException("Only sql or sqlHash can be configured"); - } else if (!isSqlBlockLimitationsDefault(sqlBlockRule.getPartitionNum(), sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality()) - &&!isSqlBlockLimitationsNull(sqlBlockRule.getPartitionNum(), sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality())) { + } else if (!isSqlBlockLimitationsDefault(sqlBlockRule.getPartitionNum(), + sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality()) + && !isSqlBlockLimitationsNull(sqlBlockRule.getPartitionNum(), + sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality())) { ErrorReport.reportAnalysisException(ErrorCode.ERROR_SQL_AND_LIMITATIONS_SET_IN_ONE_RULE); } } else if (!STRING_DEFAULT.equals(sqlBlockRule.getSqlHash())) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java index 891852d8603490..4ca18e3ce02e96 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java @@ -72,7 +72,7 @@ public class Util { TYPE_STRING_MAP.put(PrimitiveType.HLL, "varchar(%d)"); TYPE_STRING_MAP.put(PrimitiveType.BOOLEAN, "bool"); TYPE_STRING_MAP.put(PrimitiveType.BITMAP, 
"bitmap"); - TYPE_STRING_MAP.put(PrimitiveType.QUANTILE_STATE,"quantile_state"); + TYPE_STRING_MAP.put(PrimitiveType.QUANTILE_STATE, "quantile_state"); TYPE_STRING_MAP.put(PrimitiveType.ARRAY, "Array<%s>"); TYPE_STRING_MAP.put(PrimitiveType.NULL_TYPE, "null"); } @@ -187,8 +187,8 @@ public static List shellSplit(CharSequence string) { boolean escaping = false; char quoteChar = ' '; boolean quoting = false; - StringBuilder current = new StringBuilder() ; - for (int i = 0; i B) { - out.write((int)(source & (B - 1) | B)); + out.write((int) (source & (B - 1) | B)); source = source >> 7; } out.write((int) (source & (B - 1))); @@ -406,7 +406,7 @@ public static long decodeVarint64(DataInput in) throws IOException { while (true) { int oneByte = in.readUnsignedByte(); boolean isEnd = (oneByte & B) == 0; - result = result | ((long)(oneByte & B - 1) << (shift * 7)); + result = result | ((long) (oneByte & B - 1) << (shift * 7)); if (isEnd) { break; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java b/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java index dfcadcef3d8c83..c6886ae7225c10 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java @@ -113,7 +113,7 @@ protected void runAfterCatalogReady() { // only add new job when it's work time if (itsTime() && getJobNum() == 0) { List chosenTabletIds = chooseTablets(); - for(Long tabletId: chosenTabletIds) { + for (Long tabletId : chosenTabletIds) { CheckConsistencyJob job = new CheckConsistencyJob(tabletId); addJob(job); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java b/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java index cfd6969f3b6097..08ccf49641df0a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java @@ -275,7 +275,7 @@ public List> getHelperNodes() { // 2. get electable fe host from remote boolean ok = true; List> feHostPorts = null; - while(true) { + while (true) { try { feHostPorts = getElectableGroupHostPorts(); if (feHostPorts == null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsMajorVersion.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsMajorVersion.java index d16fbd81d2876e..6452210e3ccfb5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsMajorVersion.java +++ b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsMajorVersion.java @@ -89,8 +89,8 @@ public static EsMajorVersion parse(String version) throws DorisEsException { if (version.startsWith("8.")) { return new EsMajorVersion((byte) 8, version); } - throw new DorisEsException("Unsupported/Unknown ES Cluster version [" + version + "]." + - "Highest supported version is [" + LATEST.version + "]."); + throw new DorisEsException("Unsupported/Unknown ES Cluster version [" + version + "]." 
+ + "Highest supported version is [" + LATEST.version + "]."); } @Override @@ -104,8 +104,7 @@ public boolean equals(Object o) { EsMajorVersion version = (EsMajorVersion) o; - return major == version.major && - version.equals(version.version); + return major == version.major && version.equals(version.version); } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsNodeInfo.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsNodeInfo.java index b8c757e7b08cd6..1504e7cb34d25d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsNodeInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsNodeInfo.java @@ -218,18 +218,18 @@ public int hashCode() { @Override public String toString() { - return "EsNodeInfo{" + - "id='" + id + '\'' + - ", name='" + name + '\'' + - ", host='" + host + '\'' + - ", ip='" + ip + '\'' + - ", publishAddress=" + publishAddress + - ", hasHttp=" + hasHttp + - ", isClient=" + isClient + - ", isData=" + isData + - ", isIngest=" + isIngest + - ", hasThrift=" + hasThrift + - ", thriftAddress=" + thriftAddress + - '}'; + return "EsNodeInfo{" + + "id='" + id + '\'' + + ", name='" + name + '\'' + + ", host='" + host + '\'' + + ", ip='" + ip + '\'' + + ", publishAddress=" + publishAddress + + ", hasHttp=" + hasHttp + + ", isClient=" + isClient + + ", isData=" + isData + + ", isIngest=" + isIngest + + ", hasThrift=" + hasThrift + + ", thriftAddress=" + thriftAddress + + '}'; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsShardRouting.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsShardRouting.java index 01462c599034f2..7c15e7e0a9bcf3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsShardRouting.java +++ b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsShardRouting.java @@ -84,13 +84,13 @@ public String getNodeId() { @Override public String toString() { - return "EsShardRouting{" + - "indexName='" + indexName + '\'' + - ", shardId=" + shardId + - ", isPrimary=" + isPrimary + - ", address=" + address + - ", httpAddress=" + httpAddress + - ", nodeId='" + nodeId + '\'' + - '}'; + return "EsShardRouting{" + + "indexName='" + indexName + '\'' + + ", shardId=" + shardId + + ", isPrimary=" + isPrimary + + ", address=" + address + + ", httpAddress=" + httpAddress + + ", nodeId='" + nodeId + '\'' + + '}'; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/IcebergCatalogMgr.java b/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/IcebergCatalogMgr.java index ab0aba4dc701b7..96fe439a19be66 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/IcebergCatalogMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/IcebergCatalogMgr.java @@ -45,8 +45,8 @@ public class IcebergCatalogMgr { private static final Logger LOG = LogManager.getLogger(IcebergCatalogMgr.class); - private static final String PROPERTY_MISSING_MSG = "Iceberg %s is null. " + - "Please add properties('%s'='xxx') when create iceberg database."; + private static final String PROPERTY_MISSING_MSG = "Iceberg %s is null. 
" + + "Please add properties('%s'='xxx') when create iceberg database."; // hive metastore uri -> iceberg catalog // used to cache iceberg catalogs diff --git a/fe/fe-core/src/main/java/org/apache/doris/ha/BDBHA.java b/fe/fe-core/src/main/java/org/apache/doris/ha/BDBHA.java index c08011a6035480..e6fbcd31831b40 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/ha/BDBHA.java +++ b/fe/fe-core/src/main/java/org/apache/doris/ha/BDBHA.java @@ -214,7 +214,7 @@ public boolean removeElectableNode(String nodeName) { public void addHelperSocket(String ip, Integer port) { ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); Set helperSockets = Sets.newHashSet(replicationGroupAdmin.getHelperSockets()); - InetSocketAddress newHelperSocket = new InetSocketAddress(ip,port); + InetSocketAddress newHelperSocket = new InetSocketAddress(ip, port); if (!helperSockets.contains(newHelperSocket)) { helperSockets.add(newHelperSocket); environment.setNewReplicationGroupAdmin(helperSockets); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/HttpAuthManager.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/HttpAuthManager.java index 26e1318f8d8fdf..dec2419a286cc9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/HttpAuthManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/HttpAuthManager.java @@ -69,7 +69,7 @@ public SessionValue getSessionValue(List sessionIds) { return null; } - public void removeSession(String sessionId){ + public void removeSession(String sessionId) { if (!Strings.isNullOrEmpty(sessionId)) { authSessions.invalidate(sessionId); LOG.debug("remove session id: {}, left size: {}", sessionId, authSessions.size()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/HttpServer.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/HttpServer.java index bca898cc13b2cc..4308c72306094a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/HttpServer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/HttpServer.java @@ -39,7 +39,7 @@ public class HttpServer extends SpringBootServletInitializer { private int port; private int acceptors; private int selectors; - private int maxHttpPostSize ; + private int maxHttpPostSize; private int workers; private int minThreads; @@ -54,13 +54,21 @@ public void setMaxHttpHeaderSize(int maxHttpHeaderSize) { this.maxHttpHeaderSize = maxHttpHeaderSize; } - public int getMinThreads() { return minThreads; } + public int getMinThreads() { + return minThreads; + } - public void setMinThreads(int minThreads) { this.minThreads = minThreads; } + public void setMinThreads(int minThreads) { + this.minThreads = minThreads; + } - public int getMaxThreads() { return maxThreads; } + public int getMaxThreads() { + return maxThreads; + } - public void setMaxThreads(int maxThreads) { this.maxThreads = maxThreads; } + public void setMaxThreads(int maxThreads) { + this.maxThreads = maxThreads; + } public void setWorkers(int workers) { this.workers = workers; @@ -103,7 +111,7 @@ public void start() { properties.put("server.jetty.threadPool.minThreads", this.minThreads); properties.put("server.max-http-header-size", this.maxHttpHeaderSize); //Worker thread pool is not set by default, set according to your needs - if(this.workers > 0) { + if (this.workers > 0) { properties.put("server.jetty.workers", this.workers); } // This is to disable the spring-boot-devtools restart feature. 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/IllegalArgException.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/IllegalArgException.java index c66acaeeb6af84..7f69a8c94e0932 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/IllegalArgException.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/IllegalArgException.java @@ -18,17 +18,17 @@ package org.apache.doris.httpv2; public class IllegalArgException extends Exception { - private static final long serialVersionUID = 3344697787301861667L; + private static final long serialVersionUID = 3344697787301861667L; - public IllegalArgException() { - super(""); - } + public IllegalArgException() { + super(""); + } - public IllegalArgException(String msg) { - super(msg); - } + public IllegalArgException(String msg) { + super(msg); + } - public IllegalArgException(String msg, Throwable cause) { - super(msg, cause); - } + public IllegalArgException(String msg, Throwable cause) { + super(msg, cause); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/config/WebConfigurer.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/config/WebConfigurer.java index afa07db96c9642..47c20b850537a5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/config/WebConfigurer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/config/WebConfigurer.java @@ -40,7 +40,7 @@ public void addInterceptors(InterceptorRegistry registry) { registry.addInterceptor(new AuthInterceptor()) .addPathPatterns("/rest/v1/**") .excludePathPatterns("/", "/api/**", "/rest/v1/login", "/rest/v1/logout", "/static/**", "/metrics") - .excludePathPatterns("/image","/info","/version","/put","/journal_id","/role","/check","/dump"); + .excludePathPatterns("/image", "/info", "/version", "/put", "/journal_id", "/role", "/check", "/dump"); } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/HardwareInfoController.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/HardwareInfoController.java index 26be6d274cd22c..c8f29cf30f0103 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/HardwareInfoController.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/HardwareInfoController.java @@ -222,9 +222,10 @@ private List getFileSystem(FileSystem fileSystem) { for (OSFileStore fs : fsArray) { long usable = fs.getUsableSpace(); long total = fs.getTotalSpace(); - fsInfo.add(String.format("        %s (%s) [%s] %s of %s free (%.1f%%), %s of %s files free (%.1f%%) is %s " + - (fs.getLogicalVolume() != null && fs.getLogicalVolume().length() > 0 ? "[%s]" : "%s") + - " and is mounted at %s", + fsInfo.add(String.format("        " + + "%s (%s) [%s] %s of %s free (%.1f%%), %s of %s files free (%.1f%%) is %s " + + (fs.getLogicalVolume() != null && fs.getLogicalVolume().length() > 0 ? "[%s]" : "%s") + + " and is mounted at %s", fs.getName(), fs.getDescription().isEmpty() ? 
"file system" : fs.getDescription(), fs.getType(), FormatUtil.formatBytes(usable), FormatUtil.formatBytes(fs.getTotalSpace()), 100d * usable / total, FormatUtil.formatValue(fs.getFreeInodes(), ""), FormatUtil.formatValue(fs.getTotalInodes(), ""), diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/SystemController.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/SystemController.java index 81ff51ab2be98a..b1bb88546230bd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/SystemController.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/SystemController.java @@ -65,7 +65,7 @@ public Object system(HttpServletRequest request) { currentPath = "/"; } LOG.debug("get /system request, thread id: {}", Thread.currentThread().getId()); - ResponseEntity entity = appendSystemInfo(currentPath, currentPath,request); + ResponseEntity entity = appendSystemInfo(currentPath, currentPath, request); return entity; } @@ -87,7 +87,6 @@ protected ProcNodeInterface getProcNode(String path) { private ResponseEntity appendSystemInfo(String procPath, String path, HttpServletRequest request) { UrlValidator validator = new UrlValidator(); - Map map = new HashMap<>(); ProcNodeInterface procNode = getProcNode(procPath); if (procNode == null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/entity/RestResult.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/entity/RestResult.java index 881c22b1ebce93..99129453a5897a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/entity/RestResult.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/entity/RestResult.java @@ -39,7 +39,7 @@ public String toJson() { Gson gson = new Gson(); addResultEntry("status", status); if (status != ActionStatus.OK) { - addResultEntry("msg", msg); + addResultEntry("msg", msg); } return gson.toJson(resultMap); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/ServletTraceIterceptor.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/ServletTraceIterceptor.java index 31ea134792d0d1..379a33eea4b0cf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/ServletTraceIterceptor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/ServletTraceIterceptor.java @@ -36,19 +36,19 @@ public class ServletTraceIterceptor implements Filter { private static final Logger LOG = LogManager.getLogger(ServletTraceIterceptor.class); @Override - public void init( FilterConfig filterConfig) throws ServletException { + public void init(FilterConfig filterConfig) throws ServletException { } @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { HttpServletRequest httpRequest = (HttpServletRequest) request; HttpServletResponse httpResponse = (HttpServletResponse) response; if ("TRACE".equalsIgnoreCase(httpRequest.getMethod())) { httpResponse.setStatus(HttpServletResponse.SC_METHOD_NOT_ALLOWED); - LOG.warn ("Trace method is not allowed to be called, has been intercepted, IP address:" - + request.getRemoteAddr()); + LOG.warn("Trace method is not allowed to be called, has been intercepted, IP address:" + + request.getRemoteAddr()); return; } chain.doFilter(request, response); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/LoadAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/LoadAction.java index 
bf1cb562c4bd4d..aab70c37422bd9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/LoadAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/LoadAction.java @@ -61,7 +61,7 @@ public class LoadAction extends RestBaseController { @RequestMapping(path = "/api/{" + DB_KEY + "}/{" + TABLE_KEY + "}/_load", method = RequestMethod.PUT) public Object load(HttpServletRequest request, HttpServletResponse response, @PathVariable(value = DB_KEY) String db, @PathVariable(value = TABLE_KEY) String table) { - if(Config.disable_mini_load) { + if (Config.disable_mini_load) { ResponseEntity entity = ResponseEntityBuilder.notFound("The mini load operation has been disabled by default, if you need to add disable_mini_load=false in fe.conf."); return entity; } else { @@ -96,7 +96,6 @@ private Object executeWithoutPassword(HttpServletRequest request, try { String dbName = db; String tableName = table; - String urlStr = request.getRequestURI(); // A 'Load' request must have 100-continue header if (request.getHeader(HttpHeaderNames.EXPECT.toString()) == null) { return new RestBaseResult("There is no 100-continue header"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/RestBaseController.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/RestBaseController.java index a0af9a1b8e5956..7354ca0a33091c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/RestBaseController.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/RestBaseController.java @@ -83,11 +83,10 @@ public RedirectView redirectTo(HttpServletRequest request, TNetworkAddress addr) //I don't know why the format username@default_cluster is used in parseAuthInfo. //It is estimated that it is compatible with the standard format of username:password. //So here we feel that we can assemble it completely by hand. - String clusterName = ConnectContext.get() == null ? - SystemInfoService.DEFAULT_CLUSTER : ConnectContext.get().getClusterName(); - userInfo = ClusterNamespace.getNameFromFullName(authInfo.fullUserName) + - "@" + clusterName + - ":" + authInfo.password; + String clusterName = ConnectContext.get() == null + ? 
SystemInfoService.DEFAULT_CLUSTER : ConnectContext.get().getClusterName(); + userInfo = ClusterNamespace.getNameFromFullName(authInfo.fullUserName) + + "@" + clusterName + ":" + authInfo.password; } try { urlObj = new URI(urlStr); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/SetConfigAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/SetConfigAction.java index 3590c88988fbb0..c579df68cd87bb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/SetConfigAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/SetConfigAction.java @@ -116,7 +116,7 @@ protected Object set_config(HttpServletRequest request, HttpServletResponse resp @Setter @AllArgsConstructor - public static class ErrConfig{ + public static class ErrConfig { @SerializedName(value = "config_name") @JsonProperty("config_name") private String configName; @@ -143,7 +143,7 @@ public String getErrInfo() { @Getter @Setter @AllArgsConstructor - public static class SetConfigEntity{ + public static class SetConfigEntity { @SerializedName(value = "set") @JsonProperty("set") Map setConfigs; diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/ShowAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/ShowAction.java index 6f440068d5c614..09d3893cd30eb2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/ShowAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/ShowAction.java @@ -154,8 +154,8 @@ public Object show_runtime_info(HttpServletRequest request, HttpServletResponse // Get thread count ThreadGroup parentThread; for (parentThread = Thread.currentThread().getThreadGroup(); - parentThread.getParent() != null; - parentThread = parentThread.getParent()) { + parentThread.getParent() != null; + parentThread = parentThread.getParent()) { } feInfo.put("thread_cnt", String.valueOf(parentThread.activeCount())); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/NodeAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/NodeAction.java index 79a0b8276b020f..3f1469a86ea8d3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/NodeAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/NodeAction.java @@ -342,8 +342,8 @@ public Object configurationInfo(HttpServletRequest request, HttpServletResponse } return ResponseEntityBuilder.ok(new NodeInfo(BE_CONFIG_TITLE_NAMES, data)); } - return ResponseEntityBuilder.badRequest("Unsupported type: " + type + ". Only types of fe or be are " + - "supported"); + return ResponseEntityBuilder.badRequest("Unsupported type: " + type + ". Only types of fe or be are " + + "supported"); } // Use thread pool to concurrently fetch configuration information from specified fe or be nodes. 
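The ternary and string-concatenation hunks above (TabletHealthProcDir, RestBaseController, NodeAction) follow the same wrapping rule: the operator ("?", ":", "+") starts the continuation line rather than ending the previous one. A minimal sketch with invented names:

// Illustrative sketch, not part of the patch: operator-first wrapping.
public final class WrapSketch {
    static String clusterLabel(String name) {
        // "?" and ":" lead their continuation lines.
        return name == null
                ? "default_cluster"
                : name;
    }

    static String unsupportedTypeMessage(String type) {
        // "+" leads the continuation line of a wrapped concatenation.
        return "Unsupported type: " + type
                + ". Only types of fe or be are supported";
    }
}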
@@ -523,8 +523,8 @@ private void addSetConfigErrNode(Map configs, Pair> failedTotal) { for (Map.Entry entry : configs.entrySet()) { Map failed = Maps.newHashMap(); - addFailedConfig(entry.getKey(), entry.getValue(), hostPort.first + ":" + - hostPort.second, err, failed); + addFailedConfig(entry.getKey(), entry.getValue(), hostPort.first + ":" + + hostPort.second, err, failed); failedTotal.add(failed); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/MetaInfoActionV2.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/MetaInfoActionV2.java index dd2b283f05ad49..811cae6b5e170e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/MetaInfoActionV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/MetaInfoActionV2.java @@ -273,8 +273,8 @@ private Schema generateSchame(List columns) { schema.setIsNull(String.valueOf(column.isAllowNull())); schema.setDefaultVal(column.getDefaultValue()); schema.setKey(String.valueOf(column.isKey())); - schema.setAggrType(column.getAggregationType() == null ? - "None" : column.getAggregationType().toString()); + schema.setAggrType(column.getAggregationType() == null + ? "None" : column.getAggregationType().toString()); schema.setComment(column.getComment()); } return schema; @@ -291,7 +291,7 @@ private void generateResult(Table tbl, boolean isBaseIndex, propMap.put("isBase", isBaseIndex); propMap.put("tableType", tbl.getEngine()); if (tbl.getType() == Table.TableType.OLAP) { - propMap.put("keyType", ((OlapTable)tbl).getKeysType()); + propMap.put("keyType", ((OlapTable) tbl).getKeysType()); } propMap.put("schema", generateSchema(tbl.getBaseSchema())); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/HttpUtil.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/HttpUtil.java index e066c32c23ac6d..a9c10e84de2d10 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/HttpUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/HttpUtil.java @@ -26,9 +26,9 @@ public class HttpUtil { public static boolean isKeepAlive(HttpServletRequest request) { - if (!request.getHeader(HttpHeaders.CONNECTION).equals("close") && - (request.getProtocol().equals("") || - request.getHeader(HttpHeaders.CONNECTION).equals("keep-alive"))) { + if (!request.getHeader(HttpHeaders.CONNECTION).equals("close") + && (request.getProtocol().equals("") + || request.getHeader(HttpHeaders.CONNECTION).equals("keep-alive"))) { return true; } return false; @@ -53,7 +53,7 @@ public static String getBody(HttpServletRequest request) { data.append(new String(line.getBytes("utf-8"))); } } catch (IOException e) { - } finally { + // CHECKSTYLE IGNORE THIS LINE } return data.toString(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/LoadSubmitter.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/LoadSubmitter.java index ab6666066374dd..1627b755132b34 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/LoadSubmitter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/LoadSubmitter.java @@ -106,7 +106,7 @@ private SubmitResult load() throws Exception { File loadFile = checkAndGetFile(loadContext.file); try (BufferedOutputStream bos = new BufferedOutputStream(conn.getOutputStream()); - BufferedInputStream bis = new BufferedInputStream(new FileInputStream(loadFile));) { + BufferedInputStream bis = new BufferedInputStream(new FileInputStream(loadFile));) { int i; while ((i = bis.read()) > 0) { bos.write(i); diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/StatementSubmitter.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/StatementSubmitter.java index aeb2b91ba4568d..e29db73d6f6afc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/StatementSubmitter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/StatementSubmitter.java @@ -174,7 +174,7 @@ private ExecutionResultSet generateResultSet(ResultSet rs, long startTime) throw // index start from 1 for (int i = 1; i <= colNum; ++i) { String type = rs.getMetaData().getColumnTypeName(i); - if("DATE".equalsIgnoreCase(type) || "DATETIME".equalsIgnoreCase(type)){ + if ("DATE".equalsIgnoreCase(type) || "DATETIME".equalsIgnoreCase(type)) { row.add(rs.getString(i)); } else { row.add(rs.getObject(i)); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/TmpFileMgr.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/TmpFileMgr.java index e7f718f6c7af2f..302341452ae4e2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/TmpFileMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/TmpFileMgr.java @@ -95,7 +95,7 @@ public synchronized TmpFile upload(UploadFile uploadFile) throws TmpFileExceptio throw new TmpFileException("Total file size will exceed limit " + MAX_TOTAL_FILE_SIZE_BYTES); } - if(fileMap.size() > MAX_TOTAL_FILE_NUM) { + if (fileMap.size() > MAX_TOTAL_FILE_NUM) { throw new TmpFileException("Number of temp file " + fileMap.size() + " exceed limit " + MAX_TOTAL_FILE_NUM); } @@ -186,7 +186,7 @@ public void setPreview() throws IOException { lines = Lists.newArrayList(); String escapedColSep = Util.escapeSingleRegex(columnSeparator); try (FileReader fr = new FileReader(absPath); - BufferedReader bf = new BufferedReader(fr)) { + BufferedReader bf = new BufferedReader(fr)) { String str; while ((str = bf.readLine()) != null) { String[] cols = str.split(escapedColSep, -1); // -1 to keep the last empty column diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java b/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java index 8d138e51e984ed..5cf3466a9b8df3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java @@ -535,7 +535,7 @@ public void readFields(DataInput in) throws IOException { break; } case OperationType.OP_CREATE_RESOURCE: - case OperationType.OP_ALTER_RESOURCE:{ + case OperationType.OP_ALTER_RESOURCE: { data = Resource.read(in); isRead = true; break; diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java index 773e20c5ffb15e..638a0204d4ac79 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java @@ -19,8 +19,8 @@ import org.apache.doris.catalog.Catalog; import org.apache.doris.common.Config; -import org.apache.doris.ha.BDBStateChangeListener; import org.apache.doris.ha.BDBHA; +import org.apache.doris.ha.BDBStateChangeListener; import org.apache.doris.ha.HAProtocol; import com.sleepycat.je.Database; @@ -386,7 +386,7 @@ public void close() { } } - // Close environment + // Close environment public void closeReplicatedEnvironment() { if (replicatedEnvironment != null) { try { @@ -398,7 +398,8 @@ public void closeReplicatedEnvironment() { } } } - // open environment + + // open 
environment public void openReplicatedEnvironment(File envHome) { for (int i = 0; i < RETRY_TIME; i++) { try { diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBJEJournal.java b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBJEJournal.java index 4799fc22d6a476..4d787f866b2294 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBJEJournal.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBJEJournal.java @@ -170,8 +170,8 @@ public synchronized void write(short op, Writable writable) throws IOException { LOG.warn("master can not achieve quorum. write timestamp fail. but will not exit."); return; } - String msg = "write bdb failed. will exit. journalId: " + id + ", bdb database Name: " + - currentJournalDB.getDatabaseName(); + String msg = "write bdb failed. will exit. journalId: " + id + ", bdb database Name: " + + currentJournalDB.getDatabaseName(); LOG.error(msg); Util.stdoutWithTime(msg); System.exit(-1); diff --git a/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapAuthenticate.java b/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapAuthenticate.java index 1f40bb3b80c2ee..27326187a9237e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapAuthenticate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapAuthenticate.java @@ -48,8 +48,8 @@ public class LdapAuthenticate { { if (LdapConfig.user_max_connections <= 0 || LdapConfig.user_max_connections > 10000) { - LOG.warn("Ldap config user_max_connections is invalid. It should be set between 1 and 10000. " + - "And now, it is set to the default value."); + LOG.warn("Ldap config user_max_connections is invalid. It should be set between 1 and 10000. " + + "And now, it is set to the default value."); } else { userMaxConn = LdapConfig.user_max_connections; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroup.java b/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroup.java index cc41ab22ff5111..8d0d37643b49f5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroup.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroup.java @@ -524,7 +524,7 @@ public void readFields(DataInput in) throws IOException { lineDelimiter = Text.readString(in); isNegative = in.readBoolean(); // partitionIds - { + { // CHECKSTYLE IGNORE THIS LINE int partSize = in.readInt(); if (partSize > 0) { partitionIds = Lists.newArrayList(); @@ -532,9 +532,9 @@ public void readFields(DataInput in) throws IOException { partitionIds.add(in.readLong()); } } - } + } // CHECKSTYLE IGNORE THIS LINE // fileFieldName - { + { // CHECKSTYLE IGNORE THIS LINE int fileFieldNameSize = in.readInt(); if (fileFieldNameSize > 0) { fileFieldNames = Lists.newArrayList(); @@ -542,24 +542,24 @@ public void readFields(DataInput in) throws IOException { fileFieldNames.add(Text.readString(in)); } } - } + } // CHECKSTYLE IGNORE THIS LINE // fileInfos - { + { // CHECKSTYLE IGNORE THIS LINE int size = in.readInt(); filePaths = Lists.newArrayList(); for (int i = 0; i < size; ++i) { filePaths.add(Text.readString(in)); } - } + } // CHECKSTYLE IGNORE THIS LINE // expr column map Map exprColumnMap = Maps.newHashMap(); - { + { // CHECKSTYLE IGNORE THIS LINE int size = in.readInt(); for (int i = 0; i < size; ++i) { final String name = Text.readString(in); exprColumnMap.put(name, Expr.readIn(in)); } - } + } // CHECKSTYLE IGNORE THIS LINE // file format if (in.readBoolean()) { fileFormat = Text.readString(in); diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroupAggInfo.java b/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroupAggInfo.java index 1ae077ec3ddcac..2a1cf4ff0b8e59 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroupAggInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroupAggInfo.java @@ -228,10 +228,10 @@ public void readFields(DataInput in) throws IOException { int mapSize = in.readInt(); // just for compatibility, the following read objects are useless for (int i = 0; i < mapSize; ++i) { - long id = in.readLong(); + long id = in.readLong(); // CHECKSTYLE IGNORE THIS LINE int listSize = in.readInt(); for (int j = 0; j < listSize; ++j) { - BrokerFileGroup fileGroup = BrokerFileGroup.read(in); + BrokerFileGroup fileGroup = BrokerFileGroup.read(in); // CHECKSTYLE IGNORE THIS LINE } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/DeleteHandler.java b/fe/fe-core/src/main/java/org/apache/doris/load/DeleteHandler.java index 9a9e13c9ad37ad..ef7bd7e9734714 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/DeleteHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/DeleteHandler.java @@ -160,8 +160,8 @@ public void process(DeleteStmt stmt) throws DdlException, QueryStateException { } if (noPartitionSpecified) { - if (olapTable.getPartitionInfo().getType() == PartitionType.RANGE || - olapTable.getPartitionInfo().getType() == PartitionType.LIST) { + if (olapTable.getPartitionInfo().getType() == PartitionType.RANGE + || olapTable.getPartitionInfo().getType() == PartitionType.LIST) { if (!ConnectContext.get().getSessionVariable().isDeleteWithoutPartition()) { throw new DdlException("This is a range or list partitioned table." + " You should specify partition in delete stmt, or set delete_without_partition to true"); @@ -317,8 +317,8 @@ public void process(DeleteStmt stmt) throws DdlException, QueryStateException { case UN_QUORUM: LOG.warn("delete job timeout: transactionId {}, timeout {}, {}", transactionId, timeoutMs, errMsg); cancelJob(deleteJob, CancelType.TIMEOUT, "delete job timeout"); - throw new DdlException("failed to execute delete. transaction id " + transactionId + - ", timeout(ms) " + timeoutMs + ", " + errMsg); + throw new DdlException("failed to execute delete. transaction id " + transactionId + + ", timeout(ms) " + timeoutMs + ", " + errMsg); case QUORUM_FINISHED: case FINISHED: try { @@ -531,8 +531,8 @@ private void checkDeleteV2(OlapTable table, List partitions, List
 partitions, List
 partitions, List
 sessionVariables = Maps.newHashMap();
 
     private List exportColumns = Lists.newArrayList();
-    private String columns ;
+    private String columns;
 
 
     public ExportJob() {
@@ -364,7 +364,7 @@ private void plan() throws UserException {
 
         // add conjunct
         if (whereExpr != null) {
-            for (ScanNode scanNode: scanNodes) {
+            for (ScanNode scanNode : scanNodes) {
                 scanNode.addConjuncts(whereExpr.getConjuncts());
             }
         }
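
The hunk above hands each scan node of the export plan the WHERE clause broken into conjuncts. `Expr.getConjuncts()` itself is not part of this diff; as a rough sketch of what such a helper usually does (the `Pred` type below is hypothetical, not Doris's `Expr`), flattening an AND tree into independent predicates looks like:

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for an expression tree node; Doris's Expr is far richer.
class Pred {
    final String op;    // "AND" marks a conjunction; anything else is a leaf predicate
    final Pred left;
    final Pred right;

    Pred(String op, Pred left, Pred right) {
        this.op = op;
        this.left = left;
        this.right = right;
    }

    // Flatten (a AND (b AND c)) into [a, b, c] so each conjunct can be
    // handed to a scan node and evaluated independently.
    List<Pred> getConjuncts() {
        List<Pred> out = new ArrayList<>();
        if ("AND".equals(op)) {
            out.addAll(left.getConjuncts());
            out.addAll(right.getConjuncts());
        } else {
            out.add(this);
        }
        return out;
    }
}
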
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/Load.java b/fe/fe-core/src/main/java/org/apache/doris/load/Load.java
index bec97bc58ff656..5a91bee9cb833c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/Load.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/Load.java
@@ -20,7 +20,6 @@
 import org.apache.doris.alter.SchemaChangeHandler;
 import org.apache.doris.analysis.Analyzer;
 import org.apache.doris.analysis.BinaryPredicate;
-import org.apache.doris.analysis.CancelLoadStmt;
 import org.apache.doris.analysis.CastExpr;
 import org.apache.doris.analysis.DataDescription;
 import org.apache.doris.analysis.Expr;
@@ -124,7 +123,6 @@
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
 
 public class Load {
     private static final Logger LOG = LogManager.getLogger(Load.class);
@@ -630,8 +628,8 @@ public static void checkAndCreateSource(Database db, DataDescription dataDescrip
             }
 
             // check partition
-            if (dataDescription.getPartitionNames() != null &&
-                    table.getPartitionInfo().getType() == PartitionType.UNPARTITIONED) {
+            if (dataDescription.getPartitionNames() != null
+                    && table.getPartitionInfo().getType() == PartitionType.UNPARTITIONED) {
                 ErrorReport.reportDdlException(ErrorCode.ERR_PARTITION_CLAUSE_NO_ALLOWED);
             }
 
@@ -2021,7 +2019,6 @@ public LinkedList> getLoadJobInfosByDb(long dbId, String dbName
     }
 
     public long getLatestJobIdByLabel(long dbId, String labelValue) {
-        LoadJob job = null;
         long jobId = 0;
         readLock();
         try {
@@ -2043,7 +2040,6 @@ public long getLatestJobIdByLabel(long dbId, String labelValue) {
 
                 if (currJobId > jobId) {
                     jobId = currJobId;
-                    job = loadJob;
                 }
             }
         } finally {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/LoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/LoadJob.java
index 599fd68ec94aa5..aec316a6ad265b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/LoadJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/LoadJob.java
@@ -328,7 +328,6 @@ public long getEtlFinishTimeMs() {
     public void setEtlFinishTimeMs(long etlFinishTimeMs) {
         this.etlFinishTimeMs = etlFinishTimeMs;
         if (etlStartTimeMs > -1) {
-            long etlCostMs = etlFinishTimeMs - etlStartTimeMs;
             switch (etlJobType) {
                 case HADOOP:
                     break;
@@ -356,18 +355,6 @@ public long getLoadFinishTimeMs() {
 
     public void setLoadFinishTimeMs(long loadFinishTimeMs) {
         this.loadFinishTimeMs = loadFinishTimeMs;
-        long loadCostMs = loadFinishTimeMs - loadStartTimeMs;
-        long totalCostMs = loadFinishTimeMs - createTimeMs;
-        switch (etlJobType) {
-            case HADOOP:
-                break;
-            case MINI:
-                break;
-            case BROKER:
-                break;
-            default:
-                break;
-        }
     }
 
     public long getQuorumFinishTimeMs() {
@@ -857,8 +844,10 @@ public void readFields(DataInput in) throws IOException {
         timeoutSecond = in.readInt();
         maxFilterRatio = in.readDouble();
 
+        // CHECKSTYLE OFF
         boolean deleteFlag = false;
         deleteFlag = in.readBoolean();
+        // CHECKSTYLE ON
 
         state = JobState.valueOf(Text.readString(in));
         progress = in.readInt();
@@ -925,10 +914,12 @@ public void readFields(DataInput in) throws IOException {
         }
 
         if (version >= 3 && version < 7) {
+            // CHECKSTYLE OFF
             // bos 3 parameters
             String bosEndpoint = Text.readString(in);
             String bosAccessKey = Text.readString(in);
             String bosSecretAccessKey = Text.readString(in);
+            // CHECKSTYLE ON
         }
 
         this.priority = TPriority.valueOf(Text.readString(in));
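
The `CHECKSTYLE OFF`/`CHECKSTYLE ON` and `CHECKSTYLE IGNORE THIS LINE` markers introduced in this hunk keep deliberately unused reads (old on-disk fields that must still be consumed so the stream stays aligned) from tripping the linter. Assuming the markers are wired to a comment filter such as Checkstyle's `SuppressWithPlainTextCommentFilter` in checkstyle.xml, the usage pattern is simply:

import java.io.DataInput;
import java.io.IOException;

// Illustrative only: the class and field names here are invented,
// but the suppression comments match the ones used in this patch.
public class LegacyFieldReader {
    public void skipLegacyFields(DataInput in) throws IOException {
        // CHECKSTYLE OFF
        boolean legacyDeleteFlag = in.readBoolean(); // unused on purpose
        int legacyVersionTag = in.readInt();         // unused on purpose
        // CHECKSTYLE ON
    }
}
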
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/MysqlLoadErrorHub.java b/fe/fe-core/src/main/java/org/apache/doris/load/MysqlLoadErrorHub.java
index 5c3d1b17014318..9ba77ebb02479b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/MysqlLoadErrorHub.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/MysqlLoadErrorHub.java
@@ -115,7 +115,7 @@ public String getTable() {
         public void write(DataOutput out) throws IOException {
             Text.writeString(out, host);
             out.writeInt(port);
-            Text.writeString(out, user) ;
+            Text.writeString(out, user);
             Text.writeString(out, passwd);
             Text.writeString(out, db);
             Text.writeString(out, table);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecordMgr.java b/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecordMgr.java
index 138b5addeb58dc..961723c0efe21b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecordMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecordMgr.java
@@ -110,7 +110,7 @@ public int compare(StreamLoadItem s1, StreamLoadItem s2) {
 
 
     public StreamLoadRecordMgr(String name, long intervalMs) {
-            super(name, intervalMs);
+        super(name, intervalMs);
     }
 
     public void addStreamLoadRecord(long dbId, String label, StreamLoadRecord streamLoadRecord) {
@@ -244,25 +244,37 @@ protected void runAfterCatalogReady() {
                 pullRecordSize += streamLoadRecordBatch.size();
                 long lastStreamLoadTime = -1;
                 for (Map.Entry entry : streamLoadRecordBatch.entrySet()) {
-                    TStreamLoadRecord streamLoadItem= entry.getValue();
-                    String startTime = TimeUtils.longToTimeString(streamLoadItem.getStartTime(), new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"));
-                    String finishTime = TimeUtils.longToTimeString(streamLoadItem.getFinishTime(), new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"));
+                    TStreamLoadRecord streamLoadItem = entry.getValue();
+                    String startTime = TimeUtils.longToTimeString(streamLoadItem.getStartTime(),
+                            new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"));
+                    String finishTime = TimeUtils.longToTimeString(streamLoadItem.getFinishTime(),
+                            new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"));
                     if (LOG.isDebugEnabled()) {
-                        LOG.debug("receive stream load record info from backend: {}. label: {}, db: {}, tbl: {}, user: {}, user_ip: {}," +
-                                        " status: {}, message: {}, error_url: {}, total_rows: {}, loaded_rows: {}, filtered_rows: {}," +
-                                        " unselected_rows: {}, load_bytes: {}, start_time: {}, finish_time: {}.",
-                                backend.getHost(), streamLoadItem.getLabel(), streamLoadItem.getDb(), streamLoadItem.getTbl(), streamLoadItem.getUser(), streamLoadItem.getUserIp(),
-                                streamLoadItem.getStatus(), streamLoadItem.getMessage(), streamLoadItem.getUrl(), streamLoadItem.getTotalRows(), streamLoadItem.getLoadedRows(),
-                                streamLoadItem.getFilteredRows(), streamLoadItem.getUnselectedRows(), streamLoadItem.getLoadBytes(), startTime, finishTime);
+                        LOG.debug("receive stream load record info from backend: {}."
+                                        + " label: {}, db: {}, tbl: {}, user: {}, user_ip: {},"
+                                        + " status: {}, message: {}, error_url: {},"
+                                        + " total_rows: {}, loaded_rows: {}, filtered_rows: {}, unselected_rows: {},"
+                                        + " load_bytes: {}, start_time: {}, finish_time: {}.",
+                                backend.getHost(), streamLoadItem.getLabel(), streamLoadItem.getDb(),
+                                streamLoadItem.getTbl(), streamLoadItem.getUser(), streamLoadItem.getUserIp(),
+                                streamLoadItem.getStatus(), streamLoadItem.getMessage(), streamLoadItem.getUrl(),
+                                streamLoadItem.getTotalRows(), streamLoadItem.getLoadedRows(),
+                                streamLoadItem.getFilteredRows(), streamLoadItem.getUnselectedRows(),
+                                streamLoadItem.getLoadBytes(), startTime, finishTime);
                     }
 
-                    AuditEvent auditEvent = new StreamLoadAuditEvent.AuditEventBuilder().setEventType(EventType.STREAM_LOAD_FINISH)
-                            .setLabel(streamLoadItem.getLabel()).setDb(streamLoadItem.getDb()).setTable(streamLoadItem.getTbl())
-                            .setUser(streamLoadItem.getUser()).setClientIp(streamLoadItem.getUserIp()).setStatus(streamLoadItem.getStatus())
-                            .setMessage(streamLoadItem.getMessage()).setUrl(streamLoadItem.getUrl()).setTotalRows(streamLoadItem.getTotalRows())
-                            .setLoadedRows(streamLoadItem.getLoadedRows()).setFilteredRows(streamLoadItem.getFilteredRows())
-                            .setUnselectedRows(streamLoadItem.getUnselectedRows()).setLoadBytes(streamLoadItem.getLoadBytes())
-                            .setStartTime(startTime).setFinishTime(finishTime).build();
+                    AuditEvent auditEvent =
+                            new StreamLoadAuditEvent.AuditEventBuilder().setEventType(EventType.STREAM_LOAD_FINISH)
+                                    .setLabel(streamLoadItem.getLabel()).setDb(streamLoadItem.getDb())
+                                    .setTable(streamLoadItem.getTbl()).setUser(streamLoadItem.getUser())
+                                    .setClientIp(streamLoadItem.getUserIp()).setStatus(streamLoadItem.getStatus())
+                                    .setMessage(streamLoadItem.getMessage()).setUrl(streamLoadItem.getUrl())
+                                    .setTotalRows(streamLoadItem.getTotalRows())
+                                    .setLoadedRows(streamLoadItem.getLoadedRows())
+                                    .setFilteredRows(streamLoadItem.getFilteredRows())
+                                    .setUnselectedRows(streamLoadItem.getUnselectedRows())
+                                    .setLoadBytes(streamLoadItem.getLoadBytes()).setStartTime(startTime)
+                                    .setFinishTime(finishTime).build();
                     Catalog.getCurrentCatalog().getAuditEventProcessor().handleAuditEvent(auditEvent);
                     if (entry.getValue().getFinishTime() > lastStreamLoadTime) {
                         lastStreamLoadTime = entry.getValue().getFinishTime();
@@ -271,11 +283,15 @@ protected void runAfterCatalogReady() {
                     if (Config.disable_show_stream_load) {
                         continue;
                     }
-                    StreamLoadRecord streamLoadRecord = new StreamLoadRecord(streamLoadItem.getLabel(), streamLoadItem.getDb(), streamLoadItem.getTbl(),
-                            streamLoadItem.getUser(), streamLoadItem.getUserIp(), streamLoadItem.getStatus(), streamLoadItem.getMessage(), streamLoadItem.getUrl(),
-                            String.valueOf(streamLoadItem.getTotalRows()), String.valueOf(streamLoadItem.getLoadedRows()),
-                            String.valueOf(streamLoadItem.getFilteredRows()), String.valueOf(streamLoadItem.getUnselectedRows()),
-                            String.valueOf(streamLoadItem.getLoadBytes()), startTime, finishTime);
+                    StreamLoadRecord streamLoadRecord =
+                            new StreamLoadRecord(streamLoadItem.getLabel(), streamLoadItem.getDb(),
+                                    streamLoadItem.getTbl(), streamLoadItem.getUser(), streamLoadItem.getUserIp(),
+                                    streamLoadItem.getStatus(), streamLoadItem.getMessage(), streamLoadItem.getUrl(),
+                                    String.valueOf(streamLoadItem.getTotalRows()),
+                                    String.valueOf(streamLoadItem.getLoadedRows()),
+                                    String.valueOf(streamLoadItem.getFilteredRows()),
+                                    String.valueOf(streamLoadItem.getUnselectedRows()),
+                                    String.valueOf(streamLoadItem.getLoadBytes()), startTime, finishTime);
 
                     String cluster = streamLoadItem.getCluster();
                     if (Strings.isNullOrEmpty(cluster)) {
@@ -292,7 +308,8 @@ protected void runAfterCatalogReady() {
                         throw new UserException("unknown database, database=" + dbName);
                     }
                     long dbId = db.getId();
-                    Catalog.getCurrentCatalog().getStreamLoadRecordMgr().addStreamLoadRecord(dbId, streamLoadItem.getLabel(), streamLoadRecord);
+                    Catalog.getCurrentCatalog().getStreamLoadRecordMgr()
+                            .addStreamLoadRecord(dbId, streamLoadItem.getLabel(), streamLoadRecord);
                 }
 
                 if (streamLoadRecordBatch.size() > 0) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadPendingTask.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadPendingTask.java
index 4da6079eac8d13..2602ef837a4490 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadPendingTask.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadPendingTask.java
@@ -122,8 +122,8 @@ private void getAllFileStatus() throws UserException {
                     LOG.info("get {} files in file group {} for table {}. size: {}. job: {}, broker: {} ",
                             filteredFileStatuses.size(), groupNum, entry.getKey(), groupFileSize,
                             callback.getCallbackId(),
-                            brokerDesc.getStorageType() == StorageBackend.StorageType.BROKER ?
-                                    BrokerUtil.getAddress(brokerDesc) : brokerDesc.getStorageType());
+                            brokerDesc.getStorageType() == StorageBackend.StorageType.BROKER
+                                    ? BrokerUtil.getAddress(brokerDesc) : brokerDesc.getStorageType());
                     groupNum++;
                 }
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJobFinalOperation.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJobFinalOperation.java
index 0180df86aa8788..97f0c0ba37411d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJobFinalOperation.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJobFinalOperation.java
@@ -119,14 +119,14 @@ public void readFields(DataInput in) throws IOException {
 
     @Override
     public String toString() {
-        return "LoadJobEndOperation{" +
-                "id=" + id +
-                ", loadingStatus=" + loadingStatus +
-                ", progress=" + progress +
-                ", loadStartTimestamp=" + loadStartTimestamp +
-                ", finishTimestamp=" + finishTimestamp +
-                ", jobState=" + jobState +
-                ", failMsg=" + failMsg +
-                '}';
+        return "LoadJobEndOperation{"
+                + "id=" + id
+                + ", loadingStatus=" + loadingStatus
+                + ", progress=" + progress
+                + ", loadStartTimestamp=" + loadStartTimestamp
+                + ", finishTimestamp=" + finishTimestamp
+                + ", jobState=" + jobState
+                + ", failMsg=" + failMsg
+                + '}';
     }
 }
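
This `toString()` rewrite is the transformation applied all through the patch: a wrapped expression now leads its continuation line with the operator (`+`, `&&`, `||`, `?`) instead of leaving it dangling at the end of the previous line, which is what Checkstyle's `OperatorWrap` check (nl policy) enforces. A minimal before/after with invented names:

public class OperatorWrapExample {
    private final long id = 42L;
    private final String state = "RUNNING";

    // Rejected by OperatorWrap (nl): operators dangle at line ends.
    //     return "Job{" + "id=" + id +
    //             ", state=" + state +
    //             '}';

    // Accepted: each continuation line starts with the operator, so the
    // left margin shows at a glance that the expression continues.
    @Override
    public String toString() {
        return "Job{"
                + "id=" + id
                + ", state=" + state
                + '}';
    }
}
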
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJobScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJobScheduler.java
index cf065bb8cff18c..e36b980920516b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJobScheduler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJobScheduler.java
@@ -63,9 +63,11 @@ private void process() throws InterruptedException {
                 return;
             }
 
-            if (needScheduleJobs.peek() instanceof BrokerLoadJob && !Catalog.getCurrentCatalog().getLoadingLoadTaskScheduler().hasIdleThread()) {
-                LOG.info("Failed to take one broker load job from queue because of loading_load_task_scheduler is full." +
-                        " Waiting for next round. You can try to increase the value of Config.async_loading_load_task_pool_size");
+            if (needScheduleJobs.peek() instanceof BrokerLoadJob
+                    && !Catalog.getCurrentCatalog().getLoadingLoadTaskScheduler().hasIdleThread()) {
+                LOG.info("Failed to take one broker load job from queue because of loading_load_task_scheduler is full."
+                        + " Waiting for next round. You can try to increase the value of"
+                        + " Config.async_loading_load_task_pool_size");
                 return;
             }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkEtlJobHandler.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkEtlJobHandler.java
index 2a3ed1a54ff425..0e55cc999fa99a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkEtlJobHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkEtlJobHandler.java
@@ -223,7 +223,7 @@ public EtlStatus getEtlJobStatus(SparkLoadAppHandle handle, String appId, long l
                     status.setFailMsg("yarn app state: " + state.toString());
                 }
             }
-            status.setTrackingUrl(handle.getUrl() != null? handle.getUrl() : report.getTrackingUrl());
+            status.setTrackingUrl(handle.getUrl() != null ? handle.getUrl() : report.getTrackingUrl());
             status.setProgress((int) (report.getProgress() * 100));
         } else {
             // state from handle
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLauncherMonitor.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLauncherMonitor.java
index 5dd171d051c21b..fc1d2a43032b90 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLauncherMonitor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLauncherMonitor.java
@@ -170,10 +170,8 @@ public void run() {
                             default:
                                 Preconditions.checkState(false, "wrong spark app state");
                         }
-                    }
-                    // parse other values
-                    else if (line.contains(QUEUE) || line.contains(START_TIME) || line.contains(FINAL_STATUS) ||
-                            line.contains(URL) || line.contains(USER)) {
+                    } else if (line.contains(QUEUE) || line.contains(START_TIME) || line.contains(FINAL_STATUS)
+                            || line.contains(URL) || line.contains(USER)) { // parse other values
                         String value = getValue(line);
                         if (!Strings.isNullOrEmpty(value)) {
                             try {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadAppHandle.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadAppHandle.java
index 3620222bbd3240..85a923d19faefd 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadAppHandle.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadAppHandle.java
@@ -116,23 +116,41 @@ public void kill() {
         }
     }
 
-    public State getState() { return this.state; }
+    public State getState() {
+        return this.state;
+    }
 
-    public String getAppId() { return this.appId; }
+    public String getAppId() {
+        return this.appId;
+    }
 
-    public String getQueue() { return this.queue; }
+    public String getQueue() {
+        return this.queue;
+    }
 
-    public Process getProcess() { return this.process; }
+    public Process getProcess() {
+        return this.process;
+    }
 
-    public long getStartTime() { return this.startTime; }
+    public long getStartTime() {
+        return this.startTime;
+    }
 
-    public FinalApplicationStatus getFinalStatus() { return this.finalStatus; }
+    public FinalApplicationStatus getFinalStatus() {
+        return this.finalStatus;
+    }
 
-    public String getUrl() { return this.trackingUrl; }
+    public String getUrl() {
+        return this.trackingUrl;
+    }
 
-    public String getUser() { return this.user; }
+    public String getUser() {
+        return this.user;
+    }
 
-    public String getLogPath() { return this.logPath; }
+    public String getLogPath() {
+        return this.logPath;
+    }
 
     public void setProcess(Process process) {
         this.process = process;
@@ -183,7 +201,7 @@ private void fireEvent(boolean isInfoChanged) {
             Iterator iterator = this.listeners.iterator();
 
             while (iterator.hasNext()) {
-                Listener l = (Listener)iterator.next();
+                Listener l = (Listener) iterator.next();
                 if (isInfoChanged) {
                     l.infoChanged(this);
                 } else {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java
index a702a81ab51f72..37bae6bd074898 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java
@@ -192,10 +192,10 @@ private void setResourceInfo() throws DdlException {
     public void beginTxn()
             throws LabelAlreadyUsedException, BeginTransactionException, AnalysisException, DuplicatedRequestException,
             QuotaExceedException, MetaNotFoundException {
-       transactionId = Catalog.getCurrentGlobalTransactionMgr()
+        transactionId = Catalog.getCurrentGlobalTransactionMgr()
                 .beginTransaction(dbId, Lists.newArrayList(fileGroupAggInfo.getAllTableIds()), label, null,
-                                  new TxnCoordinator(TxnSourceType.FE, FrontendOptions.getLocalHostAddress()),
-                                  LoadJobSourceType.FRONTEND, id, getTimeout());
+                        new TxnCoordinator(TxnSourceType.FE, FrontendOptions.getLocalHostAddress()),
+                        LoadJobSourceType.FRONTEND, id, getTimeout());
     }
 
     @Override
@@ -497,9 +497,11 @@ private Set submitPushTasks() throws UserException {
                                         tBrokerScanRange.getBrokerAddresses().add(
                                                 new TNetworkAddress(fsBroker.ip, fsBroker.port));
 
-                                        LOG.debug("push task for replica {}, broker {}:{}, backendId {}, filePath {}, fileSize {}" ,
-                                                  replicaId, fsBroker.ip, fsBroker.port, backendId, tBrokerRangeDesc.path,
-                                                  tBrokerRangeDesc.file_size);
+                                        LOG.debug("push task for replica {}, broker {}:{},"
+                                                        + " backendId {}, filePath {}, fileSize {}",
+                                                replicaId, fsBroker.ip, fsBroker.port, backendId,
+                                                tBrokerRangeDesc.path,
+                                                tBrokerRangeDesc.file_size);
 
                                         PushTask pushTask = new PushTask(backendId, dbId, olapTable.getId(), partitionId,
                                                                          indexId, tabletId, replicaId, schemaHash,
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkPendingTaskAttachment.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkPendingTaskAttachment.java
index 4189defdb693a7..a2bbb058e934c6 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkPendingTaskAttachment.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkPendingTaskAttachment.java
@@ -52,9 +52,9 @@ public void setOutputPath(String outputPath) {
 
     @Override
     public String toString() {
-        return "SparkPendingTaskAttachment{" +
-                "appId='" + appId + '\'' +
-                ", outputPath='" + outputPath + '\'' +
-                '}';
+        return "SparkPendingTaskAttachment{"
+                + "appId='" + appId + '\''
+                + ", outputPath='" + outputPath + '\''
+                + '}';
     }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkRepository.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkRepository.java
index 7f5a57831985ff..4476e0d7a69c8c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkRepository.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkRepository.java
@@ -171,36 +171,36 @@ private void uploadArchive(boolean isReplace) throws LoadException {
             }
             String srcFilePath = null;
             // upload dpp
-            {
+            { // CHECKSTYLE IGNORE THIS LINE
                 // 1. upload dpp
                 srcFilePath = localDppPath;
                 String fileName = getFileName(PATH_DELIMITER, srcFilePath);
-                String origFilePath = remoteArchivePath + PATH_DELIMITER +
-                        assemblyFileName(PREFIX_LIB, "", fileName, "");
+                String origFilePath = remoteArchivePath + PATH_DELIMITER
+                        + assemblyFileName(PREFIX_LIB, "", fileName, "");
                 upload(srcFilePath, origFilePath);
                 // 2. rename dpp
                 String md5sum = getMd5String(srcFilePath);
                 long size = getFileSize(srcFilePath);
-                String destFilePath = remoteArchivePath + PATH_DELIMITER +
-                        assemblyFileName(PREFIX_LIB, md5sum, fileName, "");
+                String destFilePath = remoteArchivePath + PATH_DELIMITER
+                        + assemblyFileName(PREFIX_LIB, md5sum, fileName, "");
                 rename(origFilePath, destFilePath);
                 currentArchive.libraries.add(new SparkLibrary(destFilePath, md5sum, SparkLibrary.LibType.DPP, size));
-            }
+            } // CHECKSTYLE IGNORE THIS LINE
             // upload spark2x
-            {
+            { // CHECKSTYLE IGNORE THIS LINE
                 // 1. upload spark2x
                 srcFilePath = localSpark2xPath;
-                String origFilePath = remoteArchivePath + PATH_DELIMITER +
-                        assemblyFileName(PREFIX_LIB, "", SPARK_2X, ".zip");
+                String origFilePath = remoteArchivePath + PATH_DELIMITER
+                        + assemblyFileName(PREFIX_LIB, "", SPARK_2X, ".zip");
                 upload(srcFilePath, origFilePath);
                 // 2. rename spark2x
                 String md5sum = getMd5String(srcFilePath);
                 long size = getFileSize(srcFilePath);
-                String destFilePath = remoteArchivePath + PATH_DELIMITER +
-                        assemblyFileName(PREFIX_LIB, md5sum, SPARK_2X, ".zip");
+                String destFilePath = remoteArchivePath + PATH_DELIMITER
+                        + assemblyFileName(PREFIX_LIB, md5sum, SPARK_2X, ".zip");
                 rename(origFilePath, destFilePath);
                 currentArchive.libraries.add(new SparkLibrary(destFilePath, md5sum, SparkLibrary.LibType.SPARK2X, size));
-            }
+            } // CHECKSTYLE IGNORE THIS LINE
             LOG.info("finished to upload archive to repository, currentDppVersion={}, path={}",
                     currentDppVersion, remoteArchivePath);
         } catch (UserException e) {
@@ -275,11 +275,11 @@ public long getFileSize(String filePath) throws LoadException {
 
     private void upload(String srcFilePath, String destFilePath) throws LoadException {
         try {
-            BrokerUtil.writeFile(srcFilePath, destFilePath , brokerDesc);
+            BrokerUtil.writeFile(srcFilePath, destFilePath, brokerDesc);
             LOG.info("finished to upload file, localPath={}, remotePath={}", srcFilePath, destFilePath);
         } catch (UserException e) {
-            throw new LoadException("failed to upload lib to repository, srcPath=" +srcFilePath +
-                    " destPath=" + destFilePath + " message=" + e.getMessage());
+            throw new LoadException("failed to upload lib to repository, srcPath=" + srcFilePath
+                    + " destPath=" + destFilePath + " message=" + e.getMessage());
         }
     }
 
@@ -288,8 +288,8 @@ private void rename(String origFilePath, String destFilePath) throws LoadExcepti
             BrokerUtil.rename(origFilePath, destFilePath, brokerDesc);
             LOG.info("finished to rename file, originPath={}, destPath={}", origFilePath, destFilePath);
         } catch (UserException e) {
-            throw new LoadException("failed to rename file from " + origFilePath + " to " + destFilePath +
-                    ", message=" + e.getMessage());
+            throw new LoadException("failed to rename file from " + origFilePath + " to " + destFilePath
+                    + ", message=" + e.getMessage());
         }
     }
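
The archive upload above follows a two-step publish: write the library under a temporary name, compute its MD5, then rename it so the checksum is embedded in the final name (`__lib_<md5>_<file>`). Readers can then trust a fully named file to be complete, because the rename only happens after the upload succeeds. A self-contained sketch of the same idea against the local filesystem (the broker calls are stubbed out here):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class ChecksumPublisher {
    // Publish src into repoDir as "__lib_<md5>_<name>"; the intermediate name
    // makes half-written files distinguishable from finished ones.
    public static Path publish(Path src, Path repoDir) throws IOException, NoSuchAlgorithmException {
        String fileName = src.getFileName().toString();
        Path temp = repoDir.resolve("__lib_" + fileName);            // 1. upload under a neutral name
        Files.copy(src, temp, StandardCopyOption.REPLACE_EXISTING);

        String md5 = md5Hex(Files.readAllBytes(temp));               // 2. checksum what actually landed
        Path dest = repoDir.resolve("__lib_" + md5 + "_" + fileName);
        Files.move(temp, dest, StandardCopyOption.REPLACE_EXISTING); // 3. rename marks it complete
        return dest;
    }

    private static String md5Hex(byte[] data) throws NoSuchAlgorithmException {
        StringBuilder sb = new StringBuilder();
        for (byte b : MessageDigest.getInstance("MD5").digest(data)) {
            sb.append(String.format("%02x", b));
        }
        return sb.toString();
    }
}
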
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkYarnConfigFiles.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkYarnConfigFiles.java
index 5b06853a0a9aac..88038d081b2ccf 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkYarnConfigFiles.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkYarnConfigFiles.java
@@ -232,7 +232,7 @@ private void createXML(String filePath, Map properties) throws L
         private Node appendNode(Node parent, String tag, String content) {
             Element child = null;
             if (parent instanceof  Document) {
-                child = ((Document)parent).createElement(tag);
+                child = ((Document) parent).createElement(tag);
             } else {
                 child = parent.getOwnerDocument().createElement(tag);
             }
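
The `appendNode` fix above only tightens a cast, but the branch it touches is the standard DOM idiom: a `Document` creates elements directly, while any other `Node` must go through its owner document. A minimal sketch of that pattern (the property names are invented for illustration):

import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;

import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;

public class XmlConfigSketch {
    // A Document can create elements itself; a child node has to ask its
    // owner document, which is exactly the distinction the patched cast makes.
    static Node appendNode(Node parent, String tag, String content) {
        Element child = (parent instanceof Document)
                ? ((Document) parent).createElement(tag)
                : parent.getOwnerDocument().createElement(tag);
        if (content != null && !content.isEmpty()) {
            child.setTextContent(content);
        }
        return parent.appendChild(child);
    }

    public static void main(String[] args) throws ParserConfigurationException {
        Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();
        Node conf = appendNode(doc, "configuration", null);
        Node prop = appendNode(conf, "property", null);
        appendNode(prop, "name", "yarn.resourcemanager.address"); // illustrative key
        appendNode(prop, "value", "127.0.0.1:8032");
    }
}
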
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java
index f645ed93b9adca..de740873514cc0 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java
@@ -39,7 +39,6 @@
 import org.apache.doris.common.util.SmallFileMgr.SmallFile;
 import org.apache.doris.common.util.TimeUtils;
 import org.apache.doris.persist.AlterRoutineLoadJobOperationLog;
-import org.apache.doris.system.SystemInfoService;
 import org.apache.doris.transaction.TransactionState;
 import org.apache.doris.transaction.TransactionStatus;
 
@@ -228,7 +227,6 @@ public void divideRoutineLoadJob(int currentConcurrentTaskNum) throws UserExcept
 
     @Override
     public int calculateCurrentConcurrentTaskNum() {
-        SystemInfoService systemInfoService = Catalog.getCurrentSystemInfo();
         int partitionNum = currentKafkaPartitions.size();
         if (desireTaskConcurrentNum == 0) {
             desireTaskConcurrentNum = Config.max_routine_load_task_concurrent_num;
@@ -254,8 +252,8 @@ protected boolean checkCommitInfo(RLTaskTxnCommitAttachment rlTaskTxnCommitAttac
 
         // Running here, the status of the transaction should be ABORTED,
         // and it is caused by other errors. In this case, we should not update the offset.
-        LOG.debug("no need to update the progress of kafka routine load. txn status: {}, " +
-                        "txnStatusChangeReason: {}, task: {}, job: {}",
+        LOG.debug("no need to update the progress of kafka routine load. txn status: {}, "
+                        + "txnStatusChangeReason: {}, task: {}, job: {}",
                 txnState.getTransactionStatus(), txnStatusChangeReason,
                 DebugUtil.printId(rlTaskTxnCommitAttachment.getTaskId()), id);
         return false;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java
index 34df317104cefa..d8a8ec210cb089 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java
@@ -20,7 +20,6 @@
 import org.apache.doris.analysis.AlterRoutineLoadStmt;
 import org.apache.doris.analysis.CreateRoutineLoadStmt;
 import org.apache.doris.analysis.Expr;
-import org.apache.doris.analysis.ImportColumnDesc;
 import org.apache.doris.analysis.ImportColumnsStmt;
 import org.apache.doris.analysis.LoadStmt;
 import org.apache.doris.analysis.PartitionNames;
@@ -111,29 +110,29 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl
     public static final boolean DEFAULT_LOAD_TO_SINGLE_TABLET = false;
 
     protected static final String STAR_STRING = "*";
-     /*
-                      +-----------------+
-     fe schedule job  |  NEED_SCHEDULE  |  user resume job
-          +-----------+                 | <---------+
-          |           |                 |           |
-          v           +-----------------+           ^
-          |                                         |
-     +------------+   user(system)pause job +-------+----+
-     |  RUNNING   |                         |  PAUSED    |
-     |            +-----------------------> |            |
-     +----+-------+                         +-------+----+
-     |    |                                         |
-     |    |           +---------------+             |
-     |    |           | STOPPED       |             |
-     |    +---------> |               | <-----------+
-     |   user stop job+---------------+    user stop job
-     |
-     |
-     |               +---------------+
-     |               | CANCELLED     |
-     +-------------> |               |
-     system error    +---------------+
-     */
+    /*
+                     +-----------------+
+    fe schedule job  |  NEED_SCHEDULE  |  user resume job
+         +-----------+                 | <---------+
+         |           |                 |           |
+         v           +-----------------+           ^
+         |                                         |
+    +------------+   user(system)pause job +-------+----+
+    |  RUNNING   |                         |  PAUSED    |
+    |            +-----------------------> |            |
+    +----+-------+                         +-------+----+
+    |    |                                         |
+    |    |           +---------------+             |
+    |    |           | STOPPED       |             |
+    |    +---------> |               | <-----------+
+    |   user stop job+---------------+    user stop job
+    |
+    |
+    |               +---------------+
+    |               | CANCELLED     |
+    +-------------> |               |
+    system error    +---------------+
+    */
     public enum JobState {
         NEED_SCHEDULE,
         RUNNING,
@@ -1011,7 +1010,7 @@ public void afterAborted(TransactionState txnState, boolean txnOperated, String
                             case OFFSET_OUT_OF_RANGE:
                             case PAUSE:
                                 String msg = "be " + taskBeId + " abort task "
-                                    + "with reason: " + txnStatusChangeReasonString;
+                                        + "with reason: " + txnStatusChangeReasonString;
                                 updateState(JobState.PAUSED,
                                         new ErrorReason(InternalErrorCode.TASKS_ABORT_ERR, msg),
                                         false /* not replay */);
@@ -1303,7 +1302,7 @@ public List getShowInfo() {
                     row.add(pauseReason == null ? "" : pauseReason.toString());
                     break;
                 case CANCELLED:
-                    row.add(cancelReason == null? "" : cancelReason.toString());
+                    row.add(cancelReason == null ? "" : cancelReason.toString());
                     break;
                 default:
                     row.add("");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java
index 3a2e932bc0d94b..0b3f45cfba83b3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java
@@ -55,7 +55,6 @@
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
-
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
@@ -554,7 +553,7 @@ public List getJob(String dbFullName, String jobName, boolean in
         // return all of routine load job
         List result;
         RESULT:
-        {
+        { // CHECKSTYLE IGNORE THIS LINE
             if (dbFullName == null) {
                 result = new ArrayList<>(idToRoutineLoadJob.values());
                 sortRoutineLoadJob(result);
@@ -582,7 +581,7 @@ public List getJob(String dbFullName, String jobName, boolean in
                 break RESULT;
             }
             return null;
-        }
+        } // CHECKSTYLE IGNORE THIS LINE
 
         if (!includeHistory) {
             result = result.stream().filter(entity -> !entity.getState().isFinalState()).collect(Collectors.toList());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskScheduler.java
index b01b196595b10f..589f72dc8f3444 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskScheduler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskScheduler.java
@@ -89,7 +89,6 @@ private void process() throws UserException, InterruptedException {
         // update the max slot num of each backend periodically
         updateBackendSlotIfNecessary();
 
-        long start = System.currentTimeMillis();
         // if size of queue is zero, tasks will be submitted by batch
         int idleSlotNum = routineLoadManager.getClusterIdleSlotNum();
         // scheduler will be blocked when there is no slot for task in cluster
@@ -142,8 +141,7 @@ private void scheduleOneTask(RoutineLoadTaskInfo routineLoadTaskInfo) throws Exc
             }
         } catch (UserException e) {
             routineLoadManager.getJob(routineLoadTaskInfo.getJobId()).
-                    updateState(JobState.PAUSED,
-                    new ErrorReason(e.getErrorCode(), e.getMessage()), false);
+                    updateState(JobState.PAUSED, new ErrorReason(e.getErrorCode(), e.getMessage()), false);
             throw e;
         } catch (Exception e) {
             // exception happens, PAUSE the job
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/ScheduleRule.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/ScheduleRule.java
index 19dfb1628429ea..f7d7172b2db8f5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/ScheduleRule.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/ScheduleRule.java
@@ -47,7 +47,7 @@ public static boolean isNeedAutoSchedule(RoutineLoadJob jobRoutine) {
         if (jobRoutine.state != RoutineLoadJob.JobState.PAUSED) {
             return false;
         }
-        if (jobRoutine.autoResumeLock) {//only manual resume for unlock
+        if (jobRoutine.autoResumeLock) { //only manual resume for unlock
             LOG.debug("routine load job {}'s autoResumeLock is true, skip", jobRoutine.id);
             return false;
         }
@@ -55,28 +55,28 @@ public static boolean isNeedAutoSchedule(RoutineLoadJob jobRoutine) {
         /*
          * Handle all backends are down.
          */
-        LOG.debug("try to auto reschedule routine load {}, firstResumeTimestamp: {}, autoResumeCount: {}, " +
-                        "pause reason: {}",
+        LOG.debug("try to auto reschedule routine load {}, firstResumeTimestamp: {}, autoResumeCount: {}, "
+                        + "pause reason: {}",
                 jobRoutine.id, jobRoutine.firstResumeTimestamp, jobRoutine.autoResumeCount,
                 jobRoutine.pauseReason == null ? "null" : jobRoutine.pauseReason.getCode().name());
         if (jobRoutine.pauseReason != null && jobRoutine.pauseReason.getCode() == InternalErrorCode.REPLICA_FEW_ERR) {
             int dead = deadBeCount(jobRoutine.clusterName);
             if (dead > Config.max_tolerable_backend_down_num) {
-                LOG.debug("dead backend num {} is larger than config {}, " +
-                                "routine load job {} can not be auto rescheduled",
+                LOG.debug("dead backend num {} is larger than config {}, "
+                                + "routine load job {} can not be auto rescheduled",
                         dead, Config.max_tolerable_backend_down_num, jobRoutine.id);
                 return false;
             }
 
-            if (jobRoutine.firstResumeTimestamp == 0) {//the first resume
+            if (jobRoutine.firstResumeTimestamp == 0) { //the first resume
                 jobRoutine.firstResumeTimestamp = System.currentTimeMillis();
                 jobRoutine.autoResumeCount = 1;
                 return true;
             } else {
                 long current = System.currentTimeMillis();
-                if (current - jobRoutine.firstResumeTimestamp < Config.period_of_auto_resume_min * 60000) {
+                if (current - jobRoutine.firstResumeTimestamp < Config.period_of_auto_resume_min * 60000L) {
                     if (jobRoutine.autoResumeCount >= 3) {
-                        jobRoutine.autoResumeLock = true;// locked Auto Resume RoutineLoadJob
+                        jobRoutine.autoResumeLock = true; // locked Auto Resume RoutineLoadJob
                         return false;
                     }
                     jobRoutine.autoResumeCount++;
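
One change in this file is behavioral rather than cosmetic: `Config.period_of_auto_resume_min * 60000` became `* 60000L`. With an int config value the multiplication runs in 32-bit arithmetic and can wrap before the result is compared with the millisecond delta; the long literal promotes the whole expression to 64-bit. A standalone demonstration:

public class OverflowDemo {
    public static void main(String[] args) {
        int minutes = 60000; // an extreme but legal int config value

        long wrong = minutes * 60000;   // int * int overflows first, then widens
        long right = minutes * 60000L;  // int * long is computed in 64-bit

        System.out.println(wrong); // -694967296: 3600000000 wrapped around
        System.out.println(right); // 3600000000
    }
}

Only an overflowed product would make the auto-resume window misbehave, but the one-character suffix removes the hazard for any configured period.
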
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChecker.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChecker.java
index 8b64f34acd6eb1..c0fa55ac6ec176 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChecker.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChecker.java
@@ -60,7 +60,7 @@ private void process() throws UserException {
             UserException exception = null;
             try {
                 job.execute();
-            } catch (MetaNotFoundException| DdlException e) {
+            } catch (MetaNotFoundException | DdlException e) {
                 msgType = SyncFailMsg.MsgType.SCHEDULE_FAIL;
                 exception = e;
                 LOG.warn(e.getMessage());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncJobManager.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncJobManager.java
index f010e8c529dc45..d42821dfefe3f5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncJobManager.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncJobManager.java
@@ -258,7 +258,7 @@ public boolean isJobNameExist(String dbName, String jobName) throws DdlException
             Map> jobNameToSyncJobs = dbIdToJobNameToSyncJobs.get(db.getId());
             if (jobNameToSyncJobs != null && jobNameToSyncJobs.containsKey(jobName)) {
                 List matchJobs = jobNameToSyncJobs.get(jobName);
-                for(SyncJob syncJob : matchJobs) {
+                for (SyncJob syncJob : matchJobs) {
                     if (!syncJob.isCancelled()) {
                         result = true;
                     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalDestination.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalDestination.java
index f2ae06a8f0168f..b15afeff1e1b35 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalDestination.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalDestination.java
@@ -106,14 +106,14 @@ public boolean equals(Object other) {
             return false;
         }
         CanalDestination otherDestination = (CanalDestination) other;
-        return ip.equalsIgnoreCase(otherDestination.getIp()) && port == otherDestination.getPort() &&
-                destination.equalsIgnoreCase(otherDestination.getDestination());
+        return ip.equalsIgnoreCase(otherDestination.getIp()) && port == otherDestination.getPort()
+                && destination.equalsIgnoreCase(otherDestination.getDestination());
     }
 
     @Override
     public String toString() {
-        return "CanalDestination [ip=" + ip + ", port=" + port +
-                ", destination=" + destination + "]";
+        return "CanalDestination [ip=" + ip + ", port=" + port
+                + ", destination=" + destination + "]";
     }
 
     @Override
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncChannel.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncChannel.java
index f292107cacdbeb..cb101e6891881d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncChannel.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncChannel.java
@@ -129,11 +129,11 @@ public void beginTxn(long batchId) throws UserException, TException, TimeoutExce
                 TStreamLoadPutRequest request = null;
                 try {
                     long txnId = globalTransactionMgr.beginTransaction(db.getId(),
-                        Lists.newArrayList(tbl.getId()), label,
+                            Lists.newArrayList(tbl.getId()), label,
                         new TransactionState.TxnCoordinator(TransactionState.TxnSourceType.FE,
                             FrontendOptions.getLocalHostAddress()), sourceType, timeoutSecond);
                     String authCodeUuid = Catalog.getCurrentGlobalTransactionMgr().getTransactionState(
-                        db.getId(), txnId).getAuthCode();
+                            db.getId(), txnId).getAuthCode();
                     request = new TStreamLoadPutRequest()
                         .setTxnId(txnId).setDb(txnConf.getDb()).setTbl(txnConf.getTbl())
                         .setFileType(TFileType.FILE_STREAM).setFormatType(TFileFormatType.FORMAT_CSV_PLAIN)
@@ -142,46 +142,48 @@ public void beginTxn(long batchId) throws UserException, TException, TimeoutExce
                         .setColumns(targetColumn);
                     txnConf.setTxnId(txnId).setAuthCodeUuid(authCodeUuid);
                     txnEntry.setLabel(label);
-                    txnExecutor.setTxnId (txnId);
+                    txnExecutor.setTxnId(txnId);
                 } catch (DuplicatedRequestException e) {
-                    LOG.warn ("duplicate request for sync channel. channel: {}, request id: {}, txn: {}, table: {}",
-                        id, e.getDuplicatedRequestId(), e.getTxnId(), targetTable);
+                    LOG.warn("duplicate request for sync channel. channel: {},"
+                                    + " request id: {}, txn: {}, table: {}",
+                            id, e.getDuplicatedRequestId(), e.getTxnId(), targetTable);
                     txnExecutor.setTxnId(e.getTxnId());
                 } catch (LabelAlreadyUsedException e) {
                     // this happens when channel re-consume same batch,
                     // we should just pass through it without begin a new txn
-                    LOG.warn ("Label already used in channel {}, label: {}, table: {}, batch: {}",
-                        id, label, targetTable, batchId);
+                    LOG.warn("Label already used in channel {}, label: {}, table: {}, batch: {}",
+                            id, label, targetTable, batchId);
                     return;
                 } catch (AnalysisException | BeginTransactionException e) {
-                    LOG.warn ("encounter an error when beginning txn in channel {}, table: {}",
-                        id, targetTable);
+                    LOG.warn("encounter an error when beginning txn in channel {}, table: {}",
+                            id, targetTable);
                     throw e;
                 } catch (UserException e) {
-                    LOG.warn ("encounter an error when creating plan in channel {}, table: {}",
-                        id, targetTable);
+                    LOG.warn("encounter an error when creating plan in channel {}, table: {}",
+                            id, targetTable);
                     throw e;
                 }
                 try {
                     // async exec begin transaction
                     long txnId = txnExecutor.getTxnId();
-                    if ( txnId != - 1L ) {
-                        this.txnExecutor.beginTransaction (request);
-                        LOG.info ("begin txn in channel {}, table: {}, label:{}, txn id: {}",
-                            id, targetTable, label, txnExecutor.getTxnId());
+                    if (txnId != -1L) {
+                        this.txnExecutor.beginTransaction(request);
+                        LOG.info("begin txn in channel {}, table: {}, label:{}, txn id: {}",
+                                id, targetTable, label, txnExecutor.getTxnId());
                     }
-                } catch ( TException e) {
-                    LOG.warn ("Failed to begin txn in channel {}, table: {}, txn: {}, msg:{}",
-                        id, targetTable, txnExecutor.getTxnId(), e.getMessage());
+                } catch (TException e) {
+                    LOG.warn("Failed to begin txn in channel {}, table: {}, txn: {}, msg:{}",
+                            id, targetTable, txnExecutor.getTxnId(), e.getMessage());
                     throw e;
-                } catch ( TimeoutException | InterruptedException | ExecutionException e) {
-                    LOG.warn ("Error occur while waiting begin txn response in channel {}, table: {}, txn: {}, msg:{}",
-                        id, targetTable, txnExecutor.getTxnId(), e.getMessage());
+                } catch (TimeoutException | InterruptedException | ExecutionException e) {
+                    LOG.warn("Error occur while waiting begin txn response in channel {},"
+                            + " table: {}, txn: {}, msg:{}", id, targetTable, txnExecutor.getTxnId(), e.getMessage());
                     throw e;
                 }
             } else {
                 String failMsg = "current running txns on db " + db.getId() + " is "
-                    + databaseTransactionMgr.getRunningTxnNums() + ", larger than limit " + Config.max_running_txn_num_per_db;
+                        + databaseTransactionMgr.getRunningTxnNums()
+                        + ", larger than limit " + Config.max_running_txn_num_per_db;
                 LOG.warn(failMsg);
                 throw new BeginTransactionException(failMsg);
             }
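
The begin-txn block reformatted above is idempotent by design: a DuplicatedRequestException carries the id of the transaction that an earlier retry already opened, and a LabelAlreadyUsedException means the same batch is being re-consumed and nothing needs to begin. A minimal self-contained sketch of that pattern follows; the service interface and exception fields here are illustrative stand-ins, not the Doris API.

final class IdempotentBeginSketch {
    static class DuplicatedRequestException extends Exception {
        final long txnId;
        DuplicatedRequestException(long txnId) { this.txnId = txnId; }
    }

    static class LabelAlreadyUsedException extends Exception {
    }

    interface TxnService {
        long begin(String label) throws DuplicatedRequestException, LabelAlreadyUsedException;
    }

    // Returns the txn id to use, or -1 when this batch was already handled.
    static long beginIdempotently(TxnService svc, String label) {
        try {
            return svc.begin(label);  // normal path: a fresh txn id
        } catch (DuplicatedRequestException e) {
            return e.txnId;           // a retried request already opened the txn: reuse its id
        } catch (LabelAlreadyUsedException e) {
            return -1L;               // same batch re-consumed: nothing to begin
        }
    }
}
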
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncJob.java
index 7749c9d6e745ce..71c004ba19d90c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncJob.java
@@ -313,7 +313,7 @@ public CanalDestination getRemote() {
     @Override
     public String toString() {
         return "SyncJob [jobId=" + id
-                + ", jobName=" +jobName
+                + ", jobName=" + jobName
                 + ", dbId=" + dbId
                 + ", state=" + jobState
                 + ", createTimeMs=" + TimeUtils.longToTimeString(createTimeMs)
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalUtils.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalUtils.java
index 1bc828718a87fc..caced40563a65f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalUtils.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalUtils.java
@@ -154,6 +154,7 @@ public static void printColumn(List<CanalEntry.Column> columns) {
                             .append(column.getValue());
                 }
             } catch (UnsupportedEncodingException e) {
+                // CHECKSTYLE IGNORE THIS LINE
             }
             builder.append("    type=").append(column.getMysqlType());
             if (column.getUpdated()) {
@@ -194,7 +195,7 @@ public static void transactionBegin(CanalEntry.Entry entry) {
             throw new CanalException("parse event has an error , data:" + entry.toString(), e);
         }
         // print transaction begin info, thread ID, time consumption
-        logger.info(transaction_format,entry.getHeader().getLogfileName(),
+        logger.info(transaction_format, entry.getHeader().getLogfileName(),
                 String.valueOf(entry.getHeader().getLogfileOffset()),
                 String.valueOf(entry.getHeader().getExecuteTime()), simpleDateFormat.format(date),
                 entry.getHeader().getGtid(), String.valueOf(delayTime));
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/position/EntryPosition.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/position/EntryPosition.java
index 419247f033d112..9486713280e04a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/position/EntryPosition.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/position/EntryPosition.java
@@ -29,7 +29,7 @@ public class EntryPosition {
     public static final EntryPosition MIN_POS = new EntryPosition("", -1L, null);
 
     public EntryPosition() {
-        this(null, (Long)null, (Long)null);
+        this(null, null, null);
     }
 
     public EntryPosition(String journalName, Long position, Long timestamp) {
@@ -40,7 +40,7 @@ public EntryPosition(String journalName, Long position, Long timestamp) {
     }
 
     public EntryPosition(String journalName, Long position) {
-        this(journalName, position, (Long)null);
+        this(journalName, position, null);
     }
 
     public String getJournalName() {
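
Context for the two EntryPosition hunks above: a cast like (Long) null is only ever needed to disambiguate overloads, and EntryPosition has a single three-argument constructor, so this(null, null, null) resolves uniquely and the casts were redundant. A self-contained illustration of when such a cast does matter:

final class NullOverloadSketch {
    static void f(String s) {
        System.out.println("f(String)");
    }

    static void f(Long l) {
        System.out.println("f(Long)");
    }

    public static void main(String[] args) {
        // f(null);     // does not compile: ambiguous between f(String) and f(Long)
        f((Long) null); // the cast selects f(Long)
    }
}
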
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/update/UpdatePlanner.java b/fe/fe-core/src/main/java/org/apache/doris/load/update/UpdatePlanner.java
index c8660c316d699c..501f5bcc76ff90 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/load/update/UpdatePlanner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/load/update/UpdatePlanner.java
@@ -162,8 +162,8 @@ private List<Expr> computeOutputExprs() throws AnalysisException {
         for (int i = 0; i < targetTable.getFullSchema().size(); i++) {
             Column column = targetTable.getFullSchema().get(i);
             // pay attention to case ignore of column name
-            String originColumnName = (column.getName().startsWith(SchemaChangeHandler.SHADOW_NAME_PRFIX) ?
-                    column.getName().substring(SchemaChangeHandler.SHADOW_NAME_PRFIX.length()) : column.getName())
+            String originColumnName = (column.getName().startsWith(SchemaChangeHandler.SHADOW_NAME_PRFIX)
+                    ? column.getName().substring(SchemaChangeHandler.SHADOW_NAME_PRFIX.length()) : column.getName())
                     .toLowerCase();
             Expr setExpr = columnNameToSetExpr.get(originColumnName);
             SlotDescriptor srcSlotDesc = columnNameToSrcSlotDesc.get(originColumnName);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java b/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java
index 5cd07f463e2cd0..8f7cc064d9dd7c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java
@@ -129,9 +129,9 @@ public TMasterResult finishTask(TFinishTaskRequest request) {
         } else {
             if (taskStatus.getStatusCode() != TStatusCode.OK) {
                 task.failed();
-                String errMsg = "task type: " + taskType + ", status_code: " + taskStatus.getStatusCode().toString() +
-                        (taskStatus.isSetErrorMsgs() ? (", status_message: " + taskStatus.getErrorMsgs()) : "") +
-                        ", backendId: " + backend + ", signature: " + signature;
+                String errMsg = "task type: " + taskType + ", status_code: " + taskStatus.getStatusCode().toString()
+                        + (taskStatus.isSetErrorMsgs() ? (", status_message: " + taskStatus.getErrorMsgs()) : "")
+                        + ", backendId: " + backend + ", signature: " + signature;
                 task.setErrorMsg(errMsg);
                 // We start to let FE perceive the task's error msg
                 if (taskType != TTaskType.MAKE_SNAPSHOT && taskType != TTaskType.UPLOAD
@@ -145,7 +145,6 @@ public TMasterResult finishTask(TFinishTaskRequest request) {
         }
 
         try {
-            List<TTabletInfo> finishTabletInfos;
             switch (taskType) {
                 case CREATE:
                     Preconditions.checkState(request.isSetReportVersion());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java
index 49d143aee422ec..d130fbaf0a2678 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java
@@ -249,8 +249,8 @@ private static void tabletReport(long backendId, Map<Long, TTablet> backendTable
                 backendId, backendTablets.size(), backendReportVersion);
 
         // storage medium map
-        HashMap<Long, TStorageMedium> storageMediumMap = Config.disable_storage_medium_check ?
-                Maps.newHashMap() : Catalog.getCurrentCatalog().getPartitionIdToStorageMediumMap();
+        HashMap<Long, TStorageMedium> storageMediumMap = Config.disable_storage_medium_check
+                ? Maps.newHashMap() : Catalog.getCurrentCatalog().getPartitionIdToStorageMediumMap();
 
         // db id -> tablet id
         ListMultimap<Long, Long> tabletSyncMap = LinkedListMultimap.create();
@@ -729,8 +729,9 @@ private static void handleMigration(ListMultimap<TStorageMedium, Long> tabletMet
         for (TStorageMedium storageMedium : tabletMetaMigrationMap.keySet()) {
             List<Long> tabletIds = tabletMetaMigrationMap.get(storageMedium);
             if (!be.hasSpecifiedStorageMedium(storageMedium)) {
-                LOG.warn("no specified storage medium {} on backend {}, skip storage migration." +
-                        " sample tablet id: {}", storageMedium, backendId, tabletIds.isEmpty() ? "-1" : tabletIds.get(0));
+                LOG.warn("no specified storage medium {} on backend {}, skip storage migration."
+                        + " sample tablet id: {}", storageMedium, backendId, tabletIds.isEmpty()
+                        ? "-1" : tabletIds.get(0));
                 continue;
             }
             List<TabletMeta> tabletMetaList = invertedIndex.getTabletMetaList(tabletIds);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/metric/JsonMetricVisitor.java b/fe/fe-core/src/main/java/org/apache/doris/metric/JsonMetricVisitor.java
index 953745c0d1bc73..43b634360d4ca5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/metric/JsonMetricVisitor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/metric/JsonMetricVisitor.java
@@ -63,7 +63,7 @@ public void visit(StringBuilder sb, @SuppressWarnings("rawtypes") Metric metric)
             }
         }
         sb.append("\n\t},\n");
-        sb.append("\t\"unit\":\"").append(metric.getUnit().name().toLowerCase()).append( "\",\n");
+        sb.append("\t\"unit\":\"").append(metric.getUnit().name().toLowerCase()).append("\",\n");
 
         // value
         sb.append("\t\"value\":").append(metric.getValue().toString()).append("\n}");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/metric/MetricRepo.java b/fe/fe-core/src/main/java/org/apache/doris/metric/MetricRepo.java
index 318e88ceb9ddd5..1772197e46fc39 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/metric/MetricRepo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/metric/MetricRepo.java
@@ -344,8 +344,8 @@ public Long getValue() {
         HISTO_EDIT_LOG_WRITE_LATENCY = METRIC_REGISTER.histogram(MetricRegistry.name("editlog", "write", "latency", "ms"));
 
         METRIC_REGISTER.register(MetricRegistry.name("palo", "fe", "query", "max_instances_num_per_user"), (Gauge<Integer>) () -> {
-            try{
-                return ((QeProcessorImpl)QeProcessorImpl.INSTANCE).getInstancesNumPerUser().values().stream()
+            try {
+                return ((QeProcessorImpl) QeProcessorImpl.INSTANCE).getInstancesNumPerUser().values().stream()
                         .reduce(-1, BinaryOperator.maxBy(Integer::compareTo));
             } catch (Throwable ex) {
                 LOG.warn("Get max_instances_num_per_user error", ex);
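
The registration above casts a lambda to Gauge so that MetricRegistry.register accepts it as a Metric. A minimal sketch against the Dropwizard Metrics API (the metric name and value below are made up):

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;

public final class GaugeSketch {
    public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        // The cast selects Gauge<Integer> as the lambda's functional interface,
        // just as the (Gauge) cast does in the hunk above.
        registry.register(MetricRegistry.name("demo", "answer"), (Gauge<Integer>) () -> 42);
        System.out.println(registry.getGauges().get("demo.answer").getValue()); // 42
    }
}
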
diff --git a/fe/fe-core/src/main/java/org/apache/doris/metric/SystemMetrics.java b/fe/fe-core/src/main/java/org/apache/doris/metric/SystemMetrics.java
index bbe0b654ee01c2..53d14e3cf179b5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/metric/SystemMetrics.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/metric/SystemMetrics.java
@@ -114,7 +114,7 @@ private void updateMemoryMetrics() {
         Map<String, Long> memInfoMap = Maps.newHashMap();
 
         try (FileReader fileReader = new FileReader(procFile);
-             BufferedReader br = new BufferedReader(fileReader)) {
+                BufferedReader br = new BufferedReader(fileReader)) {
             String[] parts;
             String line = null;
             while ((line = br.readLine()) != null) {
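
The hunk above only re-indents the second resource of the try statement. For reference, a compact sketch of the same two-resource try-with-resources pattern reading /proc/meminfo (Linux-only; the parsing here is simplified):

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public final class MemInfoSketch {
    public static void main(String[] args) throws IOException {
        Map<String, Long> memInfo = new HashMap<>();
        try (FileReader fileReader = new FileReader("/proc/meminfo");
                BufferedReader br = new BufferedReader(fileReader)) { // both closed automatically, in reverse order
            String line;
            while ((line = br.readLine()) != null) {
                String[] parts = line.split("\\s+"); // e.g. "MemTotal:  16303180 kB"
                if (parts.length >= 2) {
                    memInfo.put(parts[0].replace(":", ""), Long.parseLong(parts[1]));
                }
            }
        }
        System.out.println(memInfo.get("MemTotal"));
    }
}
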
diff --git a/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmInfo.java b/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmInfo.java
index 5304cdb036925f..1a3a44b6856a29 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmInfo.java
@@ -49,14 +49,14 @@ public class JvmInfo {
             pid = -1;
         }
 
-        long heapInit = memoryMXBean.getHeapMemoryUsage().getInit() < 0 ?
-                0 : memoryMXBean.getHeapMemoryUsage().getInit();
-        long heapMax = memoryMXBean.getHeapMemoryUsage().getMax() < 0 ?
-                0 : memoryMXBean.getHeapMemoryUsage().getMax();
-        long nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit() < 0 ?
-                0 : memoryMXBean.getNonHeapMemoryUsage().getInit();
-        long nonHeapMax = memoryMXBean.getNonHeapMemoryUsage().getMax() < 0 ?
-                0 : memoryMXBean.getNonHeapMemoryUsage().getMax();
+        long heapInit = memoryMXBean.getHeapMemoryUsage().getInit() < 0
+                ? 0 : memoryMXBean.getHeapMemoryUsage().getInit();
+        long heapMax = memoryMXBean.getHeapMemoryUsage().getMax() < 0
+                ? 0 : memoryMXBean.getHeapMemoryUsage().getMax();
+        long nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit() < 0
+                ? 0 : memoryMXBean.getNonHeapMemoryUsage().getInit();
+        long nonHeapMax = memoryMXBean.getNonHeapMemoryUsage().getMax() < 0
+                ? 0 : memoryMXBean.getNonHeapMemoryUsage().getMax();
         long directMemoryMax = 0;
         try {
             Class<?> vmClass = Class.forName("sun.misc.VM");
@@ -116,12 +116,14 @@ public class JvmInfo {
                 Object onErrorObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnError");
                 onError = (String) valueMethod.invoke(onErrorObject);
             } catch (Exception ignored) {
+                // CHECKSTYLE IGNORE THIS LINE
             }
 
             try {
                 Object onOutOfMemoryErrorObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnOutOfMemoryError");
                 onOutOfMemoryError = (String) valueMethod.invoke(onOutOfMemoryErrorObject);
             } catch (Exception ignored) {
+                // CHECKSTYLE IGNORE THIS LINE
             }
 
             try {
@@ -129,12 +131,14 @@ public class JvmInfo {
                         "UseCompressedOops");
                 useCompressedOops = (String) valueMethod.invoke(useCompressedOopsVmOptionObject);
             } catch (Exception ignored) {
+                // CHECKSTYLE IGNORE THIS LINE
             }
 
             try {
                 Object useG1GCVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseG1GC");
                 useG1GC = (String) valueMethod.invoke(useG1GCVmOptionObject);
             } catch (Exception ignored) {
+                // CHECKSTYLE IGNORE THIS LINE
             }
 
             try {
@@ -142,22 +146,25 @@ public class JvmInfo {
                         "InitialHeapSize");
                 configuredInitialHeapSize = Long.parseLong((String) valueMethod.invoke(initialHeapSizeVmOptionObject));
             } catch (Exception ignored) {
+                // CHECKSTYLE IGNORE THIS LINE
             }
 
             try {
                 Object maxHeapSizeVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "MaxHeapSize");
                 configuredMaxHeapSize = Long.parseLong((String) valueMethod.invoke(maxHeapSizeVmOptionObject));
             } catch (Exception ignored) {
+                // CHECKSTYLE IGNORE THIS LINE
             }
 
             try {
                 Object useSerialGCVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseSerialGC");
                 useSerialGC = (String) valueMethod.invoke(useSerialGCVmOptionObject);
             } catch (Exception ignored) {
+                // CHECKSTYLE IGNORE THIS LINE
             }
 
         } catch (Exception ignored) {
-
+            // CHECKSTYLE IGNORE THIS LINE
         }
 
         INSTANCE = new JvmInfo(pid, System.getProperty("java.version"), runtimeMXBean.getVmName(),
diff --git a/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmPauseMonitor.java b/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmPauseMonitor.java
index 40153bb9de997f..d4948f6fda0ed6 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmPauseMonitor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmPauseMonitor.java
@@ -132,13 +132,12 @@ private String formatMessage(long extraSleepTime,
             GcTimes diff = gcTimesAfterSleep.get(name).subtract(
                     gcTimesBeforeSleep.get(name));
             if (diff.gcCount != 0) {
-                gcDiffs.add("GC pool '" + name + "' had collection(s): " +
-                        diff.toString());
+                gcDiffs.add("GC pool '" + name + "' had collection(s): " + diff);
             }
         }
 
-        String ret = "Detected pause in JVM or host machine (eg GC): " +
-                "pause of approximately " + extraSleepTime + "ms\n";
+        String ret = "Detected pause in JVM or host machine (eg GC): "
+                + "pause of approximately " + extraSleepTime + "ms\n";
         if (gcDiffs.isEmpty()) {
             ret += "No GCs detected";
         } else {
@@ -216,8 +215,8 @@ public void run() {
                 totalGcExtraSleepTime += extraSleepTime;
                 gcTimesBeforeSleep = gcTimesAfterSleep;
 
-                if (deadlockCheckIntervalS > 0 &&
-                        timeSinceDeadlockCheck.elapsed(TimeUnit.SECONDS) >= deadlockCheckIntervalS) {
+                if (deadlockCheckIntervalS > 0
+                        && timeSinceDeadlockCheck.elapsed(TimeUnit.SECONDS) >= deadlockCheckIntervalS) {
                     checkForDeadlocks();
                     timeSinceDeadlockCheck.reset().start();
                 }
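
For context, the monitor being reformatted here works by sleeping for a fixed interval and attributing any extra wall-clock time to a JVM or host pause. A stripped-down sketch of that loop (the interval and threshold below are illustrative):

public final class PauseProbeSketch {
    public static void main(String[] args) throws InterruptedException {
        final long sleepMs = 500;
        final long warnThresholdMs = 100;
        while (true) {
            long startNs = System.nanoTime();
            Thread.sleep(sleepMs);
            // Any time beyond the requested sleep is time the JVM was stopped or starved.
            long extraMs = (System.nanoTime() - startNs) / 1_000_000 - sleepMs;
            if (extraMs > warnThresholdMs) {
                System.out.println("Detected pause in JVM or host machine (eg GC): "
                        + "pause of approximately " + extraMs + "ms");
            }
        }
    }
}
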
diff --git a/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmStats.java b/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmStats.java
index 97982c4c582f12..ae5375df895d6e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmStats.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmStats.java
@@ -96,13 +96,26 @@ public static JvmStats jvmStats() {
                 continue; // race protection
             }
             switch (threadInfo.getThreadState()) {
-                case NEW:           threadsNew++;           break;
-                case RUNNABLE:      threadsRunnable++;      break;
-                case BLOCKED:       threadsBlocked++;       break;
-                case WAITING:       threadsWaiting++;       break;
-                case TIMED_WAITING: threadsTimedWaiting++;  break;
-                case TERMINATED:    threadsTerminated++;    break;
-                default:                                    break;
+                case NEW:
+                    threadsNew++;
+                    break;
+                case RUNNABLE:
+                    threadsRunnable++;
+                    break;
+                case BLOCKED:
+                    threadsBlocked++;
+                    break;
+                case WAITING:
+                    threadsWaiting++;
+                    break;
+                case TIMED_WAITING:
+                    threadsTimedWaiting++;
+                    break;
+                case TERMINATED:
+                    threadsTerminated++;
+                    break;
+                default:
+                    break;
             }
         }
         Threads threads = new Threads(threadMXBean.getThreadCount(), threadMXBean.getPeakThreadCount(), threadsNew,
diff --git a/fe/fe-core/src/main/java/org/apache/doris/monitor/unit/TimeValue.java b/fe/fe-core/src/main/java/org/apache/doris/monitor/unit/TimeValue.java
index 8f135bff100730..0e411665b6d899 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/monitor/unit/TimeValue.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/monitor/unit/TimeValue.java
@@ -323,7 +323,7 @@ private static long parse(final String initialInput, final String normalized, fi
             return Long.parseLong(s);
         } catch (final NumberFormatException e) {
             try {
-                @SuppressWarnings("unused") final double ignored = Double.parseDouble(s);
+                @SuppressWarnings("unused") final double ignored = Double.parseDouble(s); // CHECKSTYLE IGNORE THIS LINE
                 throw new NumberFormatException("failed to parse, fractional time values are not supported");
             } catch (final NumberFormatException ignored) {
                 throw new NumberFormatException("failed to parse");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlHandshakePacket.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlHandshakePacket.java
index 856b2e443c1a14..56649ac6df5f96 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlHandshakePacket.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlHandshakePacket.java
@@ -88,7 +88,7 @@ public boolean checkAuthPluginSameAsDoris(String pluginName) {
     // If the auth default plugin in client is different from Doris
     // it will create a AuthSwitchRequest
     public void buildAuthSwitchRequest(MysqlSerializer serializer) {
-        serializer.writeInt1((byte)0xfe);
+        serializer.writeInt1((byte) 0xfe);
         serializer.writeNulTerminateString(AUTH_PLUGIN_NAME);
         serializer.writeBytes(authPluginData);
         serializer.writeInt1(0);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlPassword.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlPassword.java
index 64f1a026a2250a..691f89d481f893 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlPassword.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlPassword.java
@@ -88,9 +88,8 @@ public static byte[] createRandomString(int len) {
         random.nextBytes(bytes);
         // NOTE: MySQL challenge string can't contain 0.
         for (int i = 0; i < len; ++i) {
-            if ((bytes[i] >= 'a' && bytes[i] <= 'z')
-                    || (bytes[i] >= 'A' && bytes[i] <='Z')) {
-            } else {
+            if (!((bytes[i] >= 'a' && bytes[i] <= 'z')
+                    || (bytes[i] >= 'A' && bytes[i] <= 'Z'))) {
                 bytes[i] = (byte) ('a' + (bytes[i] % 26));
             }
         }
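
The rewritten loop above folds an empty if-branch into a single negated condition; behavior is unchanged. The invariant it maintains, shown standalone below: every byte of the MySQL challenge ends up nonzero, because 'a' + x for x in -25..25 lies in 72..122 (Java's % keeps the sign), so a NUL can never appear.

import java.security.SecureRandom;

public final class ChallengeSketch {
    public static void main(String[] args) {
        byte[] bytes = new byte[20];
        new SecureRandom().nextBytes(bytes);
        for (int i = 0; i < bytes.length; ++i) {
            if (!((bytes[i] >= 'a' && bytes[i] <= 'z') || (bytes[i] >= 'A' && bytes[i] <= 'Z'))) {
                bytes[i] = (byte) ('a' + (bytes[i] % 26)); // folds non-letters into 72..122, never 0
            }
        }
        for (byte b : bytes) {
            if (b == 0) {
                throw new AssertionError("challenge must be NUL-free");
            }
        }
        System.out.println(new String(bytes));
    }
}
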
diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlProto.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlProto.java
index 6c60a1629b322e..7751d3f7835559 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlProto.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlProto.java
@@ -244,8 +244,8 @@ public static boolean negotiate(ConnectContext context) throws IOException {
             // which Doris is using now.
             // Note: Check the authPacket whether support plugin auth firstly, before we check AuthPlugin between doris and client
             // to compatible with older version: like mysql 5.1
-            if (authPacket.getCapability().isPluginAuth() &&
-                    !handshakePacket.checkAuthPluginSameAsDoris(authPacket.getPluginName())) {
+            if (authPacket.getCapability().isPluginAuth()
+                    && !handshakePacket.checkAuthPluginSameAsDoris(authPacket.getPluginName())) {
                 // 1. clear the serializer
                 serializer.reset();
                 // 2. build the auth switch request and send to the client
diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlSerializer.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlSerializer.java
index ce12168b51e22b..d5c798a622b873 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlSerializer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlSerializer.java
@@ -259,12 +259,6 @@ private int getMysqlTypeLength(PrimitiveType type) {
                     return 19;
                 }
             }
-            // todo:It needs to be obtained according to the field length set during the actual creation,
-            // todo:which is not supported for the time being.default is 255
-//            case DECIMAL:
-//            case DECIMALV2:
-//            case CHAR:
-//            case VARCHAR:
             default:
                 return 255;
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloAuth.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloAuth.java
index 3657e03c9f8035..92a0538766b0d9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloAuth.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloAuth.java
@@ -539,7 +539,7 @@ private boolean checkResourceInternal(UserIdentity currentUser, String resourceN
     }
 
     // Check if LDAP authentication is enabled.
-    private boolean isLdapAuthEnabled(){
+    private boolean isLdapAuthEnabled() {
         return LdapConfig.ldap_authentication_enabled;
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloPrivilege.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloPrivilege.java
index fb4599c233b56c..7236165d637edb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloPrivilege.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloPrivilege.java
@@ -45,14 +45,14 @@ public enum PaloPrivilege {
     };
 
     public static Map<PaloPrivilege, String> privInPaloToMysql =
-        ImmutableMap.<PaloPrivilege, String>builder() // No NODE_PRIV and ADMIN_PRIV in the mysql
-                .put(SELECT_PRIV, "SELECT")
-                .put(LOAD_PRIV, "INSERT")
-                .put(ALTER_PRIV, "ALTER")
-                .put(CREATE_PRIV, "CREATE")
-                .put(DROP_PRIV, "DROP")
-                .put(USAGE_PRIV, "USAGE")
-                .build();
+            ImmutableMap.<PaloPrivilege, String>builder() // No NODE_PRIV and ADMIN_PRIV in the mysql
+                    .put(SELECT_PRIV, "SELECT")
+                    .put(LOAD_PRIV, "INSERT")
+                    .put(ALTER_PRIV, "ALTER")
+                    .put(CREATE_PRIV, "CREATE")
+                    .put(DROP_PRIV, "DROP")
+                    .put(USAGE_PRIV, "USAGE")
+                    .build();
 
     private String name;
     private int idx;
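
One subtlety worth noting in the hunk above: the explicit type witness on the builder is load-bearing. A bare ImmutableMap.builder() at the head of a chained expression infers Builder<Object, Object>, and its build() result would then not be assignable to Map<PaloPrivilege, String>. A standalone illustration with a stand-in enum:

import com.google.common.collect.ImmutableMap;
import java.util.Map;

public final class WitnessSketch {
    enum Priv { SELECT_PRIV, LOAD_PRIV }

    static final Map<Priv, String> PRIV_TO_MYSQL =
            ImmutableMap.<Priv, String>builder() // witness required: builder() alone infers <Object, Object>
                    .put(Priv.SELECT_PRIV, "SELECT")
                    .put(Priv.LOAD_PRIV, "INSERT")
                    .build();

    public static void main(String[] args) {
        System.out.println(PRIV_TO_MYSQL.get(Priv.SELECT_PRIV)); // SELECT
    }
}
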
diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java
index f53cfea1405e3c..22df74a925dc31 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java
@@ -133,7 +133,7 @@ public long getMaxConn() {
     }
 
     public long getMaxQueryInstances() {
-        return commonProperties.getMaxQueryInstances();// maxQueryInstances;
+        return commonProperties.getMaxQueryInstances(); // maxQueryInstances;
     }
 
     public String[] getSqlBlockRules() {
@@ -275,8 +275,8 @@ public void update(List<Pair<String, String>> properties) throws UserException {
                 }
 
                 // check if sql_block_rule has already exist
-                for (String ruleName : value.replaceAll(" ","").split(",")){
-                    if (!ruleName.equals("") && !Catalog.getCurrentCatalog().getSqlBlockRuleMgr().existRule(ruleName)){
+                for (String ruleName : value.replaceAll(" ", "").split(",")) {
+                    if (!ruleName.equals("") && !Catalog.getCurrentCatalog().getSqlBlockRuleMgr().existRule(ruleName)) {
                         throw new DdlException("the sql block rule " + ruleName + " not exist");
                     }
                 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserResource.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserResource.java
index 22579330213c7c..1ce8c4dc496109 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserResource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserResource.java
@@ -125,7 +125,7 @@ public String toString() {
     public void write(DataOutput out) throws IOException {
         resource.write(out);
         out.writeInt(shareByGroup.size());
-        for (Map.Entry<String,AtomicInteger> entry : shareByGroup.entrySet()) {
+        for (Map.Entry<String, AtomicInteger> entry : shareByGroup.entrySet()) {
             Text.writeString(out, entry.getKey());
             out.writeInt(entry.getValue().get());
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/AlterViewInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/AlterViewInfo.java
index 3f14c65addb0bb..df1fb5696c6b15 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/persist/AlterViewInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/persist/AlterViewInfo.java
@@ -90,9 +90,9 @@ public boolean equals(Object other) {
             return false;
         }
         AlterViewInfo otherInfo = (AlterViewInfo) other;
-        return dbId == otherInfo.getDbId() && tableId == otherInfo.getTableId() &&
-                inlineViewDef.equalsIgnoreCase(otherInfo.getInlineViewDef()) && sqlMode == otherInfo.getSqlMode() &&
-                newFullSchema.equals(otherInfo.getNewFullSchema());
+        return dbId == otherInfo.getDbId() && tableId == otherInfo.getTableId()
+                && inlineViewDef.equalsIgnoreCase(otherInfo.getInlineViewDef()) && sqlMode == otherInfo.getSqlMode()
+                && newFullSchema.equals(otherInfo.getNewFullSchema());
     }
 
     @Override
diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/BackendTabletsInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/BackendTabletsInfo.java
index 23c0a98725dd73..aa3a57cf226731 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/persist/BackendTabletsInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/persist/BackendTabletsInfo.java
@@ -126,9 +126,7 @@ public void readFields(DataInput in) throws IOException {
             replicaPersistInfos = Lists.newArrayList();
         }
 
-        if (in.readBoolean()) {
-
-        }
+        in.readBoolean();
     }
 
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/DropInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/DropInfo.java
index 7d93f83fefc8c1..5d6162834d7dd0 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/persist/DropInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/persist/DropInfo.java
@@ -87,7 +87,7 @@ public static DropInfo read(DataInput in) throws IOException {
         return dropInfo;
     }
 
-    public boolean equals (Object obj) {
+    public boolean equals(Object obj) {
         if (this == obj) {
             return true;
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/ModifyPartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/ModifyPartitionInfo.java
index 7b043aa92e4607..1cd1c55e13101b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/persist/ModifyPartitionInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/persist/ModifyPartitionInfo.java
@@ -107,8 +107,8 @@ public boolean equals(Object other) {
             return false;
         }
         ModifyPartitionInfo otherInfo = (ModifyPartitionInfo) other;
-        return dbId == otherInfo.getDbId() && tableId == otherInfo.getTableId() &&
-                dataProperty.equals(otherInfo.getDataProperty()) && replicaAlloc.equals(otherInfo.replicaAlloc)
+        return dbId == otherInfo.getDbId() && tableId == otherInfo.getTableId()
+                && dataProperty.equals(otherInfo.getDataProperty()) && replicaAlloc.equals(otherInfo.replicaAlloc)
                 && isInMemory == otherInfo.isInMemory();
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/ReplicaPersistInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/ReplicaPersistInfo.java
index a49174f5bc0826..be999316ade3ab 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/persist/ReplicaPersistInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/persist/ReplicaPersistInfo.java
@@ -43,12 +43,12 @@ public enum ReplicaOperationType {
 
         private final int value;
 
-        private ReplicaOperationType(int value) {
-          this.value = value;
+        ReplicaOperationType(int value) {
+            this.value = value;
         }
 
         public int getValue() {
-          return value;
+            return value;
         }
 
         public static ReplicaOperationType findByValue(int value) {
@@ -77,7 +77,7 @@ public static ReplicaOperationType findByValue(int value) {
                     return null;
             }
         }
-      }
+    }
 
     // required
     private ReplicaOperationType opType;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/Storage.java b/fe/fe-core/src/main/java/org/apache/doris/persist/Storage.java
index f7e4667ea7e870..eec6904476e778 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/persist/Storage.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/persist/Storage.java
@@ -124,7 +124,7 @@ public void reload() throws IOException {
             String name = child.getName();
             try {
                 if (!name.equals(EDITS) && !name.equals(IMAGE_NEW)
-                    && !name.endsWith(".part") && name.contains(".")) {
+                        && !name.endsWith(".part") && name.contains(".")) {
                     if (name.startsWith(IMAGE)) {
                         long fileSeq = Long.parseLong(name.substring(name.lastIndexOf('.') + 1));
                         if (latestImageSeq < fileSeq) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/TablePropertyInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/TablePropertyInfo.java
index aca690f58e6702..0efbe65db65073 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/persist/TablePropertyInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/persist/TablePropertyInfo.java
@@ -76,7 +76,6 @@ public void write(DataOutput out) throws IOException {
     }
 
     public void readFields(DataInput in) throws IOException {
-        long dbId = -1;
         tableId = in.readLong();
         if (in.readBoolean()) {
             groupId = GroupId.read(in);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java b/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java
index b7517679e9aee0..7f5c4170c3729c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java
@@ -234,13 +234,13 @@ public Table<R, C, V> deserialize(JsonElement json, Type typeOfT, JsonDeserializ
             Type typeOfR;
             Type typeOfC;
             Type typeOfV;
-            {
+            { // CHECKSTYLE IGNORE THIS LINE
                 ParameterizedType parameterizedType = (ParameterizedType) typeOfT;
                 Type[] actualTypeArguments = parameterizedType.getActualTypeArguments();
                 typeOfR = actualTypeArguments[0];
                 typeOfC = actualTypeArguments[1];
                 typeOfV = actualTypeArguments[2];
-            }
+            } // CHECKSTYLE IGNORE THIS LINE
             JsonObject tableJsonObject = json.getAsJsonObject();
             String tableClazz = tableJsonObject.get("clazz").getAsString();
             JsonArray rowKeysJsonArray = tableJsonObject.getAsJsonArray("rowKeys");
@@ -372,14 +372,13 @@ public JsonElement serialize(AtomicBoolean atomicBoolean, Type type,
         }
     }
 
-    public final static class ImmutableMapDeserializer implements JsonDeserializer<ImmutableMap<?,?>> {
+    public final static class ImmutableMapDeserializer implements JsonDeserializer<ImmutableMap<?, ?>> {
         @Override
-        public ImmutableMap<?,?> deserialize(final JsonElement json, final Type type,
-                                             final JsonDeserializationContext context) throws JsonParseException
-        {
+        public ImmutableMap<?, ?> deserialize(final JsonElement json, final Type type,
+                                             final JsonDeserializationContext context) throws JsonParseException {
             final Type type2 =
                     TypeUtils.parameterize(Map.class, ((ParameterizedType) type).getActualTypeArguments());
-            final Map<?,?> map = context.deserialize(json, type2);
+            final Map<?, ?> map = context.deserialize(json, type2);
             return ImmutableMap.copyOf(map);
         }
     }
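
For completeness, a sketch of how a deserializer like the one above is typically wired up. registerTypeHierarchyAdapter is standard Gson API and matches parameterized uses of ImmutableMap (a plain registerTypeAdapter(ImmutableMap.class, ...) would only match the raw type); whether Doris registers it exactly this way is not shown in this diff.

import com.google.common.collect.ImmutableMap;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.reflect.TypeToken;
import java.lang.reflect.Type;

public final class GsonWiringSketch {
    public static void main(String[] args) {
        // Assumes org.apache.doris.persist.gson.GsonUtils is on the classpath.
        Gson gson = new GsonBuilder()
                .registerTypeHierarchyAdapter(ImmutableMap.class, new GsonUtils.ImmutableMapDeserializer())
                .create();
        Type type = new TypeToken<ImmutableMap<String, Integer>>() {}.getType();
        ImmutableMap<String, Integer> map = gson.fromJson("{\"a\":1,\"b\":2}", type);
        System.out.println(map); // {a=1, b=2}
    }
}
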
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/AggregationNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/AggregationNode.java
index d49332d5f98d11..541543bb006b69 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/AggregationNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/AggregationNode.java
@@ -269,14 +269,13 @@ protected void toThrift(TPlanNode msg) {
         msg.node_type = TPlanNodeType.AGGREGATION_NODE;
         List<TExpr> aggregateFunctions = Lists.newArrayList();
         // only serialize agg exprs that are being materialized
-        for (FunctionCallExpr e: aggInfo.getMaterializedAggregateExprs()) {
+        for (FunctionCallExpr e : aggInfo.getMaterializedAggregateExprs()) {
             aggregateFunctions.add(e.treeToThrift());
         }
-        msg.agg_node =
-          new TAggregationNode(
-                  aggregateFunctions,
-                  aggInfo.getIntermediateTupleId().asInt(),
-                  aggInfo.getOutputTupleId().asInt(), needsFinalize);
+        msg.agg_node = new TAggregationNode(
+                aggregateFunctions,
+                aggInfo.getIntermediateTupleId().asInt(),
+                aggInfo.getOutputTupleId().asInt(), needsFinalize);
         msg.agg_node.setUseStreamingPreaggregation(useStreamingPreagg);
         List<Expr> groupingExprs = aggInfo.getGroupingExprs();
         if (groupingExprs != null) {
@@ -308,8 +307,7 @@ public String getNodeExplainString(String detailPrefix, TExplainLevel detailLeve
                     getExplainString(aggInfo.getAggregateExprs()) + "\n");
         }
         // TODO: group by can be very long. Break it into multiple lines
-        output.append(detailPrefix + "group by: ").append(
-          getExplainString(aggInfo.getGroupingExprs()) + "\n");
+        output.append(detailPrefix + "group by: ").append(getExplainString(aggInfo.getGroupingExprs()) + "\n");
         if (!conjuncts.isEmpty()) {
             output.append(detailPrefix + "having: ").append(getExplainString(conjuncts) + "\n");
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticEvalNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticEvalNode.java
index 1a10f785bd1609..5c2c564bf1b564 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticEvalNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticEvalNode.java
@@ -75,11 +75,11 @@ public class AnalyticEvalNode extends PlanNode {
     private final TupleDescriptor bufferedTupleDesc;
 
     public AnalyticEvalNode(
-        PlanNodeId id, PlanNode input, List<Expr> analyticFnCalls,
-        List<Expr> partitionExprs, List<OrderByElement> orderByElements,
-        AnalyticWindow analyticWindow, TupleDescriptor intermediateTupleDesc,
-        TupleDescriptor outputTupleDesc, ExprSubstitutionMap logicalToPhysicalSmap,
-        Expr partitionByEq, Expr orderByEq, TupleDescriptor bufferedTupleDesc) {
+            PlanNodeId id, PlanNode input, List<Expr> analyticFnCalls,
+            List<Expr> partitionExprs, List<OrderByElement> orderByElements,
+            AnalyticWindow analyticWindow, TupleDescriptor intermediateTupleDesc,
+            TupleDescriptor outputTupleDesc, ExprSubstitutionMap logicalToPhysicalSmap,
+            Expr partitionByEq, Expr orderByEq, TupleDescriptor bufferedTupleDesc) {
         super(id, input.getTupleIds(), "ANALYTIC");
         Preconditions.checkState(!tupleIds.contains(outputTupleDesc.getId()));
         // we're materializing the input row augmented with the analytic output tuple
@@ -185,8 +185,7 @@ protected void toThrift(TPlanNode msg) {
         msg.analytic_node.setIntermediateTupleId(intermediateTupleDesc.getId().asInt());
         msg.analytic_node.setOutputTupleId(outputTupleDesc.getId().asInt());
         msg.analytic_node.setPartitionExprs(Expr.treesToThrift(substitutedPartitionExprs));
-        msg.analytic_node.setOrderByExprs(
-            Expr.treesToThrift(OrderByElement.getOrderByExprs(orderByElements)));
+        msg.analytic_node.setOrderByExprs(Expr.treesToThrift(OrderByElement.getOrderByExprs(orderByElements)));
         msg.analytic_node.setAnalyticFunctions(Expr.treesToThrift(analyticFnCalls));
 
         if (analyticWindow == null) {
@@ -260,8 +259,7 @@ public String getNodeExplainString(String prefix, TExplainLevel detailLevel) {
         }
 
         if (!conjuncts.isEmpty()) {
-            output.append(
-                prefix + "predicates: " + getExplainString(conjuncts) + "\n");
+            output.append(prefix + "predicates: " + getExplainString(conjuncts) + "\n");
         }
 
         return output.toString();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticPlanner.java
index f276c50e737b1b..e842b57ba9225e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticPlanner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticPlanner.java
@@ -121,8 +121,7 @@ public PlanNode createSingleNodePlan(PlanNode root,
 
         if (groupingExprs != null) {
             Preconditions.checkNotNull(inputPartitionExprs);
-            computeInputPartitionExprs(
-                partitionGroups, groupingExprs, root.getNumNodes(), inputPartitionExprs);
+            computeInputPartitionExprs(partitionGroups, groupingExprs, root.getNumNodes(), inputPartitionExprs);
         }
 
         PlanNode newRoot = root;
@@ -170,8 +169,7 @@ private void mergeSortGroups(List<SortGroup> sortGroups) {
      * partition exprs has ndv estimate > numNodes, so that the resulting plan
      * still parallelizes across all nodes.
      */
-    private void mergePartitionGroups(
-        List<PartitionGroup> partitionGroups, int numNodes) {
+    private void mergePartitionGroups(List<PartitionGroup> partitionGroups, int numNodes) {
         boolean hasMerged = false;
 
         do {
@@ -273,14 +271,11 @@ private void orderGroups(List<PartitionGroup> partitionGroups) {
         }
 
         // order by ascending combined output tuple size
-        Collections.sort(partitionGroups,
-        new Comparator<PartitionGroup>() {
-            public int compare(PartitionGroup pg1, PartitionGroup pg2) {
-                Preconditions.checkState(pg1.totalOutputTupleSize > 0);
-                Preconditions.checkState(pg2.totalOutputTupleSize > 0);
-                int diff = pg1.totalOutputTupleSize - pg2.totalOutputTupleSize;
-                return (diff < 0 ? -1 : (diff > 0 ? 1 : 0));
-            }
+        partitionGroups.sort((pg1, pg2) -> {
+            Preconditions.checkState(pg1.totalOutputTupleSize > 0);
+            Preconditions.checkState(pg2.totalOutputTupleSize > 0);
+            int diff = pg1.totalOutputTupleSize - pg2.totalOutputTupleSize;
+            return (Integer.compare(diff, 0));
         });
 
         if (nonPartitioning != null) {
@@ -298,8 +293,8 @@ public int compare(PartitionGroup pg1, PartitionGroup pg2) {
      * @throws AnalysisException
      */
     private SortInfo createSortInfo(
-        PlanNode input, List<Expr> sortExprs, List<Boolean> isAsc,
-        List<Boolean> nullsFirst) throws AnalysisException {
+            PlanNode input, List<Expr> sortExprs, List<Boolean> isAsc,
+            List<Boolean> nullsFirst) throws AnalysisException {
         // create tuple for sort output = the entire materialized input in a single tuple
         TupleDescriptor sortTupleDesc =
                 analyzer.getDescTbl().createTupleDescriptor("sort-tuple");
@@ -350,7 +345,7 @@ private SortInfo createSortInfo(
             Expr.removeDuplicates(tupleIsNullPredsToMaterialize);
 
             // Materialize relevant unique TupleIsNullPredicates.
-            for (Expr tupleIsNullPred: tupleIsNullPredsToMaterialize) {
+            for (Expr tupleIsNullPred : tupleIsNullPredsToMaterialize) {
                 SlotDescriptor sortSlotDesc = analyzer.addSlotDescriptor(sortTupleDesc);
                 sortSlotDesc.setType(tupleIsNullPred.getType());
                 sortSlotDesc.setIsMaterialized(true);
@@ -429,8 +424,7 @@ private PlanNode createSortGroupPlan(PlanNode root, SortGroup sortGroup,
                 DataPartition inputPartition = DataPartition.UNPARTITIONED;
 
                 if (!partitionExprs.isEmpty()) {
-                    inputPartition =
-                        new DataPartition(TPartitionType.HASH_PARTITIONED, partitionExprs);
+                    inputPartition = new DataPartition(TPartitionType.HASH_PARTITIONED, partitionExprs);
                 }
 
                 sortNode.setInputPartition(inputPartition);
@@ -592,8 +586,7 @@ private static boolean requiresIndependentEval(AnalyticExpr analyticExpr) {
          * match ours.
          */
         public boolean isCompatible(AnalyticExpr analyticExpr) {
-            if (requiresIndependentEval(analyticExprs.get(0)) ||
-                    requiresIndependentEval(analyticExpr)) {
+            if (requiresIndependentEval(analyticExprs.get(0)) || requiresIndependentEval(analyticExpr)) {
                 return false;
             }
 
@@ -639,11 +632,10 @@ public void init(Analyzer analyzer, String tupleName) {
             Preconditions.checkState(physicalIntermediateTuple == null);
             Preconditions.checkState(analyticFnCalls.size() == analyticExprs.size());
 
-            //          If needed, create the intermediate tuple first to maintain
-            //          intermediateTupleId < outputTupleId for debugging purposes and consistency with
-            //          tuple creation for aggregations.
-            boolean requiresIntermediateTuple =
-                AnalyticInfo.requiresIntermediateTuple(analyticFnCalls);
+            // If needed, create the intermediate tuple first to maintain
+            // intermediateTupleId < outputTupleId for debugging purposes and consistency with
+            // tuple creation for aggregations.
+            boolean requiresIntermediateTuple = AnalyticInfo.requiresIntermediateTuple(analyticFnCalls);
 
             if (requiresIntermediateTuple) {
                 physicalIntermediateTuple =
@@ -898,14 +890,11 @@ public void merge(PartitionGroup other) {
          * volume of data that needs to be sorted.
          */
         public void orderSortGroups() {
-            Collections.sort(sortGroups,
-            new Comparator<SortGroup>() {
-                public int compare(SortGroup sg1, SortGroup sg2) {
-                    Preconditions.checkState(sg1.totalOutputTupleSize > 0);
-                    Preconditions.checkState(sg2.totalOutputTupleSize > 0);
-                    int diff = sg1.totalOutputTupleSize - sg2.totalOutputTupleSize;
-                    return (diff < 0 ? -1 : (diff > 0 ? 1 : 0));
-                }
+            sortGroups.sort((sg1, sg2) -> {
+                Preconditions.checkState(sg1.totalOutputTupleSize > 0);
+                Preconditions.checkState(sg2.totalOutputTupleSize > 0);
+                int diff = sg1.totalOutputTupleSize - sg2.totalOutputTupleSize;
+                return (Integer.compare(diff, 0));
             });
 
             for (SortGroup sortGroup : sortGroups) {
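
On the two comparator hunks above: Integer.compare(diff, 0) is simply a clearer spelling of the old nested ternary sign test. The subtraction itself is safe here because both sizes are checked positive first, but as a general pattern, compare-by-subtraction overflows; comparing the operands directly is the robust form:

public final class CompareSketch {
    public static void main(String[] args) {
        int a = Integer.MIN_VALUE;
        int b = 1;
        System.out.println(a - b > 0);             // true: the subtraction wrapped, wrongly reporting a > b
        System.out.println(Integer.compare(a, b)); // -1: the overflow-safe comparison
    }
}
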
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/BrokerScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/BrokerScanNode.java
index a824050258e86f..14cf1164bc9aa9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/BrokerScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/BrokerScanNode.java
@@ -66,7 +66,6 @@
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
-
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/DataPartition.java b/fe/fe-core/src/main/java/org/apache/doris/planner/DataPartition.java
index 6ae5d60b5f7c16..753520869408ec 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/DataPartition.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/DataPartition.java
@@ -60,9 +60,9 @@ public class DataPartition {
     public DataPartition(TPartitionType type, List<Expr> exprs) {
         Preconditions.checkNotNull(exprs);
         Preconditions.checkState(!exprs.isEmpty());
-        Preconditions.checkState(
-          type == TPartitionType.HASH_PARTITIONED || type == TPartitionType.RANGE_PARTITIONED
-                  || type == TPartitionType.BUCKET_SHFFULE_HASH_PARTITIONED);
+        Preconditions.checkState(type == TPartitionType.HASH_PARTITIONED
+                || type == TPartitionType.RANGE_PARTITIONED
+                || type == TPartitionType.BUCKET_SHFFULE_HASH_PARTITIONED);
         this.type = type;
         this.partitionExprs = ImmutableList.copyOf(exprs);
     }
@@ -73,8 +73,7 @@ public void substitute(ExprSubstitutionMap smap, Analyzer analyzer) throws Analy
     }
 
     public DataPartition(TPartitionType type) {
-        Preconditions.checkState(
-          type == TPartitionType.UNPARTITIONED || type == TPartitionType.RANDOM);
+        Preconditions.checkState(type == TPartitionType.UNPARTITIONED || type == TPartitionType.RANDOM);
         this.type = type;
         this.partitionExprs = ImmutableList.of();
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/DataSink.java b/fe/fe-core/src/main/java/org/apache/doris/planner/DataSink.java
index 03f24a532481be..3e0ff32d0676ed 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/DataSink.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/DataSink.java
@@ -48,8 +48,13 @@ public abstract class DataSink {
 
     protected abstract TDataSink toThrift();
 
-    public void setFragment(PlanFragment fragment) { this.fragment = fragment; }
-    public PlanFragment getFragment() { return fragment; }
+    public void setFragment(PlanFragment fragment) {
+        this.fragment = fragment;
+    }
+
+    public PlanFragment getFragment() {
+        return fragment;
+    }
 
     public abstract PlanNodeId getExchNodeId();
 
@@ -59,7 +64,7 @@ public static DataSink createDataSink(Table table) throws AnalysisException {
         if (table instanceof MysqlTable) {
             return new MysqlTableSink((MysqlTable) table);
         } else if (table instanceof OdbcTable) {
-            return new OdbcTableSink((OdbcTable)table);
+            return new OdbcTableSink((OdbcTable) table);
         } else {
             throw new AnalysisException("Unknown table type " + table.getType());
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/DataStreamSink.java b/fe/fe-core/src/main/java/org/apache/doris/planner/DataStreamSink.java
index f16f86884204f9..0f903d69d107bc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/DataStreamSink.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/DataStreamSink.java
@@ -66,7 +66,7 @@ public String getExplainString(String prefix, TExplainLevel explainLevel) {
     protected TDataSink toThrift() {
         TDataSink result = new TDataSink(TDataSinkType.DATA_STREAM_SINK);
         TDataStreamSink tStreamSink =
-          new TDataStreamSink(exchNodeId.asInt(), outputPartition.toThrift());
+                new TDataStreamSink(exchNodeId.asInt(), outputPartition.toThrift());
         result.setStreamSink(tStreamSink);
         return result;
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanner.java
index 2b4485f1a147c0..0e9f511c7f7724 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanner.java
@@ -615,8 +615,8 @@ private boolean canBucketShuffleJoin(HashJoinNode node, PlanNode leftRoot,
         //1 the left table has more than one partition or left table is not a stable colocate table
         if (leftScanNode.getSelectedPartitionIds().size() != 1) {
             ColocateTableIndex colocateIndex = Catalog.getCurrentColocateIndex();
-            if (!leftTable.isColocateTable() ||
-                    colocateIndex.isGroupUnstable(colocateIndex.getGroup(leftTable.getId()))) {
+            if (!leftTable.isColocateTable()
+                    || colocateIndex.isGroupUnstable(colocateIndex.getGroup(leftTable.getId()))) {
                 return false;
             }
         }
@@ -1108,7 +1108,7 @@ private PlanFragment createPhase2DistinctAggregationFragment(
             DataPartition mergePartition =
                     partitionExprs == null ? DataPartition.UNPARTITIONED : DataPartition.hashPartitioned(partitionExprs);
             // Convert the existing node to a preaggregation.
-            AggregationNode preaggNode = (AggregationNode)node.getChild(0);
+            AggregationNode preaggNode = (AggregationNode) node.getChild(0);
 
             preaggNode.setIsPreagg(ctx);
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/EmptySetNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/EmptySetNode.java
index bda1f85aa54c79..bee839d430bfe8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/EmptySetNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/EmptySetNode.java
@@ -59,7 +59,7 @@ public void init(Analyzer analyzer) {
         // the logical output tuple is returned by getMaterializedTupleIds(). It needs
         // to be set as materialized (even though it isn't) to avoid failing precondition
         // checks generating the thrift for slot refs that may reference this tuple.
-        for (TupleId id: tupleIds) {
+        for (TupleId id : tupleIds) {
             analyzer.getTupleDesc(id).setIsMaterialized(true);
         }
         computeTupleStatAndMemLayout(analyzer);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/ExchangeNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/ExchangeNode.java
index c1c24960eca685..6d1125407ec194 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/ExchangeNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/ExchangeNode.java
@@ -138,8 +138,8 @@ protected void toThrift(TPlanNode msg) {
         }
         if (mergeInfo != null) {
             TSortInfo sortInfo = new TSortInfo(
-                Expr.treesToThrift(mergeInfo.getOrderingExprs()), mergeInfo.getIsAscOrder(),
-                mergeInfo.getNullsFirst());
+                    Expr.treesToThrift(mergeInfo.getOrderingExprs()),
+                    mergeInfo.getIsAscOrder(), mergeInfo.getNullsFirst());
             msg.exchange_node.setSortInfo(sortInfo);
             msg.exchange_node.setOffset(offset);
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/HashJoinNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/HashJoinNode.java
index ab04131c162f50..2a13ecbda6491b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/HashJoinNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/HashJoinNode.java
@@ -203,8 +203,8 @@ public void initOutputSlotIds(Set requiredSlotIdSet, Analyzer analyzer)
         outputSlotIds = Lists.newArrayList();
         for (TupleId tupleId : tupleIds) {
             for (SlotDescriptor slotDescriptor : analyzer.getTupleDesc(tupleId).getSlots()) {
-                if (slotDescriptor.isMaterialized() &&
-                        (requiredSlotIdSet == null || requiredSlotIdSet.contains(slotDescriptor.getId()))) {
+                if (slotDescriptor.isMaterialized()
+                        && (requiredSlotIdSet == null || requiredSlotIdSet.contains(slotDescriptor.getId()))) {
                     outputSlotIds.add(slotDescriptor.getId());
                 }
             }
@@ -356,8 +356,8 @@ private static boolean hasNumRowsAndNdvStats(SlotDescriptor slotDesc) {
          * Groups the given EqJoinConjunctScanSlots by the lhs/rhs tuple combination
          * and returns the result as a map.
          */
-        public static Map, List>
-        groupByJoinedTupleIds(List eqJoinConjunctSlots) {
+        public static Map, List> groupByJoinedTupleIds(
+                List eqJoinConjunctSlots) {
             Map, List> scanSlotsByJoinedTids =
                     new LinkedHashMap<>();
             for (EqJoinConjunctScanSlots slots : eqJoinConjunctSlots) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java
index 980701e6ca5426..c22b4842e2ef2d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java
@@ -131,10 +131,10 @@ private void initHiveTblProperties() throws DdlException {
         this.fileFormat = HiveMetaStoreClientHelper.HiveFileFormat.getFormat(remoteHiveTable.getSd().getInputFormat());
 
         Map serDeInfoParams = remoteHiveTable.getSd().getSerdeInfo().getParameters();
-        this.columnSeparator = Strings.isNullOrEmpty(serDeInfoParams.get("field.delim")) ?
-                HIVE_DEFAULT_COLUMN_SEPARATOR : serDeInfoParams.get("field.delim");
-        this.lineDelimiter = Strings.isNullOrEmpty(serDeInfoParams.get("line.delim")) ?
-                HIVE_DEFAULT_LINE_DELIMITER : serDeInfoParams.get("line.delim");
+        this.columnSeparator = Strings.isNullOrEmpty(serDeInfoParams.get("field.delim"))
+                ? HIVE_DEFAULT_COLUMN_SEPARATOR : serDeInfoParams.get("field.delim");
+        this.lineDelimiter = Strings.isNullOrEmpty(serDeInfoParams.get("line.delim"))
+                ? HIVE_DEFAULT_LINE_DELIMITER : serDeInfoParams.get("line.delim");
         this.path = remoteHiveTable.getSd().getLocation();
         for (FieldSchema fieldSchema : remoteHiveTable.getPartitionKeys()) {
             this.partitionKeys.add(fieldSchema.getName());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/JoinCostEvaluation.java b/fe/fe-core/src/main/java/org/apache/doris/planner/JoinCostEvaluation.java
index 9a7dd84c77e71e..e3612c6c5a5839 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/JoinCostEvaluation.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/JoinCostEvaluation.java
@@ -145,7 +145,7 @@ public boolean isBroadcastCostSmaller()  {
     public long constructHashTableSpace() {
         double bucketPointerSpace = ((double) rhsTreeCardinality / 0.75) * 8;
         double nodeArrayLen =
-                Math.pow(1.5, (int) ((Math.log((double) rhsTreeCardinality/4096) / Math.log(1.5)) + 1)) * 4096;
+                Math.pow(1.5, (int) ((Math.log((double) rhsTreeCardinality / 4096) / Math.log(1.5)) + 1)) * 4096;
         double nodeOverheadSpace = nodeArrayLen * 16;
         double nodeTuplePointerSpace = nodeArrayLen * rhsTreeTupleIdNum * 8;
         return Math.round((bucketPointerSpace + (double) rhsTreeCardinality * rhsTreeAvgRowSize
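
For reference, a standalone sketch of the estimate in constructHashTableSpace() above, with the constants read off the patched expression (0.75 load factor, growth by powers of 1.5 over a 4096-entry base chunk, 16-byte node overhead, 8-byte pointers). The tail of the return statement is cut off by the hunk, so summing exactly these four terms is an assumption; the class and parameter names are illustrative only.

public class HashTableSpaceSketch {
    static long estimate(long cardinality, float avgRowSize, int tupleIdNum) {
        // One 8-byte bucket pointer per slot at a 0.75 load factor.
        double bucketPointerSpace = ((double) cardinality / 0.75) * 8;
        // Node array grows by factors of 1.5 starting from 4096 entries.
        double nodeArrayLen =
                Math.pow(1.5, (int) ((Math.log((double) cardinality / 4096) / Math.log(1.5)) + 1)) * 4096;
        double nodeOverheadSpace = nodeArrayLen * 16;
        double nodeTuplePointerSpace = nodeArrayLen * tupleIdNum * 8;
        return Math.round(bucketPointerSpace + cardinality * avgRowSize
                + nodeOverheadSpace + nodeTuplePointerSpace);
    }

    public static void main(String[] args) {
        // 1M build-side rows of 64 bytes each, 2 tuple ids per node.
        System.out.println(estimate(1_000_000L, 64.0f, 2));
    }
}
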
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/ListPartitionPruner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/ListPartitionPruner.java
index 7e90bd9fb55818..64b7650045d930 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/ListPartitionPruner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/ListPartitionPruner.java
@@ -190,7 +190,6 @@ private Collection prune(
             BoundType upperType = filter.upperBoundInclusive ? BoundType.CLOSED : BoundType.OPEN;
             boolean isPushMin = false;
             boolean isPushMax = false;
-            int lastColumnId = partitionColumns.size() - 1;
             if (filter.lowerBound != null) {
                 minKey.pushColumn(filter.lowerBound, keyColumn.getDataType());
             } else {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/ListPartitionPrunerV2.java b/fe/fe-core/src/main/java/org/apache/doris/planner/ListPartitionPrunerV2.java
index fffff0fb6626e7..eefe8ba11488e8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/ListPartitionPrunerV2.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/ListPartitionPrunerV2.java
@@ -51,8 +51,8 @@ public ListPartitionPrunerV2(Map idToPartitionItem,
             idToPartitionItem.forEach((id, item) -> {
                 List keys = item.getItems();
                 List> ranges = keys.stream()
-                    .map(key -> Range.closed(key, key))
-                    .collect(Collectors.toList());
+                        .map(key -> Range.closed(key, key))
+                        .collect(Collectors.toList());
                 for (int i = 0; i < ranges.size(); i++) {
                     uidToPartitionRange.put(new ListPartitionUniqueId(id, i), ranges.get(i));
                 }
@@ -66,8 +66,8 @@ RangeMap getCandidateRangeMap() {
         idToPartitionItem.forEach((id, item) -> {
             List keys = item.getItems();
             List> ranges = keys.stream()
-                .map(key -> Range.closed(key, key))
-                .collect(Collectors.toList());
+                    .map(key -> Range.closed(key, key))
+                    .collect(Collectors.toList());
             for (int i = 0; i < ranges.size(); i++) {
                 candidate.put(mapPartitionKeyRange(ranges.get(i), 0),
                     new ListPartitionUniqueId(id, i));
@@ -101,7 +101,7 @@ FinalFilters getFinalFilters(ColumnRange columnRange,
 
     @Override
     Collection pruneMultipleColumnPartition(
-        Map columnToFilters) throws AnalysisException {
+            Map columnToFilters) throws AnalysisException {
         Map, UniqueId> rangeToId = Maps.newHashMap();
         uidToPartitionRange.forEach((uid, range) -> rangeToId.put(range, uid));
         return doPruneMultiple(columnToFilters, rangeToId, 0);
@@ -125,11 +125,9 @@ private Collection doPruneMultiple(Map columnToFilte
                 // Grouping partition ranges by the range of column value indexed by `columnIdx`,
                 // so that to compare with the filters.
                 Map, List> grouped =
-                    partitionRangeToUid
-                        .entrySet()
-                        .stream()
-                        .collect(Collectors.groupingBy(entry -> mapPartitionKeyRange(entry.getKey(), columnIdx),
-                            Collectors.mapping(Map.Entry::getValue, Collectors.toList())));
+                        partitionRangeToUid.entrySet().stream()
+                                .collect(Collectors.groupingBy(entry -> mapPartitionKeyRange(entry.getKey(), columnIdx),
+                                        Collectors.mapping(Map.Entry::getValue, Collectors.toList())));
 
                 // Convert the grouped map to a RangeMap.
                 TreeRangeMap> candidateRangeMap = TreeRangeMap.create();
@@ -138,14 +136,13 @@ private Collection doPruneMultiple(Map columnToFilte
                 return finalFilters.filters.stream()
                     .map(filter -> {
                         RangeMap> filtered =
-                            candidateRangeMap.subRangeMap(filter);
+                                candidateRangeMap.subRangeMap(filter);
                         // Find PartitionKey ranges according to filtered UniqueIds.
                         Map, UniqueId> filteredPartitionRange =
-                            filtered.asMapOfRanges().values()
-                                .stream()
-                                .flatMap(List::stream)
-                                .collect(Collectors.toMap(
-                                    uidToPartitionRange::get, Function.identity()));
+                                filtered.asMapOfRanges().values()
+                                        .stream()
+                                        .flatMap(List::stream)
+                                        .collect(Collectors.toMap(uidToPartitionRange::get, Function.identity()));
                         return doPruneMultiple(columnToFilters, filteredPartitionRange,
                             columnIdx + 1);
                     })
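
The stream patched in doPruneMultiple above groups candidate partition ranges by the sub-range of the column currently being pruned. A minimal sketch of the same groupingBy/mapping collector, with hypothetical string keys standing in for the partition key ranges and integer uids:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupBySketch {
    public static void main(String[] args) {
        // Partition range (flattened to "<col0>:<col1>") -> unique id.
        Map<String, Integer> partitionRangeToUid =
                Map.of("a:1", 10, "a:2", 11, "b:1", 12);
        // The classifier plays the role of mapPartitionKeyRange(range, columnIdx)
        // for columnIdx = 0: keep only the first column's value.
        Map<String, List<Integer>> grouped = partitionRangeToUid.entrySet().stream()
                .collect(Collectors.groupingBy(e -> e.getKey().split(":")[0],
                        Collectors.mapping(Map.Entry::getValue, Collectors.toList())));
        System.out.println(grouped); // e.g. {a=[10, 11], b=[12]}
    }
}
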
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/MaterializedViewSelector.java b/fe/fe-core/src/main/java/org/apache/doris/planner/MaterializedViewSelector.java
index bc8963681bd986..aa613540e3b875 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/MaterializedViewSelector.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/MaterializedViewSelector.java
@@ -119,8 +119,8 @@ public BestIndexInfo selectBestMV(ScanNode scanNode) throws UserException {
             return null;
         }
         long bestIndexId = priorities(olapScanNode, candidateIndexIdToSchema);
-        LOG.debug("The best materialized view is {} for scan node {} in query {}, " +
-                        "isPreAggregation: {}, reasonOfDisable: {}, cost {}",
+        LOG.debug("The best materialized view is {} for scan node {} in query {}, "
+                        + "isPreAggregation: {}, reasonOfDisable: {}, cost {}",
                 bestIndexId, scanNode.getId(), selectStmt.toSql(), isPreAggregation, reasonOfDisable,
                 (System.currentTimeMillis() - start));
         return new BestIndexInfo(bestIndexId, isPreAggregation, reasonOfDisable);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/OdbcScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/OdbcScanNode.java
index 1a5c0c0573c3cd..9049ffc50561f2 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/OdbcScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/OdbcScanNode.java
@@ -138,8 +138,11 @@ private String getOdbcQueryStr() {
         }
 
         // Other DataBase use limit do top n
-        if (shouldPushDownLimit() && (odbcType == TOdbcTableType.MYSQL || odbcType == TOdbcTableType.POSTGRESQL || odbcType == TOdbcTableType.MONGODB) ) {
-            sql.append(" LIMIT " + limit);
+        if (shouldPushDownLimit()
+                && (odbcType == TOdbcTableType.MYSQL
+                || odbcType == TOdbcTableType.POSTGRESQL
+                || odbcType == TOdbcTableType.MONGODB)) {
+            sql.append(" LIMIT ").append(limit);
         }
 
         return sql.toString();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/OdbcTableSink.java b/fe/fe-core/src/main/java/org/apache/doris/planner/OdbcTableSink.java
index ca98d857887a3d..68df73c63eb7f7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/OdbcTableSink.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/OdbcTableSink.java
@@ -26,7 +26,7 @@
 import org.apache.doris.thrift.TOdbcTableType;
 
 public class OdbcTableSink extends DataSink {
-    private final TOdbcTableType odbcType ;
+    private final TOdbcTableType odbcType;
     private final String tblName;
     private final String originTblName;
     private final String connectString;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java
index cbaf45368bb3de..a9aa49c9bf7a1c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java
@@ -172,7 +172,9 @@ public void closePreAggregation(String reason) {
         setCanTurnOnPreAggr(false);
     }
 
-    public long getTotalTabletsNum() { return totalTabletsNum; }
+    public long getTotalTabletsNum() {
+        return totalTabletsNum;
+    }
 
     public boolean getForceOpenPreAgg() {
         return forceOpenPreAgg;
@@ -252,7 +254,7 @@ public void updateScanRangeInfoByNewMVSelector(long selectedIndexId, boolean isP
         String situation;
         boolean update;
         CHECK:
-        {
+        { // CHECKSTYLE IGNORE THIS LINE
             if (olapTable.getKeysType() == KeysType.DUP_KEYS) {
                 situation = "The key type of table is duplicate.";
                 update = true;
@@ -272,18 +274,20 @@ public void updateScanRangeInfoByNewMVSelector(long selectedIndexId, boolean isP
             situation = "The key type of table is aggregated.";
             update = false;
             break CHECK;
-        }
+        } // CHECKSTYLE IGNORE THIS LINE
 
         if (update) {
             this.selectedIndexId = selectedIndexId;
             setIsPreAggregation(isPreAggregation, reasonOfDisable);
             updateColumnType();
             if (LOG.isDebugEnabled()) {
-                LOG.debug("Using the new scan range info instead of the old one. {}, {}", situation ,scanRangeInfo);
+                LOG.debug("Using the new scan range info instead of the old one. {}, {}",
+                        situation, scanRangeInfo);
             }
         } else {
             if (LOG.isDebugEnabled()) {
-                LOG.debug("Using the old scan range info instead of the new one. {}, {}", situation, scanRangeInfo);
+                LOG.debug("Using the old scan range info instead of the new one. {}, {}",
+                        situation, scanRangeInfo);
             }
         }
     }
@@ -597,9 +601,9 @@ private void computePartitionInfo() throws AnalysisException {
         }
         selectedPartitionNum = selectedPartitionIds.size();
 
-        for(long id : selectedPartitionIds){
+        for (long id : selectedPartitionIds) {
             Partition partition = olapTable.getPartition(id);
-            if(partition.getState() == PartitionState.RESTORE){
+            if (partition.getState() == PartitionState.RESTORE) {
                 ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_PARTITION_STATE, partition.getName(), "RESTORING");
             }
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapTableSink.java b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapTableSink.java
index 8bdd82ecef203c..b9863033c0f83b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapTableSink.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapTableSink.java
@@ -301,7 +301,7 @@ private void setPartitionKeys(TOlapTablePartition tPartition, PartitionItem part
                     tPartition.addToEndKeys(range.upperEndpoint().getKeys().get(i).treeToThrift().getNodes().get(0));
                 }
             }
-        } else if (partitionItem instanceof ListPartitionItem){
+        } else if (partitionItem instanceof ListPartitionItem) {
             List partitionKeys = partitionItem.getItems();
             // set in keys
             for (PartitionKey partitionKey : partitionKeys) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionColumnFilter.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionColumnFilter.java
index 14841443a6f486..5fa1af78322b49 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionColumnFilter.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionColumnFilter.java
@@ -117,14 +117,14 @@ public String toString() {
         if (null == lowerBound) {
             str += "lowerBound is UNSET";
         } else {
-            str += "lowerBound is " + lowerBound.getStringValue() + " and lowerBoundInclusive is " +
-                    lowerBoundInclusive;
+            str += "lowerBound is " + lowerBound.getStringValue() + " and lowerBoundInclusive is "
+                    + lowerBoundInclusive;
         }
         if (null == upperBound) {
             str += "\nupperBound is UNSET";
         } else {
-            str += "\nupperBound is " + upperBound.getStringValue() + " and upperBoundInclusive is " +
-                    upperBoundInclusive;
+            str += "\nupperBound is " + upperBound.getStringValue() + " and upperBoundInclusive is "
+                    + upperBoundInclusive;
         }
         if (null == inPredicate) {
             str += "\ninPredicate is UNSET";
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionPrunerV2Base.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionPrunerV2Base.java
index 0101d71337edcd..d2b169d3de7006 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionPrunerV2Base.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionPrunerV2Base.java
@@ -95,7 +95,7 @@ abstract FinalFilters getFinalFilters(ColumnRange columnRange,
      * unify the logic like pruning multiple list columns partition for multiple range ones.
      */
     abstract Collection pruneMultipleColumnPartition(
-        Map columnToFilters) throws AnalysisException;
+            Map columnToFilters) throws AnalysisException;
 
     /**
      * Now we could unify the logic of pruning single column partition for both list and range
@@ -129,8 +129,8 @@ protected Range mapPartitionKeyRange(Range fromRange,
             partitionKey -> ColumnBound.of(partitionKey.getKeys().get(columnIdx)));
     }
 
-    protected 
-    Range mapRange(Range range, Function mapper) {
+    protected  Range mapRange(
+            Range range, Function mapper) {
         TO lower = range.hasLowerBound() ? mapper.apply(range.lowerEndpoint()) : null;
         TO upper = range.hasUpperBound() ? mapper.apply(range.upperEndpoint()) : null;
         if (range.hasUpperBound()) {
@@ -182,8 +182,7 @@ private FinalFilters(Type type, Set> filters) {
 
         private static final FinalFilters NO_FILTERS = new FinalFilters(Type.NO_FILTERS, null);
 
-        private static final FinalFilters CONSTANT_FALSE_FILTERS =
-            new FinalFilters(Type.CONSTANT_FALSE_FILTERS, null);
+        private static final FinalFilters CONSTANT_FALSE_FILTERS = new FinalFilters(Type.CONSTANT_FALSE_FILTERS, null);
 
         public static FinalFilters noFilters() {
             return NO_FILTERS;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java
index d49165e1eadf40..eea0afde9d59c8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java
@@ -323,7 +323,9 @@ public void updateDataPartition(DataPartition dataPartition) {
         this.dataPartition = dataPartition;
     }
 
-    public PlanFragmentId getId() { return fragmentId; }
+    public PlanFragmentId getId() {
+        return fragmentId;
+    }
 
     public PlanFragment getDestFragment() {
         if (destNode == null) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragmentId.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragmentId.java
index 72872b99bb788d..ef5764489ba559 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragmentId.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragmentId.java
@@ -31,9 +31,14 @@ public PlanFragmentId(int id) {
     public static IdGenerator createGenerator() {
         return new IdGenerator() {
             @Override
-            public PlanFragmentId getNextId() { return new PlanFragmentId(nextId++); }
+            public PlanFragmentId getNextId() {
+                return new PlanFragmentId(nextId++);
+            }
+
             @Override
-            public PlanFragmentId getMaxId() { return new PlanFragmentId(nextId - 1); }
+            public PlanFragmentId getMaxId() {
+                return new PlanFragmentId(nextId - 1);
+            }
         };
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanNode.java
index f1bef3089673a1..4b1bc1cde0b255 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanNode.java
@@ -147,8 +147,7 @@ protected PlanNode(PlanNodeId id, ArrayList tupleIds, String planNodeNa
         this.tupleIds = Lists.newArrayList(tupleIds);
         this.tblRefIds = Lists.newArrayList(tupleIds);
         this.cardinality = -1;
-        this.planNodeName = VectorizedUtil.isVectorized() ?
-                "V" + planNodeName : planNodeName;
+        this.planNodeName = VectorizedUtil.isVectorized() ? "V" + planNodeName : planNodeName;
         this.numInstances = 1;
     }
 
@@ -158,8 +157,7 @@ protected PlanNode(PlanNodeId id, String planNodeName) {
         this.tupleIds = Lists.newArrayList();
         this.tblRefIds = Lists.newArrayList();
         this.cardinality = -1;
-        this.planNodeName = VectorizedUtil.isVectorized() ?
-                "V" + planNodeName : planNodeName;
+        this.planNodeName = VectorizedUtil.isVectorized() ? "V" + planNodeName : planNodeName;
         this.numInstances = 1;
     }
 
@@ -175,8 +173,7 @@ protected PlanNode(PlanNodeId id, PlanNode node, String planNodeName) {
         this.conjuncts = Expr.cloneList(node.conjuncts, null);
         this.cardinality = -1;
         this.compactData = node.compactData;
-        this.planNodeName = VectorizedUtil.isVectorized() ?
-                "V" + planNodeName : planNodeName;
+        this.planNodeName = VectorizedUtil.isVectorized() ? "V" + planNodeName : planNodeName;
         this.numInstances = 1;
         this.nodeType = nodeType;
     }
@@ -309,7 +306,7 @@ protected List getAllScanTupleIds() {
         List tupleIds = Lists.newArrayList();
         List scanNodes = Lists.newArrayList();
         collectAll(Predicates.instanceOf(ScanNode.class), scanNodes);
-        for(ScanNode node: scanNodes) {
+        for (ScanNode node : scanNodes) {
             tupleIds.addAll(node.getTupleIds());
         }
         return tupleIds;
@@ -362,7 +359,7 @@ Expr convertConjunctsToAndCompoundPredicate(List conjuncts) {
         List targetConjuncts = Lists.newArrayList(conjuncts);
         while (targetConjuncts.size() > 1) {
             List newTargetConjuncts = Lists.newArrayList();
-            for (int i = 0; i < targetConjuncts.size(); i+= 2) {
+            for (int i = 0; i < targetConjuncts.size(); i += 2) {
                 Expr expr = i + 1 < targetConjuncts.size() ? new CompoundPredicate(CompoundPredicate.Operator.AND, targetConjuncts.get(i),
                         targetConjuncts.get(i + 1)) : targetConjuncts.get(i);
                 newTargetConjuncts.add(expr);
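
The loop fixed here (i += 2) folds the conjunct list pairwise, so the CompoundPredicate tree it builds has logarithmic depth rather than a left-deep chain. A sketch of the same folding with plain strings standing in for Expr:

import java.util.ArrayList;
import java.util.List;

public class AndFoldSketch {
    static String fold(List<String> conjuncts) {
        List<String> current = new ArrayList<>(conjuncts);
        while (current.size() > 1) {
            List<String> next = new ArrayList<>();
            for (int i = 0; i < current.size(); i += 2) {
                // Pair up neighbours; a lone trailing element passes through.
                next.add(i + 1 < current.size()
                        ? "(" + current.get(i) + " AND " + current.get(i + 1) + ")"
                        : current.get(i));
            }
            current = next;
        }
        return current.get(0);
    }

    public static void main(String[] args) {
        // Prints (((a AND b) AND (c AND d)) AND e).
        System.out.println(fold(List.of("a", "b", "c", "d", "e")));
    }
}
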
@@ -871,18 +868,24 @@ public ScanNode getScanNodeInOneFragmentByTupleId(TupleId tupleId) {
         return null;
     }
 
-    protected void addRuntimeFilter(RuntimeFilter filter) { runtimeFilters.add(filter); }
+    protected void addRuntimeFilter(RuntimeFilter filter) {
+        runtimeFilters.add(filter);
+    }
 
-    protected Collection getRuntimeFilters() { return runtimeFilters; }
+    protected Collection getRuntimeFilters() {
+        return runtimeFilters;
+    }
 
-    public void clearRuntimeFilters() { runtimeFilters.clear(); }
+    public void clearRuntimeFilters() {
+        runtimeFilters.clear();
+    }
 
     protected String getRuntimeFilterExplainString(boolean isBuildNode) {
         if (runtimeFilters.isEmpty()) {
             return "";
         }
         List filtersStr = new ArrayList<>();
-        for (RuntimeFilter filter: runtimeFilters) {
+        for (RuntimeFilter filter : runtimeFilters) {
             StringBuilder filterStr = new StringBuilder();
             filterStr.append(filter.getFilterId());
             filterStr.append("[");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanNodeId.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanNodeId.java
index 26e5a0799fe17d..406fcd2894ab07 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanNodeId.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanNodeId.java
@@ -31,9 +31,14 @@ public PlanNodeId(int id) {
     public static IdGenerator createGenerator() {
         return new IdGenerator() {
             @Override
-            public PlanNodeId getNextId() { return new PlanNodeId(nextId++); }
+            public PlanNodeId getNextId() {
+                return new PlanNodeId(nextId++);
+            }
+
             @Override
-            public PlanNodeId getMaxId() { return new PlanNodeId(nextId - 1); }
+            public PlanNodeId getMaxId() {
+                return new PlanNodeId(nextId - 1);
+            }
         };
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/Planner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/Planner.java
index 720032df67c16a..b29e581e6de83a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/Planner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/Planner.java
@@ -75,7 +75,9 @@ public List getFragments() {
         return fragments;
     }
 
-    public PlannerContext getPlannerContext() { return plannerContext;}
+    public PlannerContext getPlannerContext() {
+        return plannerContext;
+    }
 
     public List getScanNodes() {
         if (singleNodePlanner == null) {
@@ -99,11 +101,11 @@ private void setResultExprScale(Analyzer analyzer, ArrayList outputExprs)
                     expr.getIds(null, slotList);
                     if (PrimitiveType.DECIMALV2 != expr.getType().getPrimitiveType()) {
                         continue;
-                            }
+                    }
 
                     if (PrimitiveType.DECIMALV2 != slotDesc.getType().getPrimitiveType()) {
                         continue;
-                            }
+                    }
 
                     if (slotList.contains(slotDesc.getId()) && null != slotDesc.getColumn()) {
                         int outputScale = slotDesc.getColumn().getScale();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PlannerContext.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PlannerContext.java
index 3d9be70c42b57b..86a228eb9fcb74 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/PlannerContext.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PlannerContext.java
@@ -59,12 +59,31 @@ public PlannerContext(Analyzer analyzer, QueryStmt queryStmt, TQueryOptions quer
         this.statement = statement;
     }
 
-    public QueryStmt getQueryStmt() { return queryStmt; }
-    public TQueryOptions getQueryOptions() { return queryOptions; } // getRootAnalyzer().getQueryOptions(); }
-    public Analyzer getRootAnalyzer() { return analyzer; } // analysisResult_.getAnalyzer(); }
-    public boolean isSingleNodeExec() { return getQueryOptions().num_nodes == 1; }
-    public PlanNodeId getNextNodeId() { return nodeIdGenerator.getNextId(); }
-    public PlanFragmentId getNextFragmentId() { return fragmentIdGenerator.getNextId(); }
+    public QueryStmt getQueryStmt() {
+        return queryStmt;
+    }
+
+    public TQueryOptions getQueryOptions() {
+        return queryOptions;
+    } // getRootAnalyzer().getQueryOptions();
+
+    public Analyzer getRootAnalyzer() {
+        return analyzer;
+    } // analysisResult_.getAnalyzer();
+
+    public boolean isSingleNodeExec() {
+        return getQueryOptions().num_nodes == 1;
+    }
 
-    public boolean isInsert() { return statement instanceof InsertStmt; }
+    public PlanNodeId getNextNodeId() {
+        return nodeIdGenerator.getNextId();
+    }
+
+    public PlanFragmentId getNextFragmentId() {
+        return fragmentIdGenerator.getNextId();
+    }
+
+    public boolean isInsert() {
+        return statement instanceof InsertStmt;
+    }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/RangePartitionPrunerV2.java b/fe/fe-core/src/main/java/org/apache/doris/planner/RangePartitionPrunerV2.java
index d2642473313f0d..256c5c11f4e43b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/RangePartitionPrunerV2.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/RangePartitionPrunerV2.java
@@ -62,8 +62,7 @@ RangeMap getCandidateRangeMap() {
      * This is just like the logic in v1 version, but we support disjunctive predicates here.
      */
     @Override
-    Collection pruneMultipleColumnPartition(
-        Map columnToFilters) throws AnalysisException {
+    Collection pruneMultipleColumnPartition(Map columnToFilters) throws AnalysisException {
         PartitionKey minKey = new PartitionKey();
         PartitionKey maxKey = new PartitionKey();
         RangeMap rangeMap = TreeRangeMap.create();
@@ -113,7 +112,7 @@ FinalFilters getFinalFilters(ColumnRange columnRange,
 
     private Range getMinInfinityRange(Column column) throws AnalysisException {
         ColumnBound value = ColumnBound.of(
-            LiteralExpr.createInfinity(Type.fromPrimitiveType(column.getDataType()), false));
+                LiteralExpr.createInfinity(Type.fromPrimitiveType(column.getDataType()), false));
         return Range.closed(value, value);
     }
 
@@ -140,14 +139,13 @@ private Collection doPruneMulti(Map columnToFilters,
                 Set> filters = finalFilters.filters;
                 Set result = Sets.newHashSet();
                 for (Range filter : filters) {
-                    if (filter.hasLowerBound() && filter.lowerBoundType() == BoundType.CLOSED &&
-                        filter.hasUpperBound() && filter.upperBoundType() == BoundType.CLOSED &&
-                        filter.lowerEndpoint() == filter.upperEndpoint()) {
+                    if (filter.hasLowerBound() && filter.lowerBoundType() == BoundType.CLOSED
+                            && filter.hasUpperBound() && filter.upperBoundType() == BoundType.CLOSED
+                            && filter.lowerEndpoint() == filter.upperEndpoint()) {
                         // Equal to predicate, e.g., col=1, the filter range is [1, 1].
                         minKey.pushColumn(filter.lowerEndpoint().getValue(), column.getDataType());
                         maxKey.pushColumn(filter.upperEndpoint().getValue(), column.getDataType());
-                        result.addAll(
-                            doPruneMulti(columnToFilters, rangeMap, columnIdx + 1, minKey, maxKey));
+                        result.addAll(doPruneMulti(columnToFilters, rangeMap, columnIdx + 1, minKey, maxKey));
                         minKey.popColumn();
                         maxKey.popColumn();
                     } else {
@@ -182,16 +180,14 @@ private Collection doPruneMulti(Map columnToFilters,
                         }
 
                         try {
-                            BoundType lowerType = filter.hasLowerBound() &&
-                                filter.lowerBoundType() == BoundType.CLOSED ?
-                                BoundType.CLOSED : BoundType.OPEN;
-                            BoundType upperType = filter.hasUpperBound() &&
-                                filter.upperBoundType() == BoundType.CLOSED ?
-                                BoundType.CLOSED : BoundType.OPEN;
-                            result.addAll(rangeMap.subRangeMap(
-                                Range.range(minKey, lowerType, maxKey, upperType))
-                                .asMapOfRanges().values());
+                            BoundType lowerType = filter.hasLowerBound() && filter.lowerBoundType() == BoundType.CLOSED
+                                    ? BoundType.CLOSED : BoundType.OPEN;
+                            BoundType upperType = filter.hasUpperBound() && filter.upperBoundType() == BoundType.CLOSED
+                                    ? BoundType.CLOSED : BoundType.OPEN;
+                            result.addAll(rangeMap.subRangeMap(Range.range(minKey, lowerType, maxKey, upperType))
+                                    .asMapOfRanges().values());
                         } catch (IllegalArgumentException e) {
+                            // CHECKSTYLE IGNORE THIS LINE
                         }
 
                         for (; pushMinCount > 0; pushMinCount--) {
@@ -215,7 +211,7 @@ private void pushInfinity(PartitionKey key, int columnIdx,
                               boolean isMax) throws AnalysisException {
         Column column = partitionColumns.get(columnIdx);
         key.pushColumn(LiteralExpr.createInfinity(Type.fromPrimitiveType(column.getDataType()), isMax),
-            column.getDataType());
+                column.getDataType());
     }
 
     private Collection noFiltersResult(PartitionKey minKey, PartitionKey maxKey,
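
doPruneMulti above treats a closed range with equal endpoints as an equality predicate: col = v arrives as the point range [v, v]. The same test expressed directly with Guava's Range (the production code can compare endpoints with == because both bounds of such a filter are the same ColumnBound instance; equals() is used here instead):

import com.google.common.collect.BoundType;
import com.google.common.collect.Range;

public class PointRangeSketch {
    static boolean isPoint(Range<Integer> r) {
        return r.hasLowerBound() && r.lowerBoundType() == BoundType.CLOSED
                && r.hasUpperBound() && r.upperBoundType() == BoundType.CLOSED
                && r.lowerEndpoint().equals(r.upperEndpoint());
    }

    public static void main(String[] args) {
        System.out.println(isPoint(Range.closed(1, 1)));     // true: col = 1
        System.out.println(isPoint(Range.closedOpen(1, 2))); // false
        System.out.println(isPoint(Range.atLeast(1)));       // false
    }
}
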
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/ResultFileSink.java b/fe/fe-core/src/main/java/org/apache/doris/planner/ResultFileSink.java
index 0ba37afbc6d912..02ac6c6a31912a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/ResultFileSink.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/ResultFileSink.java
@@ -65,8 +65,8 @@ private String genNames(ArrayList headerNames, String columnSeparator, S
 
     public ResultFileSink(PlanNodeId exchNodeId, OutFileClause outFileClause, ArrayList labels) {
         this(exchNodeId, outFileClause);
-        if (outFileClause.getHeaderType().equals(FeConstants.csv_with_names) ||
-                outFileClause.getHeaderType().equals(FeConstants.csv_with_names_and_types)) {
+        if (outFileClause.getHeaderType().equals(FeConstants.csv_with_names)
+                || outFileClause.getHeaderType().equals(FeConstants.csv_with_names_and_types)) {
             header = genNames(labels, outFileClause.getColumnSeparator(), outFileClause.getLineDelimiter());
         }
         headerType = outFileClause.getHeaderType();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/RollupSelector.java b/fe/fe-core/src/main/java/org/apache/doris/planner/RollupSelector.java
index 00871f1d33fab7..98a8bf21282fcf 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/RollupSelector.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/RollupSelector.java
@@ -62,7 +62,7 @@ public RollupSelector(Analyzer analyzer, TupleDescriptor tupleDesc, OlapTable ta
     public long selectBestRollup(
             Collection partitionIds, List conjuncts, boolean isPreAggregation)
             throws UserException {
-        Preconditions.checkArgument(partitionIds != null , "Paritition can't be null.");
+        Preconditions.checkArgument(partitionIds != null, "Partition can't be null.");
 
         ConnectContext connectContext = ConnectContext.get();
         if (connectContext != null && connectContext.getSessionVariable().isUseV2Rollup()) {
@@ -268,12 +268,12 @@ private boolean isPredicateUsedForPrefixIndex(Expr expr, boolean isJoinConjunct)
             return false;
         }
         if (expr instanceof InPredicate) {
-            return isInPredicateUsedForPrefixIndex((InPredicate)expr);
+            return isInPredicateUsedForPrefixIndex((InPredicate) expr);
         } else if (expr instanceof BinaryPredicate) {
             if (isJoinConjunct) {
-                return isEqualJoinConjunctUsedForPrefixIndex((BinaryPredicate)expr);
+                return isEqualJoinConjunctUsedForPrefixIndex((BinaryPredicate) expr);
             } else {
-                return isBinaryPredicateUsedForPrefixIndex((BinaryPredicate)expr);
+                return isBinaryPredicateUsedForPrefixIndex((BinaryPredicate) expr);
             }
         }
         return true;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilter.java b/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilter.java
index a23570b81534d8..48708e3a277b2b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilter.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilter.java
@@ -119,10 +119,10 @@ public RuntimeFilterTarget(ScanNode targetNode, Expr targetExpr,
 
         @Override
         public String toString() {
-            return "Target Id: " + node.getId() + " " +
-                    "Target expr: " + expr.debugString() + " " +
-                    "Is only Bound By Key: " + isBoundByKeyColumns +
-                    "Is local: " + isLocalTarget;
+            return "Target Id: " + node.getId() + " "
+                    + "Target expr: " + expr.debugString() + " "
+                    + "Is only Bound By Key: " + isBoundByKeyColumns
+                    + "Is local: " + isLocalTarget;
         }
     }
 
@@ -149,10 +149,17 @@ public boolean equals(Object obj) {
     }
 
     @Override
-    public int hashCode() { return id.hashCode(); }
+    public int hashCode() {
+        return id.hashCode();
+    }
+
+    public void markFinalized() {
+        finalized = true;
+    }
 
-    public void markFinalized() { finalized = true; }
-    public boolean isFinalized() { return finalized; }
+    public boolean isFinalized() {
+        return finalized;
+    }
 
     /**
      * Serializes a runtime filter to Thrift.
@@ -173,16 +180,45 @@ public TRuntimeFilterDesc toThrift() {
         return tFilter;
     }
 
-    public List getTargets() { return targets; }
-    public boolean hasTargets() { return !targets.isEmpty(); }
-    public Expr getSrcExpr() { return srcExpr; }
-    public Expr getOrigTargetExpr() { return origTargetExpr; }
-    public Map> getTargetSlots() { return targetSlotsByTid; }
-    public RuntimeFilterId getFilterId() { return id; }
-    public TRuntimeFilterType getType() { return runtimeFilterType; }
-    public void setType(TRuntimeFilterType type) { runtimeFilterType = type; }
-    public boolean hasRemoteTargets() { return hasRemoteTargets; }
-    public HashJoinNode getBuilderNode() { return builderNode; }
+    public List getTargets() {
+        return targets;
+    }
+
+    public boolean hasTargets() {
+        return !targets.isEmpty();
+    }
+
+    public Expr getSrcExpr() {
+        return srcExpr;
+    }
+
+    public Expr getOrigTargetExpr() {
+        return origTargetExpr;
+    }
+
+    public Map> getTargetSlots() {
+        return targetSlotsByTid;
+    }
+
+    public RuntimeFilterId getFilterId() {
+        return id;
+    }
+
+    public TRuntimeFilterType getType() {
+        return runtimeFilterType;
+    }
+
+    public void setType(TRuntimeFilterType type) {
+        runtimeFilterType = type;
+    }
+
+    public boolean hasRemoteTargets() {
+        return hasRemoteTargets;
+    }
+
+    public HashJoinNode getBuilderNode() {
+        return builderNode;
+    }
 
     /**
      * Static function to create a RuntimeFilter from 'joinPredicate' that is assigned
@@ -313,7 +349,7 @@ so after the COALESCE() the join condition becomes 100 = 100.
         Map> slotsByTid = new HashMap<>();
         // We need to iterate over all the slots of 'expr' and check if they have
         // equivalent slots that are bound by the same base table tuple(s).
-        for (SlotId slotId: sids) {
+        for (SlotId slotId : sids) {
             Map> currSlotsByTid = getBaseTblEquivSlots(analyzer, slotId);
             if (currSlotsByTid.isEmpty()) {
                 return Collections.emptyMap();
@@ -357,7 +393,7 @@ so after the COALESCE() the join condition becomes 100 = 100.
     private static Map> getBaseTblEquivSlots(Analyzer analyzer,
                                                                    SlotId srcSid) {
         Map> slotsByTid = new HashMap<>();
-        for (SlotId targetSid: analyzer.getValueTransferTargets(srcSid)) {
+        for (SlotId targetSid : analyzer.getValueTransferTargets(srcSid)) {
             TupleDescriptor tupleDesc = analyzer.getSlotDesc(targetSid).getParent();
             if (tupleDesc.getTable() == null) {
                 continue;
@@ -369,7 +405,7 @@ private static Map> getBaseTblEquivSlots(Analyzer analyzer
     }
 
     public Expr getTargetExpr(PlanNodeId targetPlanNodeId) {
-        for (RuntimeFilterTarget target: targets) {
+        for (RuntimeFilterTarget target : targets) {
             if (target.node.getId() != targetPlanNodeId) {
                 continue;
             }
@@ -392,16 +428,22 @@ public double getSelectivity() {
         return builderNode.getCardinality() / (double) builderNode.getChild(0).getCardinality();
     }
 
-    public void addTarget(RuntimeFilterTarget target) { targets.add(target); }
+    public void addTarget(RuntimeFilterTarget target) {
+        targets.add(target);
+    }
 
-    public void setIsBroadcast(boolean isBroadcast) { isBroadcastJoin = isBroadcast; }
+    public void setIsBroadcast(boolean isBroadcast) {
+        isBroadcastJoin = isBroadcast;
+    }
 
-    public void computeNdvEstimate() { ndvEstimate = builderNode.getChild(1).getCardinality(); }
+    public void computeNdvEstimate() {
+        ndvEstimate = builderNode.getChild(1).getCardinality();
+    }
 
     public void extractTargetsPosition() {
         Preconditions.checkNotNull(builderNode.getFragment());
         Preconditions.checkState(hasTargets());
-        for (RuntimeFilterTarget target: targets) {
+        for (RuntimeFilterTarget target : targets) {
             Preconditions.checkNotNull(target.node.getFragment());
             hasLocalTargets = hasLocalTargets || target.isLocalTarget;
             hasRemoteTargets = hasRemoteTargets || !target.isLocalTarget;
@@ -442,7 +484,7 @@ public static int getMinLogSpaceForBloomFilter(long ndv, double fpp) {
         double m = -k * ndv / Math.log(1 - Math.pow(fpp, 1.0 / k));
 
         // Handle case where ndv == 1 => ceil(log2(m/8)) < 0.
-        return Math.max(0, (int)(Math.ceil(Math.log(m / 8)/Math.log(2))));
+        return Math.max(0, (int) (Math.ceil(Math.log(m / 8) / Math.log(2))));
     }
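
getMinLogSpaceForBloomFilter above solves the standard Bloom filter sizing identity m = -k * ndv / ln(1 - fpp^(1/k)) bits for k hash functions, then returns log2 of the byte count. The value of k is defined above this hunk, so treating it as a parameter in this sketch is an assumption:

public class BloomSpaceSketch {
    static int minLogSpace(long ndv, double fpp, double k) {
        double m = -k * ndv / Math.log(1 - Math.pow(fpp, 1.0 / k));
        // ceil(log2(m / 8)) bytes, clamped so ndv == 1 cannot go negative.
        return Math.max(0, (int) Math.ceil(Math.log(m / 8) / Math.log(2)));
    }

    public static void main(String[] args) {
        // 1M distinct values at a 5% false-positive rate with 8 hash
        // functions prints 20, i.e. a 2^20-byte (1 MiB) filter.
        System.out.println(minLogSpace(1_000_000L, 0.05, 8));
    }
}
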
 
     /**
@@ -452,7 +494,7 @@ public void assignToPlanNodes() {
         Preconditions.checkState(hasTargets());
         builderNode.addRuntimeFilter(this);
         builderNode.fragment.setBuilderRuntimeFilterIds(getFilterId());
-        for (RuntimeFilterTarget target: targets) {
+        for (RuntimeFilterTarget target : targets) {
             target.node.addRuntimeFilter(this);
             // fragment is expected to use this filter id
             target.node.fragment.setTargetRuntimeFilterIds(this.id);
@@ -469,11 +511,11 @@ public void registerToPlan(Analyzer analyzer) {
     }
 
     public String debugString() {
-        return "FilterID: " + id + " " +
-                "Source: " + builderNode.getId() + " " +
-                "SrcExpr: " + getSrcExpr().debugString() + " " +
-                "Target(s): " +
-                Joiner.on(", ").join(targets) + " " +
-                "Selectivity: " + getSelectivity();
+        return "FilterID: " + id + " "
+                +      "Source: " + builderNode.getId() + " "
+                +      "SrcExpr: " + getSrcExpr().debugString() + " "
+                +      "Target(s): "
+                +      Joiner.on(", ").join(targets) + " "
+                + "Selectivity: " + getSelectivity();
     }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilterGenerator.java b/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilterGenerator.java
index b68df92ba4a062..3a05dc2fbf303d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilterGenerator.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilterGenerator.java
@@ -150,7 +150,7 @@ public static void generateRuntimeFilters(Analyzer analyzer, PlanNode plan) {
         // We only enforce a limit on the number of bloom filters as they are much more
         // heavy-weight than the other filter types.
         int numBloomFilters = 0;
-        for (RuntimeFilter filter: filters) {
+        for (RuntimeFilter filter : filters) {
             filter.extractTargetsPosition();
             if (filter.getType() == TRuntimeFilterType.BLOOM) {
                 if (numBloomFilters >= maxNumBloomFilters) {
@@ -167,7 +167,7 @@ public static void generateRuntimeFilters(Analyzer analyzer, PlanNode plan) {
      */
     public List getRuntimeFilters() {
         Set resultSet = new HashSet<>();
-        for (List filters: runtimeFiltersByTid.values()) {
+        for (List filters : runtimeFiltersByTid.values()) {
             resultSet.addAll(filters);
         }
         List resultList = Lists.newArrayList(resultSet);
@@ -229,14 +229,14 @@ private void generateFilters(PlanNode root) {
             // Finalize every runtime filter of that join. This is to ensure that we don't
             // assign a filter to a scan node from the right subtree of joinNode or ancestor
             // join nodes in case we don't find a destination node in the left subtree.
-            for (RuntimeFilter runtimeFilter: filters) {
+            for (RuntimeFilter runtimeFilter : filters) {
                 finalizeRuntimeFilter(runtimeFilter);
             }
             generateFilters(root.getChild(1));
         } else if (root instanceof ScanNode) {
             assignRuntimeFilters((ScanNode) root);
         } else {
-            for (PlanNode childNode: root.getChildren()) {
+            for (PlanNode childNode : root.getChildren()) {
                 generateFilters(childNode);
             }
         }
@@ -249,7 +249,7 @@ private void generateFilters(PlanNode root) {
     private void registerRuntimeFilter(RuntimeFilter filter) {
         Map> targetSlotsByTid = filter.getTargetSlots();
         Preconditions.checkState(targetSlotsByTid != null && !targetSlotsByTid.isEmpty());
-        for (TupleId tupleId: targetSlotsByTid.keySet()) {
+        for (TupleId tupleId : targetSlotsByTid.keySet()) {
             registerRuntimeFilter(filter, tupleId);
         }
     }
@@ -271,10 +271,10 @@ private void registerRuntimeFilter(RuntimeFilter filter, TupleId targetTid) {
      */
     private void finalizeRuntimeFilter(RuntimeFilter runtimeFilter) {
         Set targetTupleIds = new HashSet<>();
-        for (RuntimeFilter.RuntimeFilterTarget target: runtimeFilter.getTargets()) {
+        for (RuntimeFilter.RuntimeFilterTarget target : runtimeFilter.getTargets()) {
             targetTupleIds.addAll(target.node.getTupleIds());
         }
-        for (TupleId tupleId: runtimeFilter.getTargetSlots().keySet()) {
+        for (TupleId tupleId : runtimeFilter.getTargetSlots().keySet()) {
             if (!targetTupleIds.contains(tupleId)) {
                 runtimeFiltersByTid.get(tupleId).remove(runtimeFilter);
             }
@@ -302,7 +302,7 @@ private void assignRuntimeFilters(ScanNode scanNode) {
         String runtimeFilterMode = sessionVariable.getRuntimeFilterMode();
         Preconditions.checkState(Arrays.stream(TRuntimeFilterMode.values()).map(Enum::name).anyMatch(
                 p -> p.equals(runtimeFilterMode.toUpperCase())), "runtimeFilterMode not expected");
-        for (RuntimeFilter filter: runtimeFiltersByTid.get(tid)) {
+        for (RuntimeFilter filter : runtimeFiltersByTid.get(tid)) {
             if (filter.isFinalized()) {
                 continue;
             }
@@ -371,8 +371,8 @@ private Expr computeTargetExpr(RuntimeFilter filter, TupleId targetTid) {
             targetExpr.collect(SlotRef.class, exprSlots);
             // targetExpr specifies the id of the slotRef node in the `tupleID`
             List sids = filter.getTargetSlots().get(targetTid);
-            for (SlotRef slotRef: exprSlots) {
-                for (SlotId sid: sids) {
+            for (SlotRef slotRef : exprSlots) {
+                for (SlotId sid : sids) {
                     if (analyzer.hasValueTransfer(slotRef.getSlotId(), sid)) {
                         SlotRef newSlotRef = new SlotRef(analyzer.getSlotDesc(sid));
                         newSlotRef.analyzeNoThrow(analyzer);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/ScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/ScanNode.java
index c308896c3a03da..9837ad138306d3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/ScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/ScanNode.java
@@ -153,8 +153,8 @@ private ColumnRange createColumnRange(SlotDescriptor desc,
                 continue;
             }
 
-            if (expr instanceof CompoundPredicate &&
-                ((CompoundPredicate) expr).getOp() == CompoundPredicate.Operator.OR) {
+            if (expr instanceof CompoundPredicate
+                    && ((CompoundPredicate) expr).getOp() == CompoundPredicate.Operator.OR) {
                 // Try to get column filter from disjunctive predicates.
                 List disjunctivePredicates = PredicateUtils.splitDisjunctivePredicates(expr);
                 if (disjunctivePredicates.isEmpty()) {
@@ -215,8 +215,7 @@ private ColumnRanges expressionToRanges(Expr expr,
             BinaryPredicate binPred = (BinaryPredicate) expr;
             Expr slotBinding = binPred.getSlotBinding(desc.getId());
 
-            if (slotBinding == null || !slotBinding.isConstant() ||
-                !(slotBinding instanceof LiteralExpr)) {
+            if (slotBinding == null || !slotBinding.isConstant() || !(slotBinding instanceof LiteralExpr)) {
                 return ColumnRanges.createFailure();
             }
 
@@ -259,8 +258,7 @@ private ColumnRanges expressionToRanges(Expr expr,
             }
 
             for (int i = 1; i < inPredicate.getChildren().size(); ++i) {
-                ColumnBound bound =
-                    ColumnBound.of((LiteralExpr) inPredicate.getChild(i));
+                ColumnBound bound = ColumnBound.of((LiteralExpr) inPredicate.getChild(i));
                 result.add(Range.closed(bound, bound));
             }
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/SetOperationNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/SetOperationNode.java
index b500fed25f5026..901e461b76652e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/SetOperationNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SetOperationNode.java
@@ -224,8 +224,8 @@ private boolean isChildPassthrough(
         List childTupleIds = childNode.getTupleIds();
         // Check that if the child outputs a single tuple, then it's not nullable. Tuple
         // nullability can be considered to be part of the physical row layout.
-        Preconditions.checkState(childTupleIds.size() != 1 ||
-                !childNode.getNullableTupleIds().contains(childTupleIds.get(0)));
+        Preconditions.checkState(childTupleIds.size() != 1
+                || !childNode.getNullableTupleIds().contains(childTupleIds.get(0)));
         // If the Union node is inside a subplan, passthrough should be disabled to avoid
         // performance issues by forcing tiny batches.
         // TODO: Remove this as part of IMPALA-4179.
@@ -321,7 +321,7 @@ public void init(Analyzer analyzer) {
     }
 
     protected void toThrift(TPlanNode msg, TPlanNodeType nodeType) {
-        Preconditions.checkState( materializedResultExprLists.size() == children.size());
+        Preconditions.checkState(materializedResultExprLists.size() == children.size());
         List> texprLists = Lists.newArrayList();
         for (List exprList : materializedResultExprLists) {
             texprLists.add(Expr.treesToThrift(exprList));
@@ -375,7 +375,7 @@ public String getNodeExplainString(String prefix, TExplainLevel detailLevel) {
         if (detailLevel == TExplainLevel.VERBOSE) {
             if (CollectionUtils.isNotEmpty(materializedResultExprLists)) {
                 output.append(prefix).append("child exprs: ").append("\n");
-                for(List exprs : materializedResultExprLists) {
+                for (List exprs : materializedResultExprLists) {
                     output.append(prefix).append("    ").append(exprs.stream().map(Expr::toSql)
                             .collect(Collectors.joining(" | "))).append("\n");
                 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java
index 521d8e23432181..d5a02ddaae2be3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java
@@ -110,7 +110,7 @@ public TExecPlanFragmentParams plan(TUniqueId loadId) throws UserException {
             throw new AnalysisException("load by MERGE or DELETE is only supported in unique tables.");
         }
         if (taskInfo.getMergeType() != LoadTask.MergeType.APPEND
-                && !destTable.hasDeleteSign() ) {
+                && !destTable.hasDeleteSign()) {
             throw new AnalysisException("load by MERGE or DELETE need to upgrade table to support batch delete.");
         }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadScanNode.java
index 2e508f4f02b3e4..b8b4d60e020522 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadScanNode.java
@@ -193,7 +193,9 @@ public List getScanRangeLocations(long maxScanRangeLength)
     }
 
     @Override
-    public int getNumInstances() { return 1; }
+    public int getNumInstances() {
+        return 1;
+    }
 
     @Override
     public String getNodeExplainString(String prefix, TExplainLevel detailLevel) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/plugin/DynamicPluginLoader.java b/fe/fe-core/src/main/java/org/apache/doris/plugin/DynamicPluginLoader.java
index d8d8d0e5a1701b..40a42fc035473f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/plugin/DynamicPluginLoader.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/plugin/DynamicPluginLoader.java
@@ -206,7 +206,7 @@ Plugin dynamicLoadPlugin(boolean closePreviousPlugin) throws IOException, UserEx
 
         Class<? extends Plugin> pluginClass;
         try {
-             pluginClass = loader.loadClass(pluginInfo.getClassName()).asSubclass(Plugin.class);
+            pluginClass = loader.loadClass(pluginInfo.getClassName()).asSubclass(Plugin.class);
         } catch (ClassNotFoundException e) {
             throw new UserException("Could not find plugin class [" + pluginInfo.getClassName() + "]", e);
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginInfo.java b/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginInfo.java
index ac95556a96f056..df7ceac7e00f41 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginInfo.java
@@ -122,14 +122,12 @@ public static PluginInfo readFromProperties(final Path propertiesPath, final Str
         }
 
         final Map<String, String> propsMap;
-        {
-            final Properties props = new Properties();
-            try (InputStream stream = Files.newInputStream(descriptor)) {
-                props.load(stream);
-            }
-            propsMap = props.stringPropertyNames().stream()
-                    .collect(Collectors.toMap(Function.identity(), props::getProperty));
+        final Properties props = new Properties();
+        try (InputStream stream = Files.newInputStream(descriptor)) {
+            props.load(stream);
         }
+        propsMap = props.stringPropertyNames().stream()
+                .collect(Collectors.toMap(Function.identity(), props::getProperty));
 
         final String name = propsMap.remove("name");
         if (Strings.isNullOrEmpty(name)) {
@@ -235,9 +233,9 @@ public boolean equals(Object o) {
             return false;
         }
         PluginInfo that = (PluginInfo) o;
-        return Objects.equals(name, that.name) &&
-                type == that.type &&
-                Objects.equals(version, that.version);
+        return Objects.equals(name, that.name)
+                && type == that.type
+                && Objects.equals(version, that.version);
     }
 
     @Override
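Aside: the refactor above keeps the same idiom while dropping the extra block scope: load the descriptor into a Properties object, then copy it into a plain map so lookups and remove() behave predictably. A self-contained sketch of that idiom (the file name is illustrative, not from this patch):

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.Map;
    import java.util.Properties;
    import java.util.function.Function;
    import java.util.stream.Collectors;

    public class PropsToMap {
        public static void main(String[] args) throws Exception {
            Path descriptor = Paths.get("plugin.properties"); // hypothetical path
            Properties props = new Properties();
            try (InputStream stream = Files.newInputStream(descriptor)) {
                props.load(stream); // parses key=value pairs
            }
            // Copy into a String -> String map, mirroring readFromProperties above.
            Map<String, String> propsMap = props.stringPropertyNames().stream()
                    .collect(Collectors.toMap(Function.identity(), props::getProperty));
            System.out.println(propsMap);
        }
    }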
diff --git a/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginZip.java b/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginZip.java
index ad8bf11daa68b9..1ef190f405aa82 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginZip.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginZip.java
@@ -171,8 +171,8 @@ Path extractZip(Path zip, Path targetPath) throws IOException, UserException {
                 // normalizing the path (which removes foo/..) and ensuring the normalized entry
                 // is still rooted with the target plugin directory.
                 if (!targetFile.normalize().startsWith(targetPath)) {
-                    throw new UserException("Zip contains entry name '" +
-                            entry.getName() + "' resolving outside of plugin directory");
+                    throw new UserException("Zip contains entry name '"
+                            + entry.getName() + "' resolving outside of plugin directory");
                 }
 
                 // be on the safe side: do not rely on that directories are always extracted
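Aside: the normalize-then-startsWith test in this hunk is the standard zip-slip guard. A minimal sketch of the same check under hypothetical paths (the real code applies it to every archive entry before extraction):

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class ZipSlipGuard {
        // Reject entries whose normalized path escapes the extraction root.
        static void requireInside(Path targetPath, String entryName) {
            Path targetFile = targetPath.resolve(entryName);
            if (!targetFile.normalize().startsWith(targetPath)) {
                throw new IllegalArgumentException("entry '" + entryName
                        + "' resolves outside of the target directory");
            }
        }

        public static void main(String[] args) {
            Path root = Paths.get("/tmp/plugins/demo").normalize(); // hypothetical root
            requireInside(root, "conf/plugin.properties"); // passes
            requireInside(root, "../../etc/crontab");      // throws: "foo/.." trick
        }
    }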
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/AuditLogBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/qe/AuditLogBuilder.java
index 59d8bd3c49e9bb..b9e1855ca5c582 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/AuditLogBuilder.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/AuditLogBuilder.java
@@ -77,19 +77,19 @@ public boolean eventFilter(EventType type) {
     @Override
     public void exec(AuditEvent event) {
         try {
-           switch (event.type) {
-               case AFTER_QUERY:
-                   auditQueryLog(event);
-                   break;
-               case LOAD_SUCCEED:
-                   auditLoadLog(event);
-                   break;
-               case STREAM_LOAD_FINISH:
-                   auditStreamLoadLog(event);
-                   break;
-               default:
-                   break;
-           }
+            switch (event.type) {
+                case AFTER_QUERY:
+                    auditQueryLog(event);
+                    break;
+                case LOAD_SUCCEED:
+                    auditLoadLog(event);
+                    break;
+                case STREAM_LOAD_FINISH:
+                    auditStreamLoadLog(event);
+                    break;
+                default:
+                    break;
+            }
         } catch (Exception e) {
             LOG.debug("failed to process audit event", e);
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java
index e0f6e1b043493c..65642bbe4b9a28 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java
@@ -109,7 +109,6 @@ public class ConnectContext {
     protected boolean isSend;
 
     protected AuditEventBuilder auditEventBuilder = new AuditEventBuilder();
-    ;
 
     protected String remoteIP;
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java
index d495578b753514..f3b683de518bcc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java
@@ -501,10 +501,10 @@ public TMasterOpResult proxyExecute(TMasterOpRequest request) {
         // no matter the master execute success or fail, the master must transfer the result to follower
         // and tell the follower the current journalID.
         TMasterOpResult result = new TMasterOpResult();
-        if (ctx.queryId() != null &&
+        if (ctx.queryId() != null
                // If the non-master FE did not set a query id, or the query id was reset in StmtExecutor
                // when a query executes more than once, return the current query id to the non-master FE.
-                (!request.isSetQueryId() || !request.getQueryId().equals(ctx.queryId()))
+                && (!request.isSetQueryId() || !request.getQueryId().equals(ctx.queryId()))
         ) {
             result.setQueryId(ctx.queryId());
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectScheduler.java
index 7e75afdf223c02..9a5dd0418bc5da 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectScheduler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectScheduler.java
@@ -139,8 +139,8 @@ public List<ConnectContext.ThreadInfo> listConnection(String user) {
         List<ConnectContext.ThreadInfo> infos = Lists.newArrayList();
         for (ConnectContext ctx : connectionMap.values()) {
             // Check auth
-            if (!ctx.getQualifiedUser().equals(user) &&
-                    !Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(),
+            if (!ctx.getQualifiedUser().equals(user)
+                    && !Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(),
                             PrivPredicate.GRANT)) {
                 continue;
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java b/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java
index abe8c26c2e6dfa..5e03947a87671b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java
@@ -98,7 +98,6 @@
 import com.google.common.collect.Maps;
 import com.google.common.collect.Multiset;
 import com.google.common.collect.Sets;
-
 import org.apache.commons.collections.map.HashedMap;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -641,8 +640,8 @@ private void sendFragment() throws TException, RpcException, UserException {
                         switch (code) {
                             case TIMEOUT:
                                 throw new RpcException(pair.first.backend.getHost(), "send fragment timeout. backend id: "
-                                        + pair.first.backend.getId() + " fragment: " +
-                                        DebugUtil.printId(pair.first.rpcParams.params.fragment_instance_id));
+                                        + pair.first.backend.getId() + " fragment: "
+                                        + DebugUtil.printId(pair.first.rpcParams.params.fragment_instance_id));
                             case THRIFT_RPC_ERROR:
                                 SimpleScheduler.addToBlacklist(pair.first.backend.getId(), errMsg);
                                 throw new RpcException(pair.first.backend.getHost(), "rpc failed");
@@ -925,8 +924,8 @@ private void computeFragmentExecParams() throws Exception {
                     && sink.getOutputPartition().isBucketShuffleHashPartition()) {
                 // the destFragment must be bucket shuffle
                 Preconditions.checkState(bucketShuffleJoinController.
-                        isBucketShuffleJoin(destFragment.getFragmentId().asInt()), "Sink is" +
-                        "Bucket Shuffle Partition, The destFragment must have bucket shuffle join node ");
+                        isBucketShuffleJoin(destFragment.getFragmentId().asInt()), "Sink is "
+                        + "Bucket Shuffle Partition, the destFragment must have a bucket shuffle join node");
 
                 int bucketSeq = 0;
                 int bucketNum = bucketShuffleJoinController.getFragmentBucketNum(destFragment.getFragmentId());
@@ -1796,7 +1795,7 @@ private void computeScanRangeAssignmentByBucket(
             if (!fragmentIdToSeqToAddressMap.containsKey(scanNode.getFragmentId())) {
                 // The bucket shuffle join only hit when the partition is one. so the totalTabletsNum is all tablet of
                 // one hit partition. can be the right bucket num in bucket shuffle join
-                fragmentIdToBucketNumMap.put(scanNode.getFragmentId(), (int)scanNode.getTotalTabletsNum());
+                fragmentIdToBucketNumMap.put(scanNode.getFragmentId(), (int) scanNode.getTotalTabletsNum());
                 fragmentIdToSeqToAddressMap.put(scanNode.getFragmentId(), new HashedMap());
                 fragmentIdBucketSeqToScanRangeMap.put(scanNode.getFragmentId(), new BucketSeqToScanRange());
                 fragmentIdToBuckendIdBucketCountMap.put(scanNode.getFragmentId(), new HashMap<>());
@@ -2253,7 +2252,7 @@ public List<QueryStatisticsItem.FragmentInstanceInfo> getFragmentInstanceInfos()
         lock();
         try {
             for (int index = 0; index < fragments.size(); index++) {
-                for (BackendExecState backendExecState: backendExecStates) {
+                for (BackendExecState backendExecState : backendExecStates) {
                     if (fragments.get(index).getFragmentId() != backendExecState.fragmentId) {
                         continue;
                     }
@@ -2279,7 +2278,7 @@ private void attachInstanceProfileToFragmentProfile() {
     // Runtime filter target fragment instance param
     static class FRuntimeFilterTargetParam {
         public TUniqueId targetFragmentInstanceId;
-        ;
+
         public TNetworkAddress targetFragmentInstanceAddr;
 
         public FRuntimeFilterTargetParam(TUniqueId id, TNetworkAddress host) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/DdlExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/DdlExecutor.java
index 174f505dac901b..e369463ad57c7f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/DdlExecutor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/DdlExecutor.java
@@ -265,8 +265,8 @@ public static void execute(Catalog catalog, DdlStmt ddlStmt) throws Exception {
             if (!syncJobMgr.isJobNameExist(createSyncJobStmt.getDbName(), createSyncJobStmt.getJobName())) {
                 syncJobMgr.addDataSyncJob((CreateDataSyncJobStmt) ddlStmt);
             } else {
-                throw new DdlException("The syncJob with jobName '" + createSyncJobStmt.getJobName() +
-                        "' in database [" + createSyncJobStmt.getDbName() + "] is already exists.");
+                throw new DdlException("The syncJob with jobName '" + createSyncJobStmt.getJobName()
+                        + "' in database [" + createSyncJobStmt.getDbName() + "] already exists.");
             }
         } else if (ddlStmt instanceof ResumeSyncJobStmt) {
             catalog.getSyncJobManager().resumeSyncJob((ResumeSyncJobStmt) ddlStmt);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java
index e52226efd730b7..1974384dac59df 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java
@@ -44,8 +44,8 @@ public final class GlobalVariable {
     public static final String PERFORMANCE_SCHEMA = "performance_schema";
 
     @VariableMgr.VarAttr(name = VERSION_COMMENT, flag = VariableMgr.READ_ONLY)
-    public static String versionComment = "Doris version " +
-            Version.DORIS_BUILD_VERSION + "-" + Version.DORIS_BUILD_SHORT_HASH;
+    public static String versionComment = "Doris version "
+            + Version.DORIS_BUILD_VERSION + "-" + Version.DORIS_BUILD_SHORT_HASH;
 
     @VariableMgr.VarAttr(name = VERSION, flag = VariableMgr.READ_ONLY)
     public static String version = MysqlHandshakePacket.SERVER_VERSION;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/HelpModule.java b/fe/fe-core/src/main/java/org/apache/doris/qe/HelpModule.java
index bb2e6ed4de2b87..a6091e924e7ad9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/HelpModule.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/HelpModule.java
@@ -106,7 +106,7 @@ public void setUpByZip(String path) throws IOException, UserException {
                 if (size > 0) {
                     BufferedReader reader = new BufferedReader(new InputStreamReader(zf.getInputStream(entry),
                                                                                      CHARSET_UTF_8));
-                    while ((line = reader.readLine()) != null ) {
+                    while ((line = reader.readLine()) != null) {
                         lines.add(line);
                     }
                     reader.close();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/JournalObservable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/JournalObservable.java
index f9fda4a1e29224..d9b19f7533c198 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/JournalObservable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/JournalObservable.java
@@ -89,7 +89,7 @@ public void notifyObservers(Long journalId) {
         int pos = upperBound(arrLocal, size, journalId);
         LOG.debug("notify observers: journal: {}, pos: {}, size: {}, obs: {}", journalId, pos, size, obs);
 
-        for (int i = 0; i < pos; i ++) {
+        for (int i = 0; i < pos; i++) {
             JournalObserver observer = ((JournalObserver) arrLocal[i]);
             observer.update();
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/MasterOpExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/MasterOpExecutor.java
index 04a2961312ebbc..f6ebc7e0f81ae4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/MasterOpExecutor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/MasterOpExecutor.java
@@ -113,7 +113,7 @@ private void forward() throws Exception {
             if (shouldNotRetry || e.getType() == TTransportException.TIMED_OUT) {
                 throw e;
             } else {
-                LOG.warn("Forward statement "+ ctx.getStmtId() +" to Master " + thriftAddress + " twice", e);
+                LOG.warn("Forward statement " + ctx.getStmtId() + " to Master " + thriftAddress + " twice", e);
                 result = client.forward(params);
                 isReturnToPool = true;
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/MultiLoadMgr.java b/fe/fe-core/src/main/java/org/apache/doris/qe/MultiLoadMgr.java
index e6b6af986e77e4..a9cc6713ca6cd8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/MultiLoadMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/MultiLoadMgr.java
@@ -111,7 +111,7 @@ public void load(TMiniLoadRequest request) throws DdlException {
         if (CollectionUtils.isNotEmpty(request.getFileSize())
                 && request.getFileSize().size() != request.getFiles().size()) {
             throw new DdlException("files count and file size count not match: [" + request.getFileSize().size()
-                    + "!=" + request.getFiles().size()+"]");
+                    + "!=" + request.getFiles().size() + "]");
         }
         List<Pair<String, Long>> files = Streams.zip(request.getFiles().stream(), request.getFileSize().stream(), Pair::create)
                 .collect(Collectors.toList());
@@ -370,7 +370,7 @@ public LoadStmt toLoadStmt() throws DdlException {
             try {
                 loadStmt.analyze(analyzer);
             } catch (UserException e) {
-               throw new DdlException(e.getMessage());
+                throw new DdlException(e.getMessage());
             }
             return loadStmt;
         }
@@ -438,7 +438,7 @@ public DataDescription toDataDesc() throws DdlException {
             List<String> files = Lists.newArrayList();
             List<Long> fileSizes = Lists.newArrayList();
             Iterator<Map.Entry<String, List<Pair<String, Long>>>> it = filesByLabel.entrySet().iterator();
-            while(it.hasNext()) {
+            while (it.hasNext()) {
                 List> value = it.next().getValue();
                 value.stream().forEach(pair -> {
                     files.add(pair.first);
@@ -449,7 +449,7 @@ public DataDescription toDataDesc() throws DdlException {
             PartitionNames partitionNames = null;
             String fileFormat = properties.get(LoadStmt.KEY_IN_PARAM_FORMAT_TYPE);
             boolean isNegative = properties.get(LoadStmt.KEY_IN_PARAM_NEGATIVE) == null ? false :
-                    Boolean.valueOf(properties.get(LoadStmt.KEY_IN_PARAM_NEGATIVE));
+                    Boolean.parseBoolean(properties.get(LoadStmt.KEY_IN_PARAM_NEGATIVE));
             Expr whereExpr = null;
             LoadTask.MergeType mergeType = LoadTask.MergeType.APPEND;
             Expr deleteCondition = null;
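Aside: the Boolean.valueOf -> Boolean.parseBoolean swap above (and the parseLong/parseInt swaps in StmtExecutor further down) returns a primitive directly instead of allocating a wrapper object that is immediately unboxed. A quick illustration:

    public class ParseVsValueOf {
        public static void main(String[] args) {
            boolean negative = Boolean.parseBoolean("true"); // primitive, no boxing
            Boolean boxed = Boolean.valueOf("true");         // boxed wrapper object
            long loadedRows = Long.parseLong("42");          // primitive long
            System.out.println(negative + " " + boxed + " " + loadedRows);
        }
    }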
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/QeProcessorImpl.java b/fe/fe-core/src/main/java/org/apache/doris/qe/QeProcessorImpl.java
index c4bff97a6cdfcf..5bc401218d6a83 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/QeProcessorImpl.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/QeProcessorImpl.java
@@ -94,8 +94,8 @@ public void registerInstances(TUniqueId queryId, Integer instancesNum) throws Us
             throw new UserException("query not exists in coordinatorMap:" + DebugUtil.printId(queryId));
         }
         QueryInfo queryInfo = coordinatorMap.get(queryId);
-        if (queryInfo.getConnectContext() != null &&
-                !Strings.isNullOrEmpty(queryInfo.getConnectContext().getQualifiedUser())
+        if (queryInfo.getConnectContext() != null
+                && !Strings.isNullOrEmpty(queryInfo.getConnectContext().getQualifiedUser())
         ) {
             String user = queryInfo.getConnectContext().getQualifiedUser();
             long maxQueryInstances = queryInfo.getConnectContext().getCatalog().getAuth().getMaxQueryInstances(user);
@@ -126,8 +126,8 @@ public void unregisterQuery(TUniqueId queryId) {
             if (LOG.isDebugEnabled()) {
                 LOG.debug("deregister query id {}", DebugUtil.printId(queryId));
             }
-            if (queryInfo.getConnectContext() != null &&
-                    !Strings.isNullOrEmpty(queryInfo.getConnectContext().getQualifiedUser())
+            if (queryInfo.getConnectContext() != null
+                    && !Strings.isNullOrEmpty(queryInfo.getConnectContext().getQualifiedUser())
             ) {
                 Integer num = queryToInstancesNum.remove(queryId);
                 if (num != null) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/QueryDetailQueue.java b/fe/fe-core/src/main/java/org/apache/doris/qe/QueryDetailQueue.java
index b71884541af27f..859225acb525cc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/QueryDetailQueue.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/QueryDetailQueue.java
@@ -57,7 +57,7 @@ public static synchronized void addOrUpdateQueryDetail(QueryDetail queryDetail)
     public static synchronized List<QueryDetail> getQueryDetails(long eventTime) {
         List<QueryDetail> results = Lists.newArrayList();
         Iterator<QueryDetail> it = totalQueries.iterator();
-        while(it.hasNext()) {
+        while (it.hasNext()) {
             QueryDetail queryDetail = it.next();
             if (queryDetail.getEventTime() > eventTime) {
                 results.add(queryDetail);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/QueryStatisticsItem.java b/fe/fe-core/src/main/java/org/apache/doris/qe/QueryStatisticsItem.java
index fdc2fd964fcabf..34ceed6ad591a4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/QueryStatisticsItem.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/QueryStatisticsItem.java
@@ -83,7 +83,7 @@ public RuntimeProfile getQueryProfile() {
         return queryProfile;
     }
 
-    public boolean getIsReportSucc () {
+    public boolean getIsReportSucc() {
         return isReportSucc;
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/RuntimeFilterTypeHelper.java b/fe/fe-core/src/main/java/org/apache/doris/qe/RuntimeFilterTypeHelper.java
index e02bb6200c3b50..94aaa81802721d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/RuntimeFilterTypeHelper.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/RuntimeFilterTypeHelper.java
@@ -39,9 +39,10 @@
 public class RuntimeFilterTypeHelper {
     private static final Logger LOG = LogManager.getLogger(RuntimeFilterTypeHelper.class);
 
-    public final static long ALLOWED_MASK = (TRuntimeFilterType.IN.getValue() |
-            TRuntimeFilterType.BLOOM.getValue() | TRuntimeFilterType.MIN_MAX.getValue() |
-            TRuntimeFilterType.IN_OR_BLOOM.getValue());
+    public final static long ALLOWED_MASK = (TRuntimeFilterType.IN.getValue()
+            | TRuntimeFilterType.BLOOM.getValue()
+            | TRuntimeFilterType.MIN_MAX.getValue()
+            | TRuntimeFilterType.IN_OR_BLOOM.getValue());
 
     private final static Map<String, Long> varValueSet = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
index ce9e9a40d0e956..343f89bc4332e2 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
@@ -451,6 +451,7 @@ public String getBlockEncryptionMode() {
     public void setBlockEncryptionMode(String blockEncryptionMode) {
         this.blockEncryptionMode = blockEncryptionMode;
     }
+
     public long getMaxExecMemByte() {
         return maxExecMemByte;
     }
@@ -479,7 +480,9 @@ public void setSqlMode(long sqlMode) {
         this.sqlMode = sqlMode;
     }
 
-    public boolean isEnableJoinReorderBasedCost() { return enableJoinReorderBasedCost; }
+    public boolean isEnableJoinReorderBasedCost() {
+        return enableJoinReorderBasedCost;
+    }
 
     public boolean isAutoCommit() {
         return autoCommit;
@@ -596,6 +599,7 @@ public boolean isSqlQuoteShowCreate() {
     public void setSqlQuoteShowCreate(boolean sqlQuoteShowCreate) {
         this.sqlQuoteShowCreate = sqlQuoteShowCreate;
     }
+
     public void setLoadMemLimit(long loadMemLimit) {
         this.loadMemLimit = loadMemLimit;
     }
@@ -632,9 +636,13 @@ public void setPreferJoinMethod(String preferJoinMethod) {
         this.preferJoinMethod = preferJoinMethod;
     }
 
-    public boolean isEnableFoldConstantByBe() { return enableFoldConstantByBe; }
+    public boolean isEnableFoldConstantByBe() {
+        return enableFoldConstantByBe;
+    }
 
-    public void setEnableFoldConstantByBe(boolean foldConstantByBe) {this.enableFoldConstantByBe = foldConstantByBe; }
+    public void setEnableFoldConstantByBe(boolean foldConstantByBe) {
+        this.enableFoldConstantByBe = foldConstantByBe;
+    }
 
     public int getParallelExecInstanceNum() {
         return parallelExecInstanceNum;
@@ -894,7 +902,9 @@ public boolean isEnableInferPredicate() {
         return enableInferPredicate;
     }
 
-    public void setEnableInferPredicate(boolean enableInferPredicate) { this.enableInferPredicate = enableInferPredicate; }
+    public void setEnableInferPredicate(boolean enableInferPredicate) {
+        this.enableInferPredicate = enableInferPredicate;
+    }
 
     public boolean isEnableProjection() {
         return enableProjection;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SetExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SetExecutor.java
index 5039b7a11bced3..a803fb5beb919e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/SetExecutor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SetExecutor.java
@@ -45,7 +45,7 @@ private void setVariable(SetVar var) throws DdlException {
             // Set password
             SetPassVar setPassVar = (SetPassVar) var;
             ctx.getCatalog().getAuth().setPassword(setPassVar);
-        } else if(var instanceof SetLdapPassVar){
+        } else if (var instanceof SetLdapPassVar) {
             SetLdapPassVar setLdapPassVar = (SetLdapPassVar) var;
             ctx.getCatalog().getAuth().setLdapPassword(setLdapPassVar);
         } else if (var instanceof SetNamesVar) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java
index 56d6dea6636283..580b79378eb08f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java
@@ -408,8 +408,7 @@ private void handleShowEngines() {
     private void handleShowFunctions() throws AnalysisException {
         ShowFunctionsStmt showStmt = (ShowFunctionsStmt) stmt;
         Database db = ctx.getCatalog().getDbOrAnalysisException(showStmt.getDbName());
-        List<Function> functions = showStmt.getIsBuiltin() ? ctx.getCatalog().getBuiltinFunctions() :
-            db.getFunctions();
+        List<Function> functions = showStmt.getIsBuiltin() ? ctx.getCatalog().getBuiltinFunctions() : db.getFunctions();
 
         List<List<Comparable>> rowSet = Lists.newArrayList();
         for (Function function : functions) {
@@ -443,8 +442,8 @@ private void handleShowFunctions() throws AnalysisException {
 
         // Only success
         ShowResultSetMetaData showMetaData = showStmt.getIsVerbose() ? showStmt.getMetaData() :
-            ShowResultSetMetaData.builder()
-                .addColumn(new Column("Function Name", ScalarType.createVarchar(256))).build();
+                ShowResultSetMetaData.builder()
+                        .addColumn(new Column("Function Name", ScalarType.createVarchar(256))).build();
         resultSet = new ShowResultSet(showMetaData, resultRowSet);
     }
 
@@ -1339,7 +1338,6 @@ private void handleShowDelete() throws AnalysisException {
         long dbId = db.getId();
 
         DeleteHandler deleteHandler = catalog.getDeleteHandler();
-        Load load = catalog.getLoadInstance();
         List<List<Comparable>> deleteInfos = deleteHandler.getDeleteInfosByDb(dbId);
         List<List<String>> rows = Lists.newArrayList();
         for (List<Comparable> deleteInfo : deleteInfos) {
@@ -1398,7 +1396,7 @@ private void handleShowPartitions() throws AnalysisException {
         ProcNodeInterface procNodeI = showStmt.getNode();
         Preconditions.checkNotNull(procNodeI);
         List<List<String>> rows = ((PartitionsProcDir) procNodeI).fetchResultByFilter(showStmt.getFilterMap(),
-            showStmt.getOrderByPairs(), showStmt.getLimitElement()).getRows();
+                showStmt.getOrderByPairs(), showStmt.getLimitElement()).getRows();
         resultSet = new ShowResultSet(showStmt.getMetaData(), rows);
     }
 
@@ -1545,7 +1543,7 @@ private void handleShowTablet() throws AnalysisException {
                 if (sizeLimit > -1 && tabletInfos.size() < sizeLimit) {
                     tabletInfos.clear();
                 } else if (sizeLimit > -1) {
-                    tabletInfos = tabletInfos.subList((int)showStmt.getOffset(), (int)sizeLimit);
+                    tabletInfos = tabletInfos.subList((int) showStmt.getOffset(), (int) sizeLimit);
                 }
 
                 // order by
@@ -1648,7 +1646,7 @@ private void handleShowExport() throws AnalysisException {
             states = Sets.newHashSet(state);
         }
         List<List<Comparable>> infos = exportMgr.getExportJobInfosByIdOrState(
-                dbId, showExportStmt.getJobId(), showExportStmt.getLabel(),showExportStmt.isLabelUseLike(),  states,
+                dbId, showExportStmt.getJobId(), showExportStmt.getLabel(), showExportStmt.isLabelUseLike(), states,
                 showExportStmt.getOrderByPairs(), showExportStmt.getLimit());
 
         resultSet = new ShowResultSet(showExportStmt.getMetaData(), infos);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowResultSet.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowResultSet.java
index ccd0b8cfa2492b..b373529349b79d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowResultSet.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowResultSet.java
@@ -41,7 +41,7 @@ public ShowResultSet(ResultSetMetaData metaData, List<List<String>> resultRows)
 
     public ShowResultSet(TShowResultSet resultSet) {
         List<Column> columns = Lists.newArrayList();
-        for (int i = 0; i < resultSet.getMetaData().getColumnsSize(); i ++) {
+        for (int i = 0; i < resultSet.getMetaData().getColumnsSize(); i++) {
             TColumnDefinition definition = (TColumnDefinition) resultSet.getMetaData().getColumns().get(i);
             columns.add(new Column(
                             definition.getColumnName(),
@@ -56,7 +56,7 @@ public ShowResultSet(TShowResultSet resultSet) {
     public TShowResultSet tothrift() {
         TShowResultSet set = new TShowResultSet();
         set.metaData = new TShowResultSetMetaData();
-        for (int i = 0; i < metaData.getColumnCount(); i ++) {
+        for (int i = 0; i < metaData.getColumnCount(); i++) {
             Column definition = metaData.getColumn(i);
             set.metaData.addToColumns(new TColumnDefinition(
                     definition.getName(), definition.getOriginType().toColumnTypeThrift())
@@ -64,9 +64,9 @@ public TShowResultSet tothrift() {
         }
 
         set.resultRows = Lists.newArrayList();
-        for (int i = 0; i < resultRows.size(); i ++) {
+        for (int i = 0; i < resultRows.size(); i++) {
             ArrayList<String> list = Lists.newArrayList();
-            for (int j = 0; j < resultRows.get(i).size(); j ++) {
+            for (int j = 0; j < resultRows.get(i).size(); j++) {
                 list.add(resultRows.get(i).get(j) == null ? FeConstants.null_string : resultRows.get(i).get(j));
             }
             set.resultRows.add(list);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SimpleScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SimpleScheduler.java
index cba43888db7d91..d69b7e2efb0830 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/SimpleScheduler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SimpleScheduler.java
@@ -86,8 +86,8 @@ public static TNetworkAddress getHost(long backendId,
         }
 
         // no backend returned
-        throw new UserException(SystemInfoService.NO_SCAN_NODE_BACKEND_AVAILABLE_MSG +
-                getBackendErrorMsg(locations.stream().map(l -> l.backend_id).collect(Collectors.toList()),
+        throw new UserException(SystemInfoService.NO_SCAN_NODE_BACKEND_AVAILABLE_MSG
+                + getBackendErrorMsg(locations.stream().map(l -> l.backend_id).collect(Collectors.toList()),
                         backends, locations.size()));
     }
 
@@ -118,8 +118,8 @@ public static TScanRangeLocation getLocation(TScanRangeLocation minLocation,
         }
 
         // no backend returned
-        throw new UserException(SystemInfoService.NO_SCAN_NODE_BACKEND_AVAILABLE_MSG +
-                getBackendErrorMsg(locations.stream().map(l -> l.backend_id).collect(Collectors.toList()),
+        throw new UserException(SystemInfoService.NO_SCAN_NODE_BACKEND_AVAILABLE_MSG
+                + getBackendErrorMsg(locations.stream().map(l -> l.backend_id).collect(Collectors.toList()),
                         backends, locations.size()));
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SqlModeHelper.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SqlModeHelper.java
index bf3f2f146d6775..80e45973007fd6 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/SqlModeHelper.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SqlModeHelper.java
@@ -60,7 +60,7 @@ public class SqlModeHelper {
     public static final long MODE_NO_ZERO_DATE = 1L << 24;
     public static final long MODE_INVALID_DATES = 1L << 25;
     public static final long MODE_ERROR_FOR_DIVISION_BY_ZERO = 1L << 26;
-    public static final long MODE_HIGH_NOT_PRECEDENCE = 1L <<29;
+    public static final long MODE_HIGH_NOT_PRECEDENCE = 1L << 29;
     public static final long MODE_NO_ENGINE_SUBSTITUTION = 1L << 30;
     public static final long MODE_PAD_CHAR_TO_FULL_LENGTH = 1L << 31;
     public static final long MODE_TIME_TRUNCATE_FRACTIONAL = 1L << 32;
@@ -75,15 +75,12 @@ public class SqlModeHelper {
     public final static long MODE_DEFAULT = 0L;
 
     public final static long MODE_ALLOWED_MASK =
-            (MODE_REAL_AS_FLOAT | MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES |
-                    MODE_IGNORE_SPACE | MODE_NOT_USED | MODE_ONLY_FULL_GROUP_BY |
-                    MODE_NO_UNSIGNED_SUBTRACTION | MODE_NO_DIR_IN_CREATE |
-                    MODE_NO_AUTO_VALUE_ON_ZERO | MODE_NO_BACKSLASH_ESCAPES |
-                    MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES | MODE_NO_ZERO_IN_DATE |
-                    MODE_NO_ZERO_DATE | MODE_INVALID_DATES | MODE_ERROR_FOR_DIVISION_BY_ZERO |
-                    MODE_HIGH_NOT_PRECEDENCE | MODE_NO_ENGINE_SUBSTITUTION |
-                    MODE_PAD_CHAR_TO_FULL_LENGTH | MODE_TRADITIONAL | MODE_ANSI |
-                    MODE_TIME_TRUNCATE_FRACTIONAL);
+            (MODE_REAL_AS_FLOAT | MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | MODE_NOT_USED
+                    | MODE_ONLY_FULL_GROUP_BY | MODE_NO_UNSIGNED_SUBTRACTION | MODE_NO_DIR_IN_CREATE
+                    | MODE_NO_AUTO_VALUE_ON_ZERO | MODE_NO_BACKSLASH_ESCAPES | MODE_STRICT_TRANS_TABLES
+                    | MODE_STRICT_ALL_TABLES | MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | MODE_INVALID_DATES
+                    | MODE_ERROR_FOR_DIVISION_BY_ZERO | MODE_HIGH_NOT_PRECEDENCE | MODE_NO_ENGINE_SUBSTITUTION
+                    | MODE_PAD_CHAR_TO_FULL_LENGTH | MODE_TRADITIONAL | MODE_ANSI | MODE_TIME_TRUNCATE_FRACTIONAL);
 
     public final static long MODE_COMBINE_MASK = (MODE_ANSI | MODE_TRADITIONAL);
 
@@ -115,11 +112,11 @@ public class SqlModeHelper {
         sqlModeSet.put("PAD_CHAR_TO_FULL_LENGTH", MODE_PAD_CHAR_TO_FULL_LENGTH);
         sqlModeSet.put("TIME_TRUNCATE_FRACTIONAL", MODE_TIME_TRUNCATE_FRACTIONAL);
 
-        combineModeSet.put("ANSI", (MODE_REAL_AS_FLOAT | MODE_PIPES_AS_CONCAT |
-                MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | MODE_ONLY_FULL_GROUP_BY));
-        combineModeSet.put("TRADITIONAL", (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES |
-                MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | MODE_ERROR_FOR_DIVISION_BY_ZERO |
-                MODE_NO_ENGINE_SUBSTITUTION));
+        combineModeSet.put("ANSI", (MODE_REAL_AS_FLOAT | MODE_PIPES_AS_CONCAT
+                | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | MODE_ONLY_FULL_GROUP_BY));
+        combineModeSet.put("TRADITIONAL", (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES
+                | MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | MODE_ERROR_FOR_DIVISION_BY_ZERO
+                | MODE_NO_ENGINE_SUBSTITUTION));
     }
 
     // convert long type SQL MODE to string type that user can read
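Aside: each SQL mode above is one bit in a long, so combined modes such as ANSI are bitwise ORs of their members and membership is a bitwise AND. A toy sketch with assumed bit positions (only the pattern matches the code above):

    public class SqlModeBits {
        static final long MODE_PIPES_AS_CONCAT = 1L << 1; // assumed position
        static final long MODE_ANSI_QUOTES = 1L << 2;     // assumed position

        public static void main(String[] args) {
            long mode = MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES; // combine modes
            System.out.println((mode & MODE_ANSI_QUOTES) != 0);  // true: bit set
            System.out.println((mode & (1L << 29)) != 0);        // false: bit clear
        }
    }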
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java
index b4022f6838b4e7..d055b7a8b19e12 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java
@@ -223,8 +223,8 @@ private void initProfile(QueryPlannerProfile plannerProfile, boolean waiteBeRepo
             summaryProfile.addInfoString(ProfileManager.TOTAL_TIME, DebugUtil.getPrettyStringMs(totalTimeMs));
             summaryProfile.addInfoString(ProfileManager.QUERY_TYPE, queryType);
             summaryProfile.addInfoString(ProfileManager.QUERY_STATE,
-                    !waiteBeReport && context.getState().getStateType().equals(MysqlStateType.OK) ?
-                            "RUNNING" : context.getState().toString());
+                    !waiteBeReport && context.getState().getStateType().equals(MysqlStateType.OK)
+                            ? "RUNNING" : context.getState().toString());
             summaryProfile.addInfoString(ProfileManager.DORIS_VERSION, Version.DORIS_BUILD_VERSION);
             summaryProfile.addInfoString(ProfileManager.USER, context.getQualifiedUser());
             summaryProfile.addInfoString(ProfileManager.DEFAULT_DB, context.getDatabase());
@@ -239,8 +239,8 @@ private void initProfile(QueryPlannerProfile plannerProfile, boolean waiteBeRepo
                     waiteBeReport ? TimeUtils.longToTimeString(currentTimestamp) : "N/A");
             summaryProfile.addInfoString(ProfileManager.TOTAL_TIME, DebugUtil.getPrettyStringMs(totalTimeMs));
             summaryProfile.addInfoString(ProfileManager.QUERY_STATE,
-                    !waiteBeReport && context.getState().getStateType().equals(MysqlStateType.OK) ?
-                            "RUNNING" : context.getState().toString());
+                    !waiteBeReport && context.getState().getStateType().equals(MysqlStateType.OK)
+                            ? "RUNNING" : context.getState().toString());
         }
         plannerProfile.initRuntimeProfile(plannerRuntimeProfile);
 
@@ -338,8 +338,8 @@ public void execute(TUniqueId queryId) throws Exception {
                         // If goes here, which means we can't find a valid Master FE(some error happens).
                         // To avoid endless forward, throw exception here.
                         throw new UserException("The statement has been forwarded to master FE("
-                                + Catalog.getCurrentCatalog().getSelfNode().first + ") and failed to execute" +
-                                " because Master FE is not ready. You may need to check FE's status");
+                                + Catalog.getCurrentCatalog().getSelfNode().first + ") and failed to execute"
+                                + " because Master FE is not ready. You may need to check FE's status");
                     }
                     forwardToMaster();
                     if (masterOpExecutor != null && masterOpExecutor.getQueryId() != null) {
@@ -1144,8 +1144,8 @@ public int executeForTxn(InsertStmt insertStmt)
         if (context.isTxnIniting()) { // first time, begin txn
             beginTxn(insertStmt.getDb(), insertStmt.getTbl());
         }
-        if (!context.getTxnEntry().getTxnConf().getDb().equals(insertStmt.getDb()) ||
-                !context.getTxnEntry().getTxnConf().getTbl().equals(insertStmt.getTbl())) {
+        if (!context.getTxnEntry().getTxnConf().getDb().equals(insertStmt.getDb())
+                || !context.getTxnEntry().getTxnConf().getTbl().equals(insertStmt.getTbl())) {
             throw new TException("Only one table can be inserted in one transaction.");
         }
 
@@ -1315,18 +1315,17 @@ private void handleInsertStmt() throws Exception {
                 LOG.debug("delta files is {}", coord.getDeltaUrls());
 
                 if (coord.getLoadCounters().get(LoadEtlTask.DPP_NORMAL_ALL) != null) {
-                    loadedRows = Long.valueOf(coord.getLoadCounters().get(LoadEtlTask.DPP_NORMAL_ALL));
+                    loadedRows = Long.parseLong(coord.getLoadCounters().get(LoadEtlTask.DPP_NORMAL_ALL));
                 }
                 if (coord.getLoadCounters().get(LoadEtlTask.DPP_ABNORMAL_ALL) != null) {
-                    filteredRows = Integer.valueOf(coord.getLoadCounters().get(LoadEtlTask.DPP_ABNORMAL_ALL));
+                    filteredRows = Integer.parseInt(coord.getLoadCounters().get(LoadEtlTask.DPP_ABNORMAL_ALL));
                 }
 
                 // if in strict mode, insert will fail if there are filtered rows
                 if (context.getSessionVariable().getEnableInsertStrict()) {
                     if (filteredRows > 0) {
-                        context.getState().setError(ErrorCode.ERR_FAILED_WHEN_INSERT, "Insert has filtered data in strict mode, " +
-                                "tracking_url="
-                                + coord.getTrackingUrl());
+                        context.getState().setError(ErrorCode.ERR_FAILED_WHEN_INSERT,
+                                "Insert has filtered data in strict mode, tracking_url=" + coord.getTrackingUrl());
                         return;
                     }
                 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java
index 27d30f4a560552..8c7d794123cb93 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java
@@ -221,9 +221,10 @@ private CacheMode innerCheckCacheMode(long now) {
         if (now == 0) {
             now = nowtime();
         }
-        if (enableSqlCache() &&
-                (now - latestTable.latestTime) >= Config.cache_last_version_interval_second * 1000) {
-            LOG.debug("TIME:{},{},{}", now, latestTable.latestTime, Config.cache_last_version_interval_second*1000);
+        if (enableSqlCache()
+                && (now - latestTable.latestTime) >= Config.cache_last_version_interval_second * 1000L) {
+            LOG.debug("TIME:{},{},{}", now, latestTable.latestTime,
+                    Config.cache_last_version_interval_second * 1000L);
             cache = new SqlCache(this.queryId, this.selectStmt);
             ((SqlCache) cache).setCacheInfo(this.latestTable, allViewExpandStmtListStr);
             MetricRepo.COUNTER_CACHE_MODE_SQL.increase(1L);
@@ -339,14 +340,14 @@ private void getPartitionKeyFromWhereClause(Expr expr, Column partColumn,
         if (expr instanceof CompoundPredicate) {
             CompoundPredicate cp = (CompoundPredicate) expr;
             if (cp.getOp() == CompoundPredicate.Operator.AND) {
-                if (cp.getChildren().size() == 2 && cp.getChild(0) instanceof BinaryPredicate &&
-                        cp.getChild(1) instanceof BinaryPredicate) {
+                if (cp.getChildren().size() == 2 && cp.getChild(0) instanceof BinaryPredicate
+                        && cp.getChild(1) instanceof BinaryPredicate) {
                     BinaryPredicate leftPre = (BinaryPredicate) cp.getChild(0);
                     BinaryPredicate rightPre = (BinaryPredicate) cp.getChild(1);
                     String leftColumn = getColumnName(leftPre);
                     String rightColumn = getColumnName(rightPre);
-                    if (leftColumn.equalsIgnoreCase(partColumn.getName()) &&
-                            rightColumn.equalsIgnoreCase(partColumn.getName())) {
+                    if (leftColumn.equalsIgnoreCase(partColumn.getName())
+                            && rightColumn.equalsIgnoreCase(partColumn.getName())) {
                         compoundPredicates.add(cp);
                     }
                 }
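Aside: the 1000 -> 1000L change in the condition above is not cosmetic: with an int literal the multiplication runs in 32-bit arithmetic and can overflow before the result widens to long. A quick demonstration:

    public class MillisOverflow {
        public static void main(String[] args) {
            int intervalSecond = 4_294_967;             // about 49.7 days in seconds
            System.out.println(intervalSecond * 1000);  // -296: int overflow
            System.out.println(intervalSecond * 1000L); // 4294967000: 64-bit math
        }
    }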
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheBeProxy.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheBeProxy.java
index cb30cbb576d626..bb65e69bcca989 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheBeProxy.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheBeProxy.java
@@ -109,6 +109,7 @@ public void clearCache(InternalService.PClearCacheRequest request, List
                 try {
                     Thread.sleep(1000); //sleep 1 second
                 } catch (Exception e) {
+                    // CHECKSTYLE IGNORE THIS LINE
                 }
             }
             if (retry >= 3) {
@@ -135,7 +136,6 @@ protected boolean clearCache(InternalService.PClearCacheRequest request, Backend
             }
         } catch (Exception e) {
             LOG.warn("clear cache exception, backendId {}", backend.getId(), e);
-        } finally {
         }
         return false;
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/PartitionRange.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/PartitionRange.java
index d4194f380f5723..1ae7845dbe2d1b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/PartitionRange.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/PartitionRange.java
@@ -244,6 +244,7 @@ private Date getDateValue(LiteralExpr expr) {
             try {
                 dt = df8.parse(String.valueOf(value));
             } catch (Exception e) {
+                // CHECKSTYLE IGNORE THIS LINE
             }
             return dt;
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExprRewriter.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExprRewriter.java
index 9b8ee0d8e0681a..1daa160671d6ed 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExprRewriter.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExprRewriter.java
@@ -85,7 +85,7 @@ public Expr rewrite(Expr expr, Analyzer analyzer, ClauseType clauseType) throws
         Expr rewrittenExpr = expr;
         do {
             oldNumChanges = numChanges;
-            for (ExprRewriteRule rule: rules) {
+            for (ExprRewriteRule rule : rules) {
                 // when foldConstantByBe is on, fold all constant expr by BE instead of applying FoldConstantsRule in FE.
                 if (rule instanceof FoldConstantsRule && analyzer.safeIsEnableFoldConstantByBe()) {
                     continue;
@@ -94,7 +94,7 @@ public Expr rewrite(Expr expr, Analyzer analyzer, ClauseType clauseType) throws
             }
         } while (oldNumChanges != numChanges);
 
-        for (ExprRewriteRule rule: onceRules) {
+        for (ExprRewriteRule rule : onceRules) {
             rewrittenExpr = applyRuleOnce(rewrittenExpr, rule, analyzer, clauseType);
         }
         return rewrittenExpr;
@@ -164,7 +164,15 @@ public void rewriteList(List<Expr> exprs, Analyzer analyzer) throws AnalysisExce
         }
     }
 
-    public void reset() { numChanges = 0; }
-    public boolean changed() { return numChanges > 0; }
-    public int getNumChanges() { return numChanges; }
+    public void reset() {
+        numChanges = 0;
+    }
+
+    public boolean changed() {
+        return numChanges > 0;
+    }
+
+    public int getNumChanges() {
+        return numChanges;
+    }
 }
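Aside: the do/while in rewrite() above applies the rule list until a full pass changes nothing, using numChanges as the progress counter. A toy fixpoint loop of the same shape (string rules stand in for expression rewrites):

    import java.util.List;
    import java.util.function.UnaryOperator;

    public class FixpointRewrite {
        public static void main(String[] args) {
            List<UnaryOperator<String>> rules = List.of(
                    s -> s.replace("1 + 1", "2"),  // toy constant-folding rules
                    s -> s.replace("2 + 2", "4"));
            String expr = "1 + 1 + 1 + 1";
            String before;
            do {
                before = expr;
                for (UnaryOperator<String> rule : rules) {
                    expr = rule.apply(expr);
                }
            } while (!expr.equals(before)); // repeat only while a pass changed something
            System.out.println(expr);       // prints "4"
        }
    }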
diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExtractCommonFactorsRule.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExtractCommonFactorsRule.java
index 406d5451783f4c..dca24e664b73dc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExtractCommonFactorsRule.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExtractCommonFactorsRule.java
@@ -198,8 +198,8 @@ private Expr findWideRangeExpr(List<List<Expr>> exprList) {
                 }
                 SlotRef columnName = (SlotRef) predicate.getChild(0);
                 if (predicate instanceof BinaryPredicate) {
-                    Range<ColumnBound> predicateRange = ((BinaryPredicate)predicate).convertToRange();
-                    if (predicateRange == null){
+                    Range<ColumnBound> predicateRange = ((BinaryPredicate) predicate).convertToRange();
+                    if (predicateRange == null) {
                         continue;
                     }
                     Range<ColumnBound> range = columnNameToRange.get(columnName);
@@ -233,7 +233,7 @@ private Expr findWideRangeExpr(List<List<Expr>> exprList) {
 
         // 2. merge clause
         Map<SlotRef, RangeSet<ColumnBound>> resultRangeMap = Maps.newHashMap();
-        for (Map.Entry<SlotRef, Range<ColumnBound>> entry: columnNameToRangeList.get(0).entrySet()) {
+        for (Map.Entry<SlotRef, Range<ColumnBound>> entry : columnNameToRangeList.get(0).entrySet()) {
             RangeSet<ColumnBound> rangeSet = TreeRangeSet.create();
             rangeSet.add(entry.getValue());
             resultRangeMap.put(entry.getKey(), rangeSet);
@@ -310,7 +310,7 @@ private boolean singleColumnPredicate(Expr expr) {
     private Map<SlotRef, Range<ColumnBound>> mergeTwoClauseRange(Map<SlotRef, Range<ColumnBound>> clause1,
                                                                  Map<SlotRef, Range<ColumnBound>> clause2) {
         Map<SlotRef, Range<ColumnBound>> result = Maps.newHashMap();
-        for (Map.Entry<SlotRef, Range<ColumnBound>> clause1Entry: clause1.entrySet()) {
+        for (Map.Entry<SlotRef, Range<ColumnBound>> clause1Entry : clause1.entrySet()) {
             SlotRef columnName = clause1Entry.getKey();
             Range<ColumnBound> clause2Value = clause2.get(columnName);
             if (clause2Value == null) {
@@ -336,7 +336,7 @@ private Map<SlotRef, Range<ColumnBound>> mergeTwoClauseRange(Map<SlotRef,
     private Map<SlotRef, InPredicate> mergeTwoClauseIn(Map<SlotRef, InPredicate> clause1,
                                                        Map<SlotRef, InPredicate> clause2) {
         Map<SlotRef, InPredicate> result = Maps.newHashMap();
-        for (Map.Entry<SlotRef, InPredicate> clause1Entry: clause1.entrySet()) {
+        for (Map.Entry<SlotRef, InPredicate> clause1Entry : clause1.entrySet()) {
             SlotRef columnName = clause1Entry.getKey();
             InPredicate clause2Value = clause2.get(columnName);
             if (clause2Value == null) {
@@ -434,7 +434,7 @@ public Expr rangeSetToCompoundPredicate(SlotRef slotRef, RangeSet r
                     binaryPredicateList.add(new BinaryPredicate(BinaryPredicate.Operator.GE, slotRef, lowerBound));
                 }
             }
-            if (upperBound !=null) {
+            if (upperBound != null) {
                 if (range.upperBoundType() == BoundType.OPEN) {
                     binaryPredicateList.add(new BinaryPredicate(BinaryPredicate.Operator.LT, slotRef, upperBound));
                 } else {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/FEFunctions.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/FEFunctions.java
index d9932ff27f90ad..795fa447deb03e 100755
--- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/FEFunctions.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/FEFunctions.java
@@ -338,7 +338,7 @@ public static DateLiteral timestamp(LiteralExpr arg) throws AnalysisException {
 
     @FEFunction(name = "floor", argTypes = { "DOUBLE"}, returnType = "BIGINT")
     public static IntLiteral floor(LiteralExpr expr) throws AnalysisException {
-        long result = (long)Math.floor(expr.getDoubleValue());
+        long result = (long) Math.floor(expr.getDoubleValue());
         return new IntLiteral(result, Type.BIGINT);
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/FoldConstantsRule.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/FoldConstantsRule.java
index 563a3433b5ada6..0db26aadf03ddb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/FoldConstantsRule.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/FoldConstantsRule.java
@@ -143,7 +143,7 @@ public boolean apply(Map<String, Expr> exprMap, Analyzer analyzer, boolean chang
         Map<String, Map<String, Expr>> sysVarsMap = new HashMap<>();
         // map to collect InformationFunction
         Map<String, Map<String, Expr>> infoFnsMap = new HashMap<>();
-        for (Map.Entry<String, Expr> entry : exprMap.entrySet()){
+        for (Map.Entry<String, Expr> entry : exprMap.entrySet()) {
             Map<String, Expr> constMap = new HashMap<>();
             Map<String, Expr> oriConstMap = new HashMap<>();
             Map<String, Expr> sysVarMap = new HashMap<>();
@@ -193,7 +193,7 @@ public boolean apply(Map<String, Expr> exprMap, Analyzer analyzer, boolean chang
      * @throws AnalysisException
      */
     // public only for unit test
-    public void getConstExpr(Expr expr, Map<String,Expr> constExprMap, Map<String,Expr> oriConstMap,
+    public void getConstExpr(Expr expr, Map<String, Expr> constExprMap, Map<String, Expr> oriConstMap,
                              Analyzer analyzer, Map<String, Expr> sysVarMap, Map<String, Expr> infoFnMap)
             throws AnalysisException {
         if (expr.isConstant()) {
@@ -231,7 +231,7 @@ public void getConstExpr(Expr expr, Map<String, Expr> constExprMap, Map<String,
-    private void recursiveGetChildrenConstExpr(Expr expr, Map<String,Expr> constExprMap, Map<String,Expr> oriConstMap,
+    private void recursiveGetChildrenConstExpr(Expr expr, Map<String, Expr> constExprMap, Map<String, Expr> oriConstMap,
                                                Analyzer analyzer, Map<String, Expr> sysVarMap,
                                                Map<String, Expr> infoFnMap) throws AnalysisException {
         for (int i = 0; i < expr.getChildren().size(); i++) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/InferFiltersRule.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/InferFiltersRule.java
index edfae209057fd8..ef8f12e5b33b23 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/InferFiltersRule.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/InferFiltersRule.java
@@ -157,7 +157,7 @@ private void initAllStructure(Expr conjunct,
                                   Analyzer analyzer,
                                   ExprRewriter.ClauseType clauseType) {
         if (conjunct instanceof CompoundPredicate
-            && ((CompoundPredicate) conjunct).getOp() == CompoundPredicate.Operator.AND) {
+                && ((CompoundPredicate) conjunct).getOp() == CompoundPredicate.Operator.AND) {
             for (int index = 0; index < conjunct.getChildren().size(); ++index) {
                 initAllStructure(conjunct.getChild(index), slotEqSlotExpr,
                         slotEqSlotDeDuplication, slotToLiteralExpr,
@@ -168,8 +168,8 @@ private void initAllStructure(Expr conjunct,
         }
 
         if (conjunct instanceof BinaryPredicate
-            && conjunct.getChild(0) != null
-            && conjunct.getChild(1) != null) {
+                && conjunct.getChild(0) != null
+                && conjunct.getChild(1) != null) {
             if (conjunct.getChild(0).unwrapSlotRef() instanceof SlotRef
                     && conjunct.getChild(1) instanceof LiteralExpr) {
                 Pair<Expr, Expr> pair = new Pair<>(conjunct.getChild(0).unwrapSlotRef(), conjunct.getChild(1));
@@ -189,7 +189,7 @@ private void initAllStructure(Expr conjunct,
                 Pair<Expr, Expr> eqPair = new Pair<>(conjunct.getChild(1).unwrapSlotRef(),
                                                      conjunct.getChild(0).unwrapSlotRef());
                 if (!slotEqSlotDeDuplication.contains(pair)
-                    && !slotEqSlotDeDuplication.contains(eqPair)) {
+                        && !slotEqSlotDeDuplication.contains(eqPair)) {
                     slotEqSlotDeDuplication.add(pair);
                     slotEqSlotExpr.add(conjunct);
                     if (clauseType == ExprRewriter.ClauseType.ON_CLAUSE) {
@@ -202,7 +202,7 @@ private void initAllStructure(Expr conjunct,
                     && conjunct.getChild(0) != null
                     && conjunct.getChild(0).unwrapSlotRef() instanceof SlotRef) {
             if (!isNullDeDuplication.contains(conjunct.getChild(0).unwrapSlotRef())
-                && ((IsNullPredicate) conjunct).isNotNull()) {
+                    && ((IsNullPredicate) conjunct).isNotNull()) {
                 isNullDeDuplication.add(conjunct.getChild(0).unwrapSlotRef());
                 isNullExpr.add(conjunct);
                 if (clauseType == ExprRewriter.ClauseType.ON_CLAUSE) {
@@ -344,26 +344,22 @@ private void buildNewSlotEqSlotPredicate(List<Pair<Integer, Integer>> newSlots,
                                              Analyzer analyzer,
                                              ExprRewriter.ClauseType clauseType) {
         for (Pair<Integer, Integer> slotPair : newSlots) {
-           Pair<Expr, Expr> pair = new Pair<>(
-                   warshallArraySubscriptToExpr.get(slotPair.first), warshallArraySubscriptToExpr.get(slotPair.second));
-           Pair<Expr, Expr> eqPair = new Pair<>(
-                   warshallArraySubscriptToExpr.get(slotPair.second), warshallArraySubscriptToExpr.get(slotPair.first));
-           if (!slotEqSlotDeDuplication.contains(pair)
-                && !slotEqSlotDeDuplication.contains(eqPair)) {
-               slotEqSlotDeDuplication.add(pair);
-               slotEqSlotExpr.add(
-                       new BinaryPredicate(BinaryPredicate.Operator.EQ,
-                               warshallArraySubscriptToExpr.get(slotPair.first),
-                               warshallArraySubscriptToExpr.get(slotPair.second)));
-               if (clauseType == ExprRewriter.ClauseType.ON_CLAUSE) {
-                   analyzer.registerOnSlotEqSlotDeDuplication(pair);
-                   analyzer.registerOnSlotEqSlotExpr(
-                           new BinaryPredicate(BinaryPredicate.Operator.EQ,
-                                   warshallArraySubscriptToExpr.get(slotPair.first),
-                                   warshallArraySubscriptToExpr.get(slotPair.second))
-                   );
-               }
-           }
+            Pair pair = new Pair<>(warshallArraySubscriptToExpr.get(slotPair.first),
+                    warshallArraySubscriptToExpr.get(slotPair.second));
+            Pair eqPair = new Pair<>(warshallArraySubscriptToExpr.get(slotPair.second),
+                    warshallArraySubscriptToExpr.get(slotPair.first));
+            if (!slotEqSlotDeDuplication.contains(pair) && !slotEqSlotDeDuplication.contains(eqPair)) {
+                slotEqSlotDeDuplication.add(pair);
+                slotEqSlotExpr.add(new BinaryPredicate(BinaryPredicate.Operator.EQ,
+                        warshallArraySubscriptToExpr.get(slotPair.first),
+                        warshallArraySubscriptToExpr.get(slotPair.second)));
+                if (clauseType == ExprRewriter.ClauseType.ON_CLAUSE) {
+                    analyzer.registerOnSlotEqSlotDeDuplication(pair);
+                    analyzer.registerOnSlotEqSlotExpr(new BinaryPredicate(BinaryPredicate.Operator.EQ,
+                            warshallArraySubscriptToExpr.get(slotPair.first),
+                            warshallArraySubscriptToExpr.get(slotPair.second)));
+                }
+            }
         }
     }
 
@@ -397,8 +393,7 @@ private void buildNewBinaryPredicate(Expr slotToLiteral,
                 SlotRef leftSlot = conjunct.getChild(0).unwrapSlotRef();
                 SlotRef rightSlot = conjunct.getChild(1).unwrapSlotRef();
 
-                if (leftSlot instanceof SlotRef
-                    && rightSlot instanceof SlotRef) {
+                if (leftSlot instanceof SlotRef && rightSlot instanceof SlotRef) {
                     if (checkSlot.notCheckDescIdEquals(leftSlot)) {
                         addNewBinaryPredicate(genNewBinaryPredicate(slotToLiteral, rightSlot),
                                 slotToLiteralDeDuplication, newExprWithState,
@@ -450,16 +445,18 @@ private boolean checkNeedInfer(JoinOperator joinOperator, boolean needChange, Ex
         boolean ret = false;
         if (clauseType == ExprRewriter.ClauseType.ON_CLAUSE) {
             if (joinOperator.isInnerJoin()
-                || (joinOperator == JoinOperator.LEFT_SEMI_JOIN)
-                || (!needChange && joinOperator == JoinOperator.RIGHT_OUTER_JOIN)
-                || (needChange && (joinOperator == JoinOperator.LEFT_OUTER_JOIN || joinOperator == JoinOperator.LEFT_ANTI_JOIN))) {
+                    || (joinOperator == JoinOperator.LEFT_SEMI_JOIN)
+                    || (!needChange && joinOperator == JoinOperator.RIGHT_OUTER_JOIN)
+                    || (needChange && (joinOperator == JoinOperator.LEFT_OUTER_JOIN
+                    || joinOperator == JoinOperator.LEFT_ANTI_JOIN))) {
                 ret = true;
             }
         } else if (clauseType == ExprRewriter.ClauseType.WHERE_CLAUSE) {
             if (joinOperator.isInnerJoin()
-                || (joinOperator == JoinOperator.LEFT_SEMI_JOIN
-                || (needChange && joinOperator == JoinOperator.RIGHT_OUTER_JOIN))
-                || (!needChange && (joinOperator == JoinOperator.LEFT_OUTER_JOIN || joinOperator == JoinOperator.LEFT_ANTI_JOIN))) {
+                    || (joinOperator == JoinOperator.LEFT_SEMI_JOIN
+                    || (needChange && joinOperator == JoinOperator.RIGHT_OUTER_JOIN))
+                    || (!needChange && (joinOperator == JoinOperator.LEFT_OUTER_JOIN
+                    || joinOperator == JoinOperator.LEFT_ANTI_JOIN))) {
                 ret = true;
             }
         }
@@ -484,7 +481,7 @@ private Expr genNewBinaryPredicate(Expr oldExpr, Expr newSlot) {
      */
     private void addNewBinaryPredicate(Expr expr,
                                        Set> slotToLiteralDeDuplication,
-                                       List > newExprWithState,
+                                       List> newExprWithState,
                                        boolean needAddnewExprWithState,
                                        Analyzer analyzer,
                                        ExprRewriter.ClauseType clauseType) {
@@ -531,15 +528,14 @@ private void buildNewIsNotNullPredicate(Expr expr,
                                             Analyzer analyzer,
                                             ExprRewriter.ClauseType clauseType) {
         if (expr instanceof IsNullPredicate) {
-            IsNullPredicate isNullPredicate = (IsNullPredicate)expr;
+            IsNullPredicate isNullPredicate = (IsNullPredicate) expr;
             SlotRef checkSlot = isNullPredicate.getChild(0).unwrapSlotRef();
             if (checkSlot instanceof SlotRef) {
                 for (Expr conjunct : slotEqSlotExpr) {
                     SlotRef leftSlot = conjunct.getChild(0).unwrapSlotRef();
                     SlotRef rightSlot = conjunct.getChild(1).unwrapSlotRef();
 
-                    if (leftSlot instanceof SlotRef
-                        && rightSlot instanceof SlotRef) {
+                    if (leftSlot instanceof SlotRef && rightSlot instanceof SlotRef) {
                         if (checkSlot.notCheckDescIdEquals(leftSlot) && isNullPredicate.isNotNull()) {
                             addNewIsNotNullPredicate(genNewIsNotNullPredicate(isNullPredicate, rightSlot),
                                     isNullDeDuplication, newExprWithState, analyzer, clauseType);
@@ -619,8 +615,7 @@ private void buildNewInPredicate(Expr inExpr,
                     SlotRef leftSlot = conjunct.getChild(0).unwrapSlotRef();
                     SlotRef rightSlot = conjunct.getChild(1).unwrapSlotRef();
 
-                    if (leftSlot instanceof SlotRef
-                        && rightSlot instanceof SlotRef) {
+                    if (leftSlot instanceof SlotRef && rightSlot instanceof SlotRef) {
                         if (checkSlot.notCheckDescIdEquals(leftSlot)) {
                             addNewInPredicate(genNewInPredicate(inpredicate, rightSlot),
                                     inDeDuplication, newExprWithState,
@@ -656,7 +651,7 @@ private Expr genNewInPredicate(Expr oldExpr, Expr newSlot) {
      */
     private void addNewInPredicate(Expr expr,
                                    Set inDeDuplication,
-                                   List > newExprWithState,
+                                   List> newExprWithState,
                                    boolean needAddnewExprWithState,
                                    Analyzer analyzer,
                                    ExprRewriter.ClauseType clauseType) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteAliasFunctionRule.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteAliasFunctionRule.java
index e7bfa60a051a32..faa9fa2cacab20 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteAliasFunctionRule.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteAliasFunctionRule.java
@@ -28,7 +28,7 @@
 /**
  * rewrite alias function to real function
  */
-public class RewriteAliasFunctionRule implements ExprRewriteRule{
+public class RewriteAliasFunctionRule implements ExprRewriteRule {
     public static RewriteAliasFunctionRule INSTANCE = new RewriteAliasFunctionRule();
 
     @Override
diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/mvrewrite/NDVToHll.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/mvrewrite/NDVToHll.java
index 355b591c42eb15..652a8e2d2d7aac 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/mvrewrite/NDVToHll.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/mvrewrite/NDVToHll.java
@@ -47,7 +47,7 @@
  *    or  select k1, approx_count_distinct(k2) from table group by k1
  * Rewritten query: select k1, hll_union_agg(mv_hll_union_k2) from table group by k1
  */
-public class NDVToHll implements ExprRewriteRule{
+public class NDVToHll implements ExprRewriteRule {
     public static final ExprRewriteRule INSTANCE = new NDVToHll();
 
     @Override
diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java
index c04d15214a3b8e..d4d90782b0cf21 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java
@@ -834,6 +834,7 @@ private void loadTxnPreCommitImpl(TLoadTxnCommitRequest request) throws UserExce
         }
 
         if (request.isSetAuthCode()) {
+            // CHECKSTYLE IGNORE THIS LINE
         } else if (request.isSetAuthCodeUuid()) {
             checkAuthCodeUuid(request.getDb(), request.getTxnId(), request.getAuthCodeUuid());
         } else {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/OlapScanStatsDerive.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/OlapScanStatsDerive.java
index e91a6ddc5fdde2..c4f8a23f761157 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/statistics/OlapScanStatsDerive.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/OlapScanStatsDerive.java
@@ -45,7 +45,7 @@ public class OlapScanStatsDerive extends BaseStatsDerive {
     public void init(PlanNode node) throws UserException {
         Preconditions.checkState(node instanceof OlapScanNode);
         super.init(node);
-        buildStructure((OlapScanNode)node);
+        buildStructure((OlapScanNode) node);
     }
 
     @Override
@@ -72,7 +72,7 @@ public void buildStructure(OlapScanNode node) {
         slotIdToDataSize = new HashMap<>();
         slotIdToNdv = new HashMap<>();
         if (node.getTupleDesc() != null
-            && node.getTupleDesc().getTable() != null) {
+                && node.getTupleDesc().getTable() != null) {
             long tableId = node.getTupleDesc().getTable().getId();
             inputRowCount = Catalog.getCurrentCatalog().getStatisticsManager()
                     .getStatistics().getTableStats(tableId).getRowCount();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJob.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJob.java
index 2b77221d706cda..8888a4b486b234 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJob.java
@@ -186,9 +186,7 @@ private void unprotectedUpdateJobState(JobState newState) throws DdlException {
                 default:
                     throw new DdlException("Invalid job state transition from " + jobState + " to " + newState);
             }
-        }
-        // SCHEDULING -> RUNNING/FAILED/CANCELLED
-        else if (jobState == JobState.SCHEDULING) {
+        } else if (jobState == JobState.SCHEDULING) { // SCHEDULING -> RUNNING/FAILED/CANCELLED
             switch (newState) {
                 case RUNNING:
                     startTime = System.currentTimeMillis();
@@ -200,9 +198,7 @@ else if (jobState == JobState.SCHEDULING) {
                 default:
                     throw new DdlException("Invalid job state transition from " + jobState + " to " + newState);
             }
-        }
-        // RUNNING -> FINISHED/FAILED/CANCELLED
-        else if (jobState == JobState.RUNNING) {
+        } else if (jobState == JobState.RUNNING) { // RUNNING -> FINISHED/FAILED/CANCELLED
             switch (newState) {
                 case FINISHED:
                 case FAILED:
diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJobManager.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJobManager.java
index 3ca41ef10d54cb..aed76f127e128e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJobManager.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJobManager.java
@@ -135,8 +135,8 @@ private void checkRestrict(long dbId, Set tableIds) throws AnalysisExcepti
 
         // check the number of unfinished tasks
         if (unfinishedJobs > Config.cbo_max_statistics_job_num) {
-            throw new AnalysisException("The unfinished statistics job could not more than cbo_max_statistics_job_num: " +
-                    Config.cbo_max_statistics_job_num);
+            throw new AnalysisException("The unfinished statistics job could not more than cbo_max_statistics_job_num: "
+                    + Config.cbo_max_statistics_job_num);
         }
     }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsTask.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsTask.java
index 76af28c48fd60f..fda2c28c779e80 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsTask.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsTask.java
@@ -134,9 +134,7 @@ public void updateTaskState(TaskState newState) throws DdlException {
                 default:
                     throw new DdlException(errorMsg + taskState + " to " + newState);
             }
-        }
-        // RUNNING -> FINISHED/FAILED
-        else if (taskState == TaskState.RUNNING) {
+        } else if (taskState == TaskState.RUNNING) { // RUNNING -> FINISHED/FAILED
             switch (newState) {
                 case FINISHED:
                 case FAILED:
@@ -145,9 +143,7 @@ else if (taskState == TaskState.RUNNING) {
                 default:
                     throw new DdlException(errorMsg + taskState + " to " + newState);
             }
-        }
-        // unsupported state transition
-        else {
+        } else { // unsupported state transition
             throw new DdlException(errorMsg + taskState + " to " + newState);
         }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsTaskScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsTaskScheduler.java
index 378b2cd811b63a..17ebc209180387 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsTaskScheduler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsTaskScheduler.java
@@ -63,7 +63,7 @@ protected void runAfterCatalogReady() {
 
         if (!tasks.isEmpty()) {
             ThreadPoolExecutor executor = ThreadPoolManager.newDaemonCacheThreadPool(tasks.size(),
-                "statistic-pool", false);
+                    "statistic-pool", false);
             StatisticsJobManager jobManager = Catalog.getCurrentCatalog().getStatisticsJobManager();
             Map statisticsJobs = jobManager.getIdToStatisticsJob();
             Map>>> resultMap = Maps.newLinkedHashMap();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/Diagnoser.java b/fe/fe-core/src/main/java/org/apache/doris/system/Diagnoser.java
index 4d1c873d57682e..d62e47bdf27e93 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/system/Diagnoser.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/system/Diagnoser.java
@@ -139,9 +139,9 @@ public static List> diagnoseTablet(long tabletId) {
             } while (false);
             // version
             if (replica.getVersion() != partition.getVisibleVersion()) {
-                versionErr.append("Replica on backend " + replica.getBackendId() + "'s version (" +
-                        replica.getVersion() + ") does not equal" +
-                        " to partition visible version (" + partition.getVisibleVersion() + ")");
+                versionErr.append("Replica on backend " + replica.getBackendId() + "'s version ("
+                        + replica.getVersion() + ") does not equal"
+                        + " to partition visible version (" + partition.getVisibleVersion() + ")");
             }
             // status
             if (!replica.isAlive()) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/ClearTransactionTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/ClearTransactionTask.java
index 3272880085deb3..2c2bcfe95ff169 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/task/ClearTransactionTask.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/task/ClearTransactionTask.java
@@ -36,7 +36,7 @@ public ClearTransactionTask(long backendId, long transactionId, List parti
 
     public TClearTransactionTaskRequest toThrift() {
         TClearTransactionTaskRequest clearTransactionTaskRequest = new TClearTransactionTaskRequest(
-            transactionId, partitionIds);
+                transactionId, partitionIds);
         return clearTransactionTaskRequest;
     }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/CreateReplicaTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/CreateReplicaTask.java
index 7c8072882343f2..29068f7542d830 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/task/CreateReplicaTask.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/task/CreateReplicaTask.java
@@ -217,7 +217,7 @@ public TCreateTabletReq toThrift() {
             }
             // when doing schema change, some modified column has a prefix in name.
             // this prefix is only used in FE, not visible to BE, so we should remove this prefix.
-            if(column.getName().startsWith(SchemaChangeHandler.SHADOW_NAME_PRFIX)) {
+            if (column.getName().startsWith(SchemaChangeHandler.SHADOW_NAME_PRFIX)) {
                 tColumn.setColumnName(column.getName().substring(SchemaChangeHandler.SHADOW_NAME_PRFIX.length()));
             }
             tColumn.setVisible(column.isVisible());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/HadoopLoadPendingTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/HadoopLoadPendingTask.java
index 19fd79b1f4a80f..536ba3caa0ba76 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/task/HadoopLoadPendingTask.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/task/HadoopLoadPendingTask.java
@@ -210,7 +210,7 @@ private Map createEtlIndices(OlapTable table, long partitionId
                 } else {
                     dppColumn.put("is_key", false);
                     String aggregation = "none";
-                    if ("AGG_KEYS" == table.getKeysType().name()) {
+                    if ("AGG_KEYS".equals(table.getKeysType().name())) {
                         AggregateType aggregateType = column.getAggregationType();
                         if (AggregateType.SUM == aggregateType) {
                             aggregation = "ADD";
diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/PushTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/PushTask.java
index 75c3fb2cee84b5..c24189141d5905 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/task/PushTask.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/task/PushTask.java
@@ -99,9 +99,9 @@ public PushTask(TResourceInfo resourceInfo, long backendId, long dbId, long tabl
             String filePath, long fileSize, int timeoutSecond, long loadJobId, TPushType pushType,
             List conditions, boolean needDecompress, TPriority priority) {
         this(resourceInfo, backendId, dbId, tableId, partitionId, indexId,
-             tabletId, replicaId, schemaHash, version, filePath,
-             fileSize, timeoutSecond, loadJobId, pushType, conditions, needDecompress,
-             priority, TTaskType.PUSH, -1, tableId);
+                tabletId, replicaId, schemaHash, version, filePath,
+                fileSize, timeoutSecond, loadJobId, pushType, conditions, needDecompress,
+                priority, TTaskType.PUSH, -1, tableId);
     }
 
     // for load v2 (SparkLoadJob)
@@ -110,9 +110,9 @@ public PushTask(long backendId, long dbId, long tableId, long partitionId, long
                     TPriority priority, long transactionId, long signature,
                     TBrokerScanRange tBrokerScanRange, TDescriptorTable tDescriptorTable) {
         this(null, backendId, dbId, tableId, partitionId, indexId,
-             tabletId, replicaId, schemaHash, -1, null,
-             0, timeoutSecond, loadJobId, pushType, null, false,
-             priority, TTaskType.REALTIME_PUSH, transactionId, signature);
+                tabletId, replicaId, schemaHash, -1, null,
+                0, timeoutSecond, loadJobId, pushType, null, false,
+                priority, TTaskType.REALTIME_PUSH, transactionId, signature);
         this.tBrokerScanRange = tBrokerScanRange;
         this.tDescriptorTable = tDescriptorTable;
     }
@@ -164,7 +164,7 @@ public TPushReq toThrift() {
                         tCondition.setColumnName(columnName);
                         tCondition.setConditionOp(op);
                         for (int i = 1; i <= inPredicate.getInElementNum(); i++) {
-                            conditionValues.add(((LiteralExpr)inPredicate.getChild(i)).getStringValue());
+                            conditionValues.add(inPredicate.getChild(i).getStringValue());
                         }
                     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java
index 1101d31996cfa5..3c00f114a8e50f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java
@@ -663,11 +663,12 @@ public boolean waitForTransactionFinished(Database db, long transactionId, long
 
         long currentTimeMillis = System.currentTimeMillis();
         long timeoutTimeMillis = currentTimeMillis + timeoutMillis;
-        while (currentTimeMillis < timeoutTimeMillis &&
-                transactionState.getTransactionStatus() == TransactionStatus.COMMITTED) {
+        while (currentTimeMillis < timeoutTimeMillis
+                && transactionState.getTransactionStatus() == TransactionStatus.COMMITTED) {
             try {
                 transactionState.waitTransactionVisible(timeoutMillis);
             } catch (InterruptedException e) {
+                // CHECKSTYLE IGNORE THIS LINE
             }
             currentTimeMillis = System.currentTimeMillis();
         }
@@ -682,12 +683,13 @@ public void replayDeleteTransaction(TransactionState transactionState) {
             // here we only delete the oldest element, so if element exist in finalStatusTransactionStateDeque,
             // it must at the front of the finalStatusTransactionStateDeque.
             // check both "short" and "long" queue.
-            if (!finalStatusTransactionStateDequeShort.isEmpty() &&
-                    transactionState.getTransactionId() == finalStatusTransactionStateDequeShort.getFirst().getTransactionId()) {
+            if (!finalStatusTransactionStateDequeShort.isEmpty()
+                    && transactionState.getTransactionId() == finalStatusTransactionStateDequeShort.getFirst().getTransactionId()) {
                 finalStatusTransactionStateDequeShort.pop();
                 clearTransactionState(transactionState.getTransactionId());
-            } else if (!finalStatusTransactionStateDequeLong.isEmpty() &&
-                    transactionState.getTransactionId() == finalStatusTransactionStateDequeLong.getFirst().getTransactionId()) {
+            } else if (!finalStatusTransactionStateDequeLong.isEmpty()
+                    && transactionState.getTransactionId()
+                    == finalStatusTransactionStateDequeLong.getFirst().getTransactionId()) {
                 finalStatusTransactionStateDequeLong.pop();
                 clearTransactionState(transactionState.getTransactionId());
             }
@@ -703,12 +705,12 @@ public void replayBatchRemoveTransaction(List txnIds) {
                 // here we only delete the oldest element, so if element exist in finalStatusTransactionStateDeque,
                 // it must at the front of the finalStatusTransactionStateDeque
                 // check both "short" and "long" queue.
-                if (!finalStatusTransactionStateDequeShort.isEmpty() &&
-                        txnId == finalStatusTransactionStateDequeShort.getFirst().getTransactionId()) {
+                if (!finalStatusTransactionStateDequeShort.isEmpty()
+                        && txnId == finalStatusTransactionStateDequeShort.getFirst().getTransactionId()) {
                     finalStatusTransactionStateDequeShort.pop();
                     clearTransactionState(txnId);
-                } else if (!finalStatusTransactionStateDequeLong.isEmpty() &&
-                        txnId == finalStatusTransactionStateDequeLong.getFirst().getTransactionId()) {
+                } else if (!finalStatusTransactionStateDequeLong.isEmpty()
+                        && txnId == finalStatusTransactionStateDequeLong.getFirst().getTransactionId()) {
                     finalStatusTransactionStateDequeLong.pop();
                     clearTransactionState(txnId);
                 }
@@ -831,8 +833,8 @@ public void finishTransaction(long transactionId, Set errorReplicaIds) thr
                         continue;
                     }
                     if (partition.getVisibleVersion() != partitionCommitInfo.getVersion() - 1) {
-                        LOG.debug("transactionId {} partition commitInfo version {} is not equal with " +
-                                        "partition visible version {} plus one, need wait",
+                        LOG.debug("transactionId {} partition commitInfo version {} is not equal with "
+                                        + "partition visible version {} plus one, need wait",
                                 transactionId,
                                 partitionCommitInfo.getVersion(),
                                 partition.getVisibleVersion());
@@ -1196,8 +1198,8 @@ private boolean unprotectAbortTransaction(long transactionId, String reason)
             throw new TransactionNotFoundException("transaction [" + transactionId + "] not found.");
         }
         if (transactionState.getTransactionStatus() == TransactionStatus.ABORTED) {
-            throw new TransactionNotFoundException("transaction [" + transactionId + "] is already aborted, " +
-                    "abort reason: " + transactionState.getReason());
+            throw new TransactionNotFoundException("transaction [" + transactionId + "] is already aborted, "
+                    + "abort reason: " + transactionState.getReason());
         }
         if (transactionState.getTransactionStatus() == TransactionStatus.COMMITTED
                 || transactionState.getTransactionStatus() == TransactionStatus.VISIBLE) {
@@ -1339,8 +1341,8 @@ private void clearTransactionState(long txnId) {
     }
 
     public int getTransactionNum() {
-        return idToRunningTransactionState.size() + finalStatusTransactionStateDequeShort.size() +
-               finalStatusTransactionStateDequeLong.size();
+        return idToRunningTransactionState.size() + finalStatusTransactionStateDequeShort.size()
+                + finalStatusTransactionStateDequeLong.size();
     }
 
 
@@ -1629,8 +1631,8 @@ public void removeExpiredAndTimeoutTxns(long currentMillis) {
     }
 
     public void replayUpsertTransactionState(TransactionState transactionState) throws MetaNotFoundException {
-        boolean shouldAddTableListLock  = transactionState.getTransactionStatus() == TransactionStatus.COMMITTED ||
-                transactionState.getTransactionStatus() == TransactionStatus.VISIBLE;
+        boolean shouldAddTableListLock  = transactionState.getTransactionStatus() == TransactionStatus.COMMITTED
+                || transactionState.getTransactionStatus() == TransactionStatus.VISIBLE;
         Database db = null;
         List tableList = null;
         if (shouldAddTableListLock) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java
index bd42ea54714d4c..ceb2c857c0478f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java
@@ -143,8 +143,7 @@ public long beginTransaction(long dbId, List tableIdList, String label, TU
     }
 
     private void checkValidTimeoutSecond(long timeoutSecond, int maxLoadTimeoutSecond, int minLoadTimeOutSecond) throws AnalysisException {
-        if (timeoutSecond > maxLoadTimeoutSecond ||
-                timeoutSecond < minLoadTimeOutSecond) {
+        if (timeoutSecond > maxLoadTimeoutSecond || timeoutSecond < minLoadTimeOutSecond) {
             throw new AnalysisException("Invalid timeout: " + timeoutSecond + ". Timeout should between "
                     + minLoadTimeOutSecond + " and " + maxLoadTimeoutSecond
                     + " seconds");
@@ -278,8 +277,8 @@ public void commitTransaction2PC(Database db, List
tableList, long transa MetaLockUtils.writeUnlockTables(tableList); } stopWatch.stop(); - LOG.info("stream load tasks are committed successfully. txns: {}. time cost: {} ms." + - " data will be visable later.", transactionId, stopWatch.getTime()); + LOG.info("stream load tasks are committed successfully. txns: {}. time cost: {} ms." + + " data will be visable later.", transactionId, stopWatch.getTime()); } public void abortTransaction(long dbId, long transactionId, String reason) throws UserException { @@ -588,7 +587,7 @@ public TWaitingTxnStatusResult getWaitingTxnStatus(TWaitingTxnStatusRequest requ long dbId = request.getDbId(); int commitTimeoutSec = Config.commit_timeout_second; for (int i = 0; i < commitTimeoutSec; ++i) { - Database db = Catalog.getCurrentCatalog().getDbOrAnalysisException(dbId); + Catalog.getCurrentCatalog().getDbOrAnalysisException(dbId); TWaitingTxnStatusResult statusResult = new TWaitingTxnStatusResult(); statusResult.status = new TStatus(); TransactionStatus txnStatus = null; diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/TabletQuorumFailedException.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/TabletQuorumFailedException.java index 2aa3a3b9a408d0..741babff4b18b4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/TabletQuorumFailedException.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/TabletQuorumFailedException.java @@ -35,9 +35,8 @@ public TabletQuorumFailedException(long transactionId, long tabletId, int successReplicaNum, int quorumReplicaNum, Set errorBackendIdsForTablet) { super(String.format(TABLET_QUORUM_FAILED_MSG, transactionId, tabletId, - successReplicaNum, quorumReplicaNum, - Joiner.on(",").join(errorBackendIdsForTablet)), - transactionId); + successReplicaNum, quorumReplicaNum, + Joiner.on(",").join(errorBackendIdsForTablet)), transactionId); this.tabletId = tabletId; this.errorBackendIdsForTablet = errorBackendIdsForTablet; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionState.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionState.java index 7139077b6afdc8..988514e5f7f6e5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionState.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionState.java @@ -539,8 +539,10 @@ public boolean isShortTxn() { // return true if txn is running but timeout public boolean isTimeout(long currentMillis) { - return (transactionStatus == TransactionStatus.PREPARE && currentMillis - prepareTime > timeoutMs) || - (transactionStatus == TransactionStatus.PRECOMMITTED && currentMillis - preCommitTime > preCommittedTimeoutMs); + return (transactionStatus == TransactionStatus.PREPARE + && currentMillis - prepareTime > timeoutMs) + || (transactionStatus == TransactionStatus.PRECOMMITTED + && currentMillis - preCommitTime > preCommittedTimeoutMs); } public synchronized void addTableIndexes(OlapTable table) { diff --git a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterJobV2Test.java b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterJobV2Test.java index 5e68656d667cfb..65f4a045bf6d44 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterJobV2Test.java +++ b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterJobV2Test.java @@ -142,21 +142,21 @@ public void testRollup() throws Exception { @Test public void testDupTableSchemaChange() throws Exception { - createTable("CREATE TABLE test.dup_table (\n" + - " k1 bigint(20) NULL ,\n" + - " k2 
bigint(20) NULL ,\n" + - " k3 bigint(20) NULL,\n" + - " v1 bigint(20) NULL ,\n" + - " v2 varchar(1) NULL,\n" + - " v3 varchar(1) NULL \n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(k1, k2, k3)\n" + - "PARTITION BY RANGE(k1, v1)\n" + - "(PARTITION p1 VALUES LESS THAN (\"10\", \"10\"))\n" + - "DISTRIBUTED BY HASH(v1,k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ");"); + createTable("CREATE TABLE test.dup_table (\n" + + " k1 bigint(20) NULL ,\n" + + " k2 bigint(20) NULL ,\n" + + " k3 bigint(20) NULL,\n" + + " v1 bigint(20) NULL ,\n" + + " v2 varchar(1) NULL,\n" + + " v3 varchar(1) NULL \n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(k1, k2, k3)\n" + + "PARTITION BY RANGE(k1, v1)\n" + + "(PARTITION p1 VALUES LESS THAN (\"10\", \"10\"))\n" + + "DISTRIBUTED BY HASH(v1,k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ");"); alterTable("alter table test.dup_table add rollup r1(v1,v2,k2,k1);"); @@ -167,33 +167,33 @@ public void testDupTableSchemaChange() throws Exception { @Test public void testCreateMVForListPartitionTable() throws Exception { - createTable("CREATE TABLE test.list_tbl (\n" + - "city VARCHAR(20) NOT NULL,\n" + - "user_id BIGINT NOT NULL,\n" + - "date DATE NOT NULL,\n" + - "age SMALLINT NOT NULL,\n" + - "sex TINYINT NOT NULL,\n" + - "cost BIGINT NOT NULL DEFAULT \"0\"\n" + - ") DUPLICATE KEY(city) PARTITION BY LIST(city) (\n" + - "PARTITION p_bj\n" + - "VALUES IN (\"beijing\"),\n" + - "PARTITION p_gz\n" + - "VALUES IN (\"guangzhou\"),\n" + - "PARTITION p_sz\n" + - "VALUES IN (\"shenzhen\")\n" + - ") DISTRIBUTED BY HASH(date) BUCKETS 1 PROPERTIES(\"replication_num\" = \"1\");"); - - createMaterializedView("create materialized view list_view as\n" + - "select city,\n" + - "user_id,\n" + - "date,\n" + - "sum(cost)\n" + - "from\n" + - "test.list_tbl\n" + - "group by\n" + - "city,\n" + - "user_id,\n" + - "date;"); + createTable("CREATE TABLE test.list_tbl (\n" + + "city VARCHAR(20) NOT NULL,\n" + + "user_id BIGINT NOT NULL,\n" + + "date DATE NOT NULL,\n" + + "age SMALLINT NOT NULL,\n" + + "sex TINYINT NOT NULL,\n" + + "cost BIGINT NOT NULL DEFAULT \"0\"\n" + + ") DUPLICATE KEY(city) PARTITION BY LIST(city) (\n" + + "PARTITION p_bj\n" + + "VALUES IN (\"beijing\"),\n" + + "PARTITION p_gz\n" + + "VALUES IN (\"guangzhou\"),\n" + + "PARTITION p_sz\n" + + "VALUES IN (\"shenzhen\")\n" + + ") DISTRIBUTED BY HASH(date) BUCKETS 1 PROPERTIES(\"replication_num\" = \"1\");"); + + createMaterializedView("create materialized view list_view as\n" + + "select city,\n" + + "user_id,\n" + + "date,\n" + + "sum(cost)\n" + + "from\n" + + "test.list_tbl\n" + + "group by\n" + + "city,\n" + + "user_id,\n" + + "date;"); Map alterJobs = Catalog.getCurrentCatalog().getMaterializedViewHandler().getAlterJobsV2(); waitAlterJobDone(alterJobs); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java index 869231216ee029..293d73341f3b44 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java @@ -78,149 +78,149 @@ public static void beforeClass() throws Exception { CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, connectContext); Catalog.getCurrentCatalog().createDb(createDbStmt); - createTable("CREATE TABLE test.tbl1\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " 
PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); - - createTable("CREATE TABLE test.tbl2\n" + - "(\n" + - " k1 date,\n" + - " v1 int sum\n" + - ")\n" + - "DISTRIBUTED BY HASH (k1) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); - - createTable("CREATE TABLE test.tbl3\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); - - createTable("CREATE TABLE test.tbl4\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01'),\n" + - " PARTITION p3 values less than('2020-04-01'),\n" + - " PARTITION p4 values less than('2020-05-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES" + - "(" + - " 'replication_num' = '1',\n" + - " 'in_memory' = 'false',\n" + - " 'storage_medium' = 'SSD',\n" + - " 'storage_cooldown_time' = '2999-12-31 00:00:00'\n" + - ");"); - - createTable("CREATE TABLE test.tbl5\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int \n" + - ") ENGINE=OLAP\n" + - "UNIQUE KEY (k1,k2)\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); - - createTable("create external table test.odbc_table\n" + - "( `k1` bigint(20) COMMENT \"\",\n" + - " `k2` datetime COMMENT \"\",\n" + - " `k3` varchar(20) COMMENT \"\",\n" + - " `k4` varchar(100) COMMENT \"\",\n" + - " `k5` float COMMENT \"\"\n" + - ")ENGINE=ODBC\n" + - "PROPERTIES (\n" + - "\"host\" = \"127.0.0.1\",\n" + - "\"port\" = \"3306\",\n" + - "\"user\" = \"root\",\n" + - "\"password\" = \"123\",\n" + - "\"database\" = \"db1\",\n" + - "\"table\" = \"tbl1\",\n" + - "\"driver\" = \"Oracle Driver\",\n" + - "\"odbc_type\" = \"oracle\"\n" + - ");"); + createTable("CREATE TABLE test.tbl1\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); + + createTable("CREATE TABLE test.tbl2\n" + + "(\n" + + " k1 date,\n" + + " v1 int sum\n" + + ")\n" + + "DISTRIBUTED BY HASH (k1) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); + + createTable("CREATE TABLE test.tbl3\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); + + createTable("CREATE TABLE test.tbl4\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01'),\n" + + " PARTITION p3 values less 
than('2020-04-01'),\n" + + " PARTITION p4 values less than('2020-05-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES" + + "(" + + " 'replication_num' = '1',\n" + + " 'in_memory' = 'false',\n" + + " 'storage_medium' = 'SSD',\n" + + " 'storage_cooldown_time' = '2999-12-31 00:00:00'\n" + + ");"); + + createTable("CREATE TABLE test.tbl5\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int \n" + + ") ENGINE=OLAP\n" + + "UNIQUE KEY (k1,k2)\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); + + createTable("create external table test.odbc_table\n" + + "( `k1` bigint(20) COMMENT \"\",\n" + + " `k2` datetime COMMENT \"\",\n" + + " `k3` varchar(20) COMMENT \"\",\n" + + " `k4` varchar(100) COMMENT \"\",\n" + + " `k5` float COMMENT \"\"\n" + + ")ENGINE=ODBC\n" + + "PROPERTIES (\n" + + "\"host\" = \"127.0.0.1\",\n" + + "\"port\" = \"3306\",\n" + + "\"user\" = \"root\",\n" + + "\"password\" = \"123\",\n" + + "\"database\" = \"db1\",\n" + + "\"table\" = \"tbl1\",\n" + + "\"driver\" = \"Oracle Driver\",\n" + + "\"odbc_type\" = \"oracle\"\n" + + ");"); // s3 resource - createRemoteStorageResource("create resource \"remote_s3\"\n" + - "properties\n" + - "(\n" + - " \"type\" = \"s3\", \n" + - " \"s3_endpoint\" = \"bj\",\n" + - " \"s3_region\" = \"bj\",\n" + - " \"s3_root_path\" = \"/path/to/root\",\n" + - " \"s3_access_key\" = \"bbb\",\n" + - " \"s3_secret_key\" = \"aaaa\",\n" + - " \"s3_max_connections\" = \"50\",\n" + - " \"s3_request_timeout_ms\" = \"3000\",\n" + - " \"s3_connection_timeout_ms\" = \"1000\"\n" + - ");"); - - createRemoteStorageResource("create resource \"remote_s3_1\"\n" + - "properties\n" + - "(\n" + - " \"type\" = \"s3\", \n" + - " \"s3_endpoint\" = \"bj\",\n" + - " \"s3_region\" = \"bj\",\n" + - " \"s3_root_path\" = \"/path/to/root\",\n" + - " \"s3_access_key\" = \"bbb\",\n" + - " \"s3_secret_key\" = \"aaaa\",\n" + - " \"s3_max_connections\" = \"50\",\n" + - " \"s3_request_timeout_ms\" = \"3000\",\n" + - " \"s3_connection_timeout_ms\" = \"1000\"\n" + - ");"); - - createTable("CREATE TABLE test.tbl_remote\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01'),\n" + - " PARTITION p3 values less than('2020-04-01'),\n" + - " PARTITION p4 values less than('2020-05-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES" + - "(" + - " 'replication_num' = '1',\n" + - " 'in_memory' = 'false',\n" + - " 'storage_medium' = 'SSD',\n" + - " 'storage_cooldown_time' = '2122-04-01 20:24:00',\n" + - " 'remote_storage_resource' = 'remote_s3',\n" + - " 'remote_storage_cooldown_time' = '2122-12-01 20:23:00'" + - ");"); + createRemoteStorageResource("create resource \"remote_s3\"\n" + + "properties\n" + + "(\n" + + " \"type\" = \"s3\", \n" + + " \"s3_endpoint\" = \"bj\",\n" + + " \"s3_region\" = \"bj\",\n" + + " \"s3_root_path\" = \"/path/to/root\",\n" + + " \"s3_access_key\" = \"bbb\",\n" + + " \"s3_secret_key\" = \"aaaa\",\n" + + " \"s3_max_connections\" = \"50\",\n" + + " \"s3_request_timeout_ms\" = \"3000\",\n" + + " \"s3_connection_timeout_ms\" = \"1000\"\n" + + ");"); + + createRemoteStorageResource("create resource \"remote_s3_1\"\n" + + "properties\n" + + "(\n" + + " \"type\" = \"s3\", 
\n" + + " \"s3_endpoint\" = \"bj\",\n" + + " \"s3_region\" = \"bj\",\n" + + " \"s3_root_path\" = \"/path/to/root\",\n" + + " \"s3_access_key\" = \"bbb\",\n" + + " \"s3_secret_key\" = \"aaaa\",\n" + + " \"s3_max_connections\" = \"50\",\n" + + " \"s3_request_timeout_ms\" = \"3000\",\n" + + " \"s3_connection_timeout_ms\" = \"1000\"\n" + + ");"); + + createTable("CREATE TABLE test.tbl_remote\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01'),\n" + + " PARTITION p3 values less than('2020-04-01'),\n" + + " PARTITION p4 values less than('2020-05-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES" + + "(" + + " 'replication_num' = '1',\n" + + " 'in_memory' = 'false',\n" + + " 'storage_medium' = 'SSD',\n" + + " 'storage_cooldown_time' = '2122-04-01 20:24:00',\n" + + " 'remote_storage_resource' = 'remote_s3',\n" + + " 'remote_storage_cooldown_time' = '2122-12-01 20:23:00'" + + ");"); } @AfterClass @@ -346,13 +346,13 @@ public void testConflictAlterOperations() throws Exception { // enable dynamic partition // not adding the `start` property so that it won't drop the origin partition p1, p2 and p3 - stmt = "alter table test.tbl1 set (\n" + - "'dynamic_partition.enable' = 'true',\n" + - "'dynamic_partition.time_unit' = 'DAY',\n" + - "'dynamic_partition.end' = '3',\n" + - "'dynamic_partition.prefix' = 'p',\n" + - "'dynamic_partition.buckets' = '3'\n" + - " );"; + stmt = "alter table test.tbl1 set (\n" + + "'dynamic_partition.enable' = 'true',\n" + + "'dynamic_partition.time_unit' = 'DAY',\n" + + "'dynamic_partition.end' = '3',\n" + + "'dynamic_partition.prefix' = 'p',\n" + + "'dynamic_partition.buckets' = '3'\n" + + " );"; alterTable(stmt, false); Database db = Catalog.getCurrentCatalog().getDbOrMetaException("default_cluster:test"); OlapTable tbl = (OlapTable) db.getTableOrMetaException("tbl1"); @@ -539,11 +539,11 @@ public void testAlterRemoteStorageTableDataProperties() throws Exception { Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(p1.getId())); // alter recover to old state - stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set (" + - "'storage_medium' = 'SSD', " + - "'storage_cooldown_time' = '2122-04-01 20:24:00', " + - "'remote_storage_cooldown_time' = '2122-12-01 20:23:00'" + - ")"; + stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set (" + + "'storage_medium' = 'SSD', " + + "'storage_cooldown_time' = '2122-04-01 20:24:00', " + + "'remote_storage_cooldown_time' = '2122-12-01 20:23:00'" + + ")"; alterTable(stmt, false); for (Partition partition : partitionList) { Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(partition.getId())); @@ -555,14 +555,14 @@ public void testAlterRemoteStorageTableDataProperties() throws Exception { @Test public void testDynamicPartitionDropAndAdd() throws Exception { // test day range - String stmt = "alter table test.tbl3 set (\n" + - "'dynamic_partition.enable' = 'true',\n" + - "'dynamic_partition.time_unit' = 'DAY',\n" + - "'dynamic_partition.start' = '-3',\n" + - "'dynamic_partition.end' = '3',\n" + - "'dynamic_partition.prefix' = 'p',\n" + - "'dynamic_partition.buckets' = '3'\n" + - " );"; + String stmt = "alter table test.tbl3 set (\n" + + "'dynamic_partition.enable' = 'true',\n" + + "'dynamic_partition.time_unit' = 'DAY',\n" + + "'dynamic_partition.start' = 
'-3',\n" + + "'dynamic_partition.end' = '3',\n" + + "'dynamic_partition.prefix' = 'p',\n" + + "'dynamic_partition.buckets' = '3'\n" + + " );"; alterTable(stmt, false); Thread.sleep(5000); // sleep to wait dynamic partition scheduler run @@ -596,70 +596,70 @@ private void waitSchemaChangeJobDone(boolean rollupJob) throws Exception { @Test public void testSetDynamicPropertiesInNormalTable() throws Exception { String tableName = "no_dynamic_table"; - String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); String alterStmt = "alter table test." + tableName + " set (\"dynamic_partition.enable\" = \"true\");"; - String errorMsg = "errCode = 2, detailMessage = Table default_cluster:test.no_dynamic_table is not a dynamic partition table. " + - "Use command `HELP ALTER TABLE` to see how to change a normal table to a dynamic partition table."; + String errorMsg = "errCode = 2, detailMessage = Table default_cluster:test.no_dynamic_table is not a dynamic partition table. " + + "Use command `HELP ALTER TABLE` to see how to change a normal table to a dynamic partition table."; alterTableWithExceptionMsg(alterStmt, errorMsg); // test set dynamic properties in a no dynamic partition table - String stmt = "alter table test." + tableName + " set (\n" + - "'dynamic_partition.enable' = 'true',\n" + - "'dynamic_partition.time_unit' = 'DAY',\n" + - "'dynamic_partition.start' = '-3',\n" + - "'dynamic_partition.end' = '3',\n" + - "'dynamic_partition.prefix' = 'p',\n" + - "'dynamic_partition.buckets' = '3'\n" + - " );"; + String stmt = "alter table test." 
+ tableName + " set (\n" + + "'dynamic_partition.enable' = 'true',\n" + + "'dynamic_partition.time_unit' = 'DAY',\n" + + "'dynamic_partition.start' = '-3',\n" + + "'dynamic_partition.end' = '3',\n" + + "'dynamic_partition.prefix' = 'p',\n" + + "'dynamic_partition.buckets' = '3'\n" + + " );"; alterTable(stmt, false); } @Test public void testSetDynamicPropertiesInDynamicPartitionTable() throws Exception { String tableName = "dynamic_table"; - String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); String alterStmt1 = "alter table test." 
+ tableName + " set (\"dynamic_partition.enable\" = \"false\");"; @@ -678,48 +678,48 @@ public void testSetDynamicPropertiesInDynamicPartitionTable() throws Exception { @Test public void testReplaceTable() throws Exception { - String stmt1 = "CREATE TABLE test.replace1\n" + - "(\n" + - " k1 int, k2 int, k3 int sum\n" + - ")\n" + - "AGGREGATE KEY(k1, k2)\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" + - "rollup (\n" + - "r1(k1),\n" + - "r2(k2, k3)\n" + - ")\n" + - "PROPERTIES(\"replication_num\" = \"1\");"; - - - String stmt2 = "CREATE TABLE test.r1\n" + - "(\n" + - " k1 int, k2 int\n" + - ")\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 11\n" + - "PROPERTIES(\"replication_num\" = \"1\");"; - - String stmt3 = "CREATE TABLE test.replace2\n" + - "(\n" + - " k1 int, k2 int\n" + - ")\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 11\n" + - "PROPERTIES(\"replication_num\" = \"1\");"; - - String stmt4 = "CREATE TABLE test.replace3\n" + - "(\n" + - " k1 int, k2 int, k3 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - "\tPARTITION p1 values less than(\"100\"),\n" + - "\tPARTITION p2 values less than(\"200\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 1\n" + - "rollup (\n" + - "r3(k1),\n" + - "r4(k2, k3)\n" + - ")\n" + - "PROPERTIES(\"replication_num\" = \"1\");"; + String stmt1 = "CREATE TABLE test.replace1\n" + + "(\n" + + " k1 int, k2 int, k3 int sum\n" + + ")\n" + + "AGGREGATE KEY(k1, k2)\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" + + "rollup (\n" + + "r1(k1),\n" + + "r2(k2, k3)\n" + + ")\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; + + + String stmt2 = "CREATE TABLE test.r1\n" + + "(\n" + + " k1 int, k2 int\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 11\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; + + String stmt3 = "CREATE TABLE test.replace2\n" + + "(\n" + + " k1 int, k2 int\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 11\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; + + String stmt4 = "CREATE TABLE test.replace3\n" + + "(\n" + + " k1 int, k2 int, k3 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + "\tPARTITION p1 values less than(\"100\"),\n" + + "\tPARTITION p2 values less than(\"200\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 1\n" + + "rollup (\n" + + "r3(k1),\n" + + "r4(k2, k3)\n" + + ")\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; createTable(stmt1); createTable(stmt2); @@ -822,19 +822,19 @@ public void testReplaceTable() throws Exception { @Test public void testModifyBucketNum() throws Exception { - String stmt = "CREATE TABLE test.bucket\n" + - "(\n" + - " k1 int, k2 int, k3 int sum\n" + - ")\n" + - "ENGINE = OLAP\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"100000\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"200000\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"300000\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" + - "PROPERTIES(\"replication_num\" = \"1\");"; + String stmt = "CREATE TABLE test.bucket\n" + + "(\n" + + " k1 int, k2 int, k3 int sum\n" + + ")\n" + + "ENGINE = OLAP\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"100000\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"200000\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"300000\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; createTable(stmt); Database db = Catalog.getCurrentCatalog().getDbOrMetaException("default_cluster:test"); @@ -853,19 +853,19 @@ public void testModifyBucketNum() throws Exception { @Test public 
void testChangeOrder() throws Exception { - createTable("CREATE TABLE test.change_order\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); + createTable("CREATE TABLE test.change_order\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); String changeOrderStmt = "ALTER TABLE test.change_order ORDER BY (k2, k1, v1);;"; alterTable(changeOrderStmt, false); @@ -873,20 +873,20 @@ public void testChangeOrder() throws Exception { @Test public void testAlterUniqueTablePartitionColumn() throws Exception { - createTable("CREATE TABLE test.unique_partition\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int\n" + - ")\n" + - "UNIQUE KEY(k1, k2)\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); + createTable("CREATE TABLE test.unique_partition\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int\n" + + ")\n" + + "UNIQUE KEY(k1, k2)\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); // partition key can not be changed. 
// this test is also for validating a bug fix about invisible columns(delete flag column) @@ -996,21 +996,21 @@ public void testExternalTableAlterOperations() throws Exception { @Test public void testModifyTableEngine() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.mysql_table (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=MYSQL\n" + - "PROPERTIES (\n" + - "\"host\" = \"172.16.0.1\",\n" + - "\"port\" = \"3306\",\n" + - "\"user\" = \"cmy\",\n" + - "\"password\" = \"abc\",\n" + - "\"database\" = \"db1\",\n" + - "\"table\" = \"tbl1\"" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.mysql_table (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=MYSQL\n" + + "PROPERTIES (\n" + + "\"host\" = \"172.16.0.1\",\n" + + "\"port\" = \"3306\",\n" + + "\"user\" = \"cmy\",\n" + + "\"password\" = \"abc\",\n" + + "\"database\" = \"db1\",\n" + + "\"table\" = \"tbl1\"" + + ");"; createTable(createOlapTblStmt); Database db = Catalog.getCurrentCatalog().getDbNullable("default_cluster:test"); diff --git a/fe/fe-core/src/test/java/org/apache/doris/alter/MaterializedViewHandlerTest.java b/fe/fe-core/src/test/java/org/apache/doris/alter/MaterializedViewHandlerTest.java index d941ff60438803..9e81e48588c243 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/alter/MaterializedViewHandlerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/alter/MaterializedViewHandlerTest.java @@ -276,7 +276,6 @@ public void checkInvalidPartitionKeyMV(@Injectable CreateMaterializedViewStmt cr @Injectable OlapTable olapTable) throws DdlException { final String mvName = "mv1"; final String columnName1 = "k1"; - Column baseColumn1 = new Column(columnName1, Type.VARCHAR, true, null, "", ""); MVColumnItem mvColumnItem = new MVColumnItem(columnName1, Type.VARCHAR); mvColumnItem.setIsKey(false); mvColumnItem.setAggregationType(AggregateType.SUM, false); diff --git a/fe/fe-core/src/test/java/org/apache/doris/alter/SchemaChangeJobV2Test.java b/fe/fe-core/src/test/java/org/apache/doris/alter/SchemaChangeJobV2Test.java index cbc6993733c92b..62a6187064b249 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/alter/SchemaChangeJobV2Test.java +++ b/fe/fe-core/src/test/java/org/apache/doris/alter/SchemaChangeJobV2Test.java @@ -367,8 +367,9 @@ public void modifyDynamicPartitionWithoutTableProperty(String propertyKey, Strin OlapTable olapTable = (OlapTable) db.getTableOrDdlException(CatalogMocker.TEST_TBL2_ID); expectedEx.expect(DdlException.class); - expectedEx.expectMessage("errCode = 2, detailMessage = Table test_db.test_tbl2 is not a dynamic partition table. " + - "Use command `HELP ALTER TABLE` to see how to change a normal table to a dynamic partition table."); + expectedEx.expectMessage("errCode = 2," + + " detailMessage = Table test_db.test_tbl2 is not a dynamic partition table. 
" + + "Use command `HELP ALTER TABLE` to see how to change a normal table to a dynamic partition table."); schemaChangeHandler.process(alterClauses, "default_cluster", db, olapTable); } @@ -389,7 +390,7 @@ public void testSerializeOfSchemaChangeJob() throws IOException { file.deleteOnExit(); DataOutputStream out = new DataOutputStream(new FileOutputStream(file)); - SchemaChangeJobV2 schemaChangeJobV2 = new SchemaChangeJobV2(1, 1,1, "test",600000); + SchemaChangeJobV2 schemaChangeJobV2 = new SchemaChangeJobV2(1, 1, 1, "test", 600000); schemaChangeJobV2.setStorageFormat(TStorageFormat.V2); Deencapsulation.setField(schemaChangeJobV2, "jobState", AlterJobV2.JobState.FINISHED); Map indexSchemaVersionAndHashMap = Maps.newHashMap(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/AccessTestUtil.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/AccessTestUtil.java index 6869e504c67429..8054dba45b46cd 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/AccessTestUtil.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/AccessTestUtil.java @@ -321,11 +321,11 @@ public static Analyzer fetchAdminAnalyzer(boolean withCluster) { { analyzer.getDefaultDb(); minTimes = 0; - result = withCluster? prefix + "testDb" : "testDb"; + result = withCluster ? prefix + "testDb" : "testDb"; analyzer.getQualifiedUser(); - minTimes = 0 ; - result = withCluster? prefix + "testUser" : "testUser"; + minTimes = 0; + result = withCluster ? prefix + "testUser" : "testUser"; analyzer.getClusterName(); minTimes = 0; diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmtTest.java index afd27b54a89100..2e6f47a58f5720 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmtTest.java @@ -56,7 +56,7 @@ public void setUp() { @Test public void testParticularBackends() throws AnalysisException { List backends = Lists.newArrayList( - "192.168.0.10003:9051", "192.168.0.10004:9051", "192.168.0.10005:9051", "192.168.0.10006:9051"); + "192.168.0.10003:9051", "192.168.0.10004:9051", "192.168.0.10005:9051", "192.168.0.10006:9051"); final AdminCancelRebalanceDiskStmt stmt = new AdminCancelRebalanceDiskStmt(backends); stmt.analyze(analyzer); Assert.assertEquals(2, stmt.getBackends().size()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminRebalanceDiskStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminRebalanceDiskStmtTest.java index 814f408d11df1e..15835810045be3 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminRebalanceDiskStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminRebalanceDiskStmtTest.java @@ -56,7 +56,7 @@ public void setUp() { @Test public void testParticularBackends() throws AnalysisException { List backends = Lists.newArrayList( - "192.168.0.10003:9051", "192.168.0.10004:9051", "192.168.0.10005:9051", "192.168.0.10006:9051"); + "192.168.0.10003:9051", "192.168.0.10004:9051", "192.168.0.10005:9051", "192.168.0.10006:9051"); final AdminRebalanceDiskStmt stmt = new AdminRebalanceDiskStmt(backends); stmt.analyze(analyzer); Assert.assertEquals(2, stmt.getBackends().size()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminSetConfigStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminSetConfigStmtTest.java index 
ab484d85bb267b..3ae8c86716169f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminSetConfigStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminSetConfigStmtTest.java @@ -38,9 +38,9 @@ public void testUnknownConfig() throws Exception { String stmt = "admin set frontend config(\"unknown_config\" = \"unknown\");"; AdminSetConfigStmt adminSetConfigStmt = (AdminSetConfigStmt) parseAndAnalyzeStmt(stmt); DdlException exception = Assertions.assertThrows(DdlException.class, - () -> Catalog.getCurrentCatalog().setConfig(adminSetConfigStmt)); + () -> Catalog.getCurrentCatalog().setConfig(adminSetConfigStmt)); Assertions.assertEquals("errCode = 2, detailMessage = Config 'unknown_config' does not exist", - exception.getMessage()); + exception.getMessage()); } @Test @@ -49,6 +49,6 @@ public void testEmptyConfig() { Assertions.assertThrows(AnalysisException.class, () -> parseAndAnalyzeStmt("admin set frontend config;")); Assertions.assertEquals("errCode = 2, detailMessage = config parameter size is not equal to 1", - exception.getMessage()); + exception.getMessage()); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminShowReplicaTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminShowReplicaTest.java index 48b49276f4239e..64016893026b4d 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminShowReplicaTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminShowReplicaTest.java @@ -39,15 +39,14 @@ public class AdminShowReplicaTest extends TestWithFeService { @Override protected void runBeforeAll() throws Exception { createDatabase("test"); - createTable("create table test.tbl1\n" + - "(k1 date, k2 int)\n" + - "partition by range(k1)\n" + - "(\n" + - " partition p1 values less than(\"2021-07-01\"),\n" + - " partition p2 values less than(\"2021-08-01\")\n" + - ")\n" + - "distributed by hash(k2) buckets 10\n" + - "properties(\"replication_num\" = \"1\");"); + createTable("create table test.tbl1\n" + + "(k1 date, k2 int)\n" + + "partition by range(k1)\n" + + "(\n" + + " partition p1 values less than(\"2021-07-01\"),\n" + + " partition p2 values less than(\"2021-08-01\")\n" + + ")\n" + "distributed by hash(k2) buckets 10\n" + + "properties(\"replication_num\" = \"1\");"); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/AggregateTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/AggregateTest.java index f6dd5dbcab5137..ce5ef14b55e078 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/AggregateTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/AggregateTest.java @@ -37,9 +37,9 @@ protected void runBeforeAll() throws Exception { FeConstants.runningUnitTest = true; dorisAssert = new DorisAssert(); dorisAssert.withDatabase(DB_NAME).useDatabase(DB_NAME); - String createTableSQL = "create table " + DB_NAME + "." + TABLE_NAME + " (empid int, name varchar, " + - "deptno int, salary int, commission int, time DATETIME) " - + "distributed by hash(empid) buckets 3 properties('replication_num' = '1');"; + String createTableSQL = "create table " + DB_NAME + "." + TABLE_NAME + " (empid int, name varchar, " + + "deptno int, salary int, commission int, time DATETIME) " + + "distributed by hash(empid) buckets 3 properties('replication_num' = '1');"; createTable(createTableSQL); } @@ -90,20 +90,20 @@ public void testWindowFunnelAnalysisException() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); // normal. 
- { - String query = "select empid, window_funnel(1, 'default', time, empid = 1, empid = 2) from " + - DB_NAME + "." + TABLE_NAME + " group by empid"; + { // CHECKSTYLE IGNORE THIS LINE + String query = "select empid, window_funnel(1, 'default', time, empid = 1, empid = 2) from " + + DB_NAME + "." + TABLE_NAME + " group by empid"; try { UtFrameUtils.parseAndAnalyzeStmt(query, ctx); } catch (Exception e) { Assert.fail("must be AnalysisException."); } - } + } // CHECKSTYLE IGNORE THIS LINE // less argument. do { - String query = "select empid, window_funnel(1, 'default', time) from " + - DB_NAME + "." + TABLE_NAME + " group by empid"; + String query = "select empid, window_funnel(1, 'default', time) from " + + DB_NAME + "." + TABLE_NAME + " group by empid"; try { UtFrameUtils.parseAndAnalyzeStmt(query, ctx); } catch (AnalysisException e) { @@ -113,12 +113,12 @@ public void testWindowFunnelAnalysisException() throws Exception { Assert.fail("must be AnalysisException."); } Assert.fail("must be AnalysisException."); - } while(false); + } while (false); // argument with wrong type. do { - String query = "select empid, window_funnel('xx', 'default', time, empid = 1) from " + - DB_NAME + "." + TABLE_NAME + " group by empid"; + String query = "select empid, window_funnel('xx', 'default', time, empid = 1) from " + + DB_NAME + "." + TABLE_NAME + " group by empid"; try { UtFrameUtils.parseAndAnalyzeStmt(query, ctx); } catch (AnalysisException e) { @@ -128,11 +128,11 @@ public void testWindowFunnelAnalysisException() throws Exception { Assert.fail("must be AnalysisException."); } Assert.fail("must be AnalysisException."); - } while(false); + } while (false); do { - String query = "select empid, window_funnel(1, 1, time, empid = 1) from " + - DB_NAME + "." + TABLE_NAME + " group by empid"; + String query = "select empid, window_funnel(1, 1, time, empid = 1) from " + + DB_NAME + "." + TABLE_NAME + " group by empid"; try { UtFrameUtils.parseAndAnalyzeStmt(query, ctx); } catch (AnalysisException e) { @@ -142,12 +142,12 @@ public void testWindowFunnelAnalysisException() throws Exception { Assert.fail("must be AnalysisException."); } Assert.fail("must be AnalysisException."); - } while(false); + } while (false); do { - String query = "select empid, window_funnel(1, '1', empid, '1') from " + - DB_NAME + "." + TABLE_NAME + " group by empid"; + String query = "select empid, window_funnel(1, '1', empid, '1') from " + + DB_NAME + "." + TABLE_NAME + " group by empid"; try { UtFrameUtils.parseAndAnalyzeStmt(query, ctx); } catch (AnalysisException e) { @@ -157,11 +157,11 @@ public void testWindowFunnelAnalysisException() throws Exception { Assert.fail("must be AnalysisException."); } Assert.fail("must be AnalysisException."); - } while(false); + } while (false); do { - String query = "select empid, window_funnel(1, '1', time, '1') from " + - DB_NAME + "." + TABLE_NAME + " group by empid"; + String query = "select empid, window_funnel(1, '1', time, '1') from " + + DB_NAME + "." 
+ TABLE_NAME + " group by empid"; try { UtFrameUtils.parseAndAnalyzeStmt(query, ctx); } catch (AnalysisException e) { @@ -171,6 +171,6 @@ public void testWindowFunnelAnalysisException() throws Exception { Assert.fail("must be AnalysisException."); } Assert.fail("must be AnalysisException."); - } while(false); + } while (false); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/AlterRoutineLoadStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/AlterRoutineLoadStmtTest.java index 6edfda9548d805..66fb6ba992e2d2 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/AlterRoutineLoadStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/AlterRoutineLoadStmtTest.java @@ -68,7 +68,7 @@ public void setUp() { @Test public void testNormal() { - { + { // CHECKSTYLE IGNORE THIS LINE Map<String, String> jobProperties = Maps.newHashMap(); jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100"); jobProperties.put(CreateRoutineLoadStmt.MAX_BATCH_ROWS_PROPERTY, "200000"); @@ -96,7 +96,7 @@ public void testNormal() { Assert.assertTrue(stmt.getDataSourceProperties().getCustomKafkaProperties().containsKey("group.id")); Assert.assertTrue(stmt.getDataSourceProperties().getCustomKafkaProperties().containsKey("client.id")); Assert.assertEquals(3, stmt.getDataSourceProperties().getKafkaPartitionOffsets().size()); - } + } // CHECKSTYLE IGNORE THIS LINE } @Test(expected = AnalysisException.class) @@ -108,7 +108,7 @@ public void testNoProperties() throws AnalysisException, UserException { @Test public void testUnsupportedProperties() { - { + { // CHECKSTYLE IGNORE THIS LINE Map<String, String> jobProperties = Maps.newHashMap(); jobProperties.put(CreateRoutineLoadStmt.FORMAT, "csv"); AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), @@ -121,10 +121,10 @@ public void testUnsupportedProperties() { } catch (UserException e) { Assert.fail(); } - } + } // CHECKSTYLE IGNORE THIS LINE // alter topic is now supported - { + { // CHECKSTYLE IGNORE THIS LINE Map<String, String> jobProperties = Maps.newHashMap(); jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100"); String typeName = "kafka"; @@ -143,9 +143,9 @@ public void testUnsupportedProperties() { e.printStackTrace(); Assert.fail(); } - } + } // CHECKSTYLE IGNORE THIS LINE - { + { // CHECKSTYLE IGNORE THIS LINE Map<String, String> jobProperties = Maps.newHashMap(); jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100"); String typeName = "kafka"; @@ -163,9 +163,9 @@ public void testUnsupportedProperties() { } catch (UserException e) { Assert.fail(); } - } + } // CHECKSTYLE IGNORE THIS LINE - { + { // CHECKSTYLE IGNORE THIS LINE Map<String, String> jobProperties = Maps.newHashMap(); jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100"); String typeName = "kafka"; @@ -184,9 +184,9 @@ public void testUnsupportedProperties() { } catch (UserException e) { Assert.fail(); } - } + } // CHECKSTYLE IGNORE THIS LINE - { + { // CHECKSTYLE IGNORE THIS LINE Map<String, String> jobProperties = Maps.newHashMap(); jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100"); String typeName = "kafka"; @@ -204,9 +204,9 @@ public void testUnsupportedProperties() { } catch (UserException e) { Assert.fail(); } - } + } // CHECKSTYLE IGNORE THIS LINE - { + { // CHECKSTYLE IGNORE THIS LINE Map<String, String> jobProperties = Maps.newHashMap(); jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100"); jobProperties.put(CreateRoutineLoadStmt.MAX_BATCH_SIZE_PROPERTY, "200000"); @@ -228,7 +228,7 @@ public void 
testUnsupportedProperties() { } catch (UserException e) { Assert.fail(); } - } + } // CHECKSTYLE IGNORE THIS LINE } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/AlterTableStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/AlterTableStmtTest.java index 3872c286ce8213..bb1eebad4a853c 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/AlterTableStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/AlterTableStmtTest.java @@ -81,8 +81,9 @@ public void testAddRollup() throws UserException { ops.add(new AddRollupClause("index2", Lists.newArrayList("col2", "col3"), null, "testTbl", null)); AlterTableStmt stmt = new AlterTableStmt(new TableName("testDb", "testTbl"), ops); stmt.analyze(analyzer); - Assert.assertEquals("ALTER TABLE `testCluster:testDb`.`testTbl` ADD ROLLUP `index1` (`col1`, `col2`) FROM `testTbl`, \n" + - " `index2` (`col2`, `col3`) FROM `testTbl`", + Assert.assertEquals("ALTER TABLE `testCluster:testDb`.`testTbl`" + + " ADD ROLLUP `index1` (`col1`, `col2`) FROM `testTbl`, \n" + + " `index2` (`col2`, `col3`) FROM `testTbl`", stmt.toSql()); Assert.assertEquals("testCluster:testDb", stmt.getTbl().getDb()); Assert.assertEquals(2, stmt.getOps().size()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/BetweenPredicateTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/BetweenPredicateTest.java index 66873d599688ce..91fb515b9d51c1 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/BetweenPredicateTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/BetweenPredicateTest.java @@ -36,6 +36,7 @@ public void testWithCompareAndBoundSubquery(@Injectable Subquery compareExpr, betweenPredicate.analyzeImpl(analyzer); Assert.fail(); } catch (AnalysisException e) { + // CHECKSTYLE IGNORE THIS LINE } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/ColumnDefTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/ColumnDefTest.java index adf50b8f4e9c51..968ff716b5e5ad 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/ColumnDefTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/ColumnDefTest.java @@ -73,20 +73,20 @@ public void testNormal() throws AnalysisException { @Test public void testReplaceIfNotNull() throws AnalysisException { - { + { // CHECKSTYLE IGNORE THIS LINE // not allow null ColumnDef column = new ColumnDef("col", intCol, false, AggregateType.REPLACE_IF_NOT_NULL, false, DefaultValue.NOT_SET, ""); column.analyze(true); Assert.assertEquals(AggregateType.REPLACE_IF_NOT_NULL, column.getAggregateType()); Assert.assertEquals("`col` int(11) REPLACE_IF_NOT_NULL NULL DEFAULT \"null\" COMMENT \"\"", column.toSql()); - } - { + } // CHECKSTYLE IGNORE THIS LINE + { // CHECKSTYLE IGNORE THIS LINE // not allow null ColumnDef column = new ColumnDef("col", intCol, false, AggregateType.REPLACE_IF_NOT_NULL, false, new DefaultValue(true, "10"), ""); column.analyze(true); Assert.assertEquals(AggregateType.REPLACE_IF_NOT_NULL, column.getAggregateType()); Assert.assertEquals("`col` int(11) REPLACE_IF_NOT_NULL NULL DEFAULT \"10\" COMMENT \"\"", column.toSql()); - } + } // CHECKSTYLE IGNORE THIS LINE } @Test(expected = AnalysisException.class) @@ -103,7 +103,7 @@ public void testStrSum() throws AnalysisException { } @Test - public void testBooleanDefaultValue() throws AnalysisException{ + public void testBooleanDefaultValue() throws AnalysisException { ColumnDef column1 = new ColumnDef("col", booleanCol, true, null, true, new 
DefaultValue(true, "1"), ""); column1.analyze(true); Assert.assertEquals("1", column1.getDefaultValue()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/ComparisonPredicateTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/ComparisonPredicateTest.java index d050c94d90c159..c13b31909eb319 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/ComparisonPredicateTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/ComparisonPredicateTest.java @@ -55,6 +55,7 @@ public void testMultiColumnSubquery(@Injectable Expr child0, binaryPredicate.analyzeImpl(analyzer); Assert.fail(); } catch (AnalysisException e) { + // CHECKSTYLE IGNORE THIS LINE } } @@ -102,12 +103,14 @@ public void testWrongOperand(@Injectable Expr child0, @Injectable Expr child1) { predicate1.analyzeImpl(analyzer); Assert.fail(); } catch (AnalysisException e) { + // CHECKSTYLE IGNORE THIS LINE } try { predicate2.analyzeImpl(analyzer); Assert.fail(); } catch (AnalysisException e) { + // CHECKSTYLE IGNORE THIS LINE } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateDataSyncJobStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateDataSyncJobStmtTest.java index 0f697a6cce0edf..4d78e7393c813d 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateDataSyncJobStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateDataSyncJobStmtTest.java @@ -173,6 +173,7 @@ public void testNormal() { Assert.assertEquals("testCluster:testDb", stmt.getDbName()); Assert.assertEquals(DataSyncJobType.CANAL, stmt.getDataSyncJobType()); } catch (UserException e) { + // CHECKSTYLE IGNORE THIS LINE } } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateDbStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateDbStmtTest.java index cdd0d233a1df91..cb8647f0c92d6f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateDbStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateDbStmtTest.java @@ -69,11 +69,11 @@ public void testAnalyzeIcebergNormal() throws UserException { CreateDbStmt stmt = new CreateDbStmt(false, "test", properties); stmt.analyze(analyzer); Assert.assertEquals("testCluster:test", stmt.getFullDbName()); - Assert.assertEquals("CREATE DATABASE `testCluster:test`\n" + - "PROPERTIES (\n" + - "\"iceberg.database\" = \"doris\",\n" + - "\"iceberg.hive.metastore.uris\" = \"thrift://127.0.0.1:9087\"\n" + - ")", stmt.toString()); + Assert.assertEquals("CREATE DATABASE `testCluster:test`\n" + + "PROPERTIES (\n" + + "\"iceberg.database\" = \"doris\",\n" + + "\"iceberg.hive.metastore.uris\" = \"thrift://127.0.0.1:9087\"\n" + + ")", stmt.toString()); } @Test(expected = AnalysisException.class) diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateMaterializedViewStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateMaterializedViewStmtTest.java index 31abb892a45c0f..27b58d4d5883df 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateMaterializedViewStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateMaterializedViewStmtTest.java @@ -1241,7 +1241,7 @@ public void testKeepScaleAndPrecisionOfType(@Injectable SelectStmt selectStmt, } }; MVColumnItem mvColumnItem = Deencapsulation.invoke(createMaterializedViewStmt, "buildMVColumnItem", functionCallExpr); - Assert.assertEquals(50, ((ScalarType)mvColumnItem.getType()).getLength()); + Assert.assertEquals(50, mvColumnItem.getType().getLength()); 
SlotRef slotRef2 = new SlotRef(new TableName("db", "table"), "a"); List<Expr> params2 = Lists.newArrayList(); @@ -1253,12 +1253,12 @@ public void testKeepScaleAndPrecisionOfType(@Injectable SelectStmt selectStmt, slotDescriptor2.getColumn(); result = column2; column2.getOriginType(); - result = ScalarType.createDecimalV2Type(10,1); + result = ScalarType.createDecimalV2Type(10, 1); } }; MVColumnItem mvColumnItem2 = Deencapsulation.invoke(createMaterializedViewStmt, "buildMVColumnItem", functionCallExpr2); - Assert.assertEquals(new Integer(10), ((ScalarType)mvColumnItem2.getType()).getPrecision()); - Assert.assertEquals(1, ((ScalarType)mvColumnItem2.getType()).getScalarScale()); + Assert.assertEquals(new Integer(10), mvColumnItem2.getType().getPrecision()); + Assert.assertEquals(1, ((ScalarType) mvColumnItem2.getType()).getScalarScale()); SlotRef slotRef3 = new SlotRef(new TableName("db", "table"), "a"); List<Expr> params3 = Lists.newArrayList(); @@ -1274,6 +1274,6 @@ public void testKeepScaleAndPrecisionOfType(@Injectable SelectStmt selectStmt, } }; MVColumnItem mvColumnItem3 = Deencapsulation.invoke(createMaterializedViewStmt, "buildMVColumnItem", functionCallExpr3); - Assert.assertEquals(5, ((ScalarType)mvColumnItem3.getType()).getLength()); + Assert.assertEquals(5, mvColumnItem3.getType().getLength()); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateRoutineLoadStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateRoutineLoadStmtTest.java index 125cfc8ba11867..bbc27b85769a0d 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateRoutineLoadStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateRoutineLoadStmtTest.java @@ -98,9 +98,6 @@ public void testAnalyzeWithDuplicateProperty(@Injectable Analyzer analyzer) thro String topicName = "topic1"; String serverAddress = "http://127.0.0.1:8080"; String kafkaPartitionString = "1,2,3"; - List<String> partitionNameString = Lists.newArrayList(); - partitionNameString.add("p1"); - PartitionNames partitionNames = new PartitionNames(false, partitionNameString); Separator columnSeparator = new Separator(","); // duplicate load property @@ -153,7 +150,6 @@ public void testAnalyze(@Injectable Analyzer analyzer, Separator columnSeparator = new Separator(","); // duplicate load property - TableName tableName = new TableName(dbName, tableNameString); List<ParseNode> loadPropertyList = new ArrayList<>(); loadPropertyList.add(columnSeparator); loadPropertyList.add(partitionNames); @@ -178,7 +174,7 @@ public void analyze(Analyzer analyzer1) { } }; - new Expectations(){ + new Expectations() { { ctx.getSessionVariable(); result = sessionVariable; diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateTableStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateTableStmtTest.java index a9ce32b84ee176..83c85251aecf3e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateTableStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateTableStmtTest.java @@ -267,12 +267,12 @@ public void testCreateIcebergTable() throws UserException { CreateTableStmt stmt = new CreateTableStmt(false, true, tblName, "iceberg", properties, ""); stmt.analyze(analyzer); - Assert.assertEquals("CREATE EXTERNAL TABLE `testCluster:db1`.`table1` (\n" + - "\n" + - ") ENGINE = iceberg\n" + - "PROPERTIES (\"iceberg.database\" = \"doris\",\n" + - "\"iceberg.hive.metastore.uris\" = \"thrift://127.0.0.1:9087\",\n" + - "\"iceberg.table\" = \"test\")", stmt.toString()); + 
Assert.assertEquals("CREATE EXTERNAL TABLE `testCluster:db1`.`table1` (\n" + + "\n" + + ") ENGINE = iceberg\n" + + "PROPERTIES (\"iceberg.database\" = \"doris\",\n" + + "\"iceberg.hive.metastore.uris\" = \"thrift://127.0.0.1:9087\",\n" + + "\"iceberg.table\" = \"test\")", stmt.toString()); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/DataDescriptionTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/DataDescriptionTest.java index 8609f69d5f0346..247b838646dd78 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/DataDescriptionTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/DataDescriptionTest.java @@ -214,7 +214,7 @@ public void testNormal() throws AnalysisException { properties.put("jsonpaths", "[\"$.h1.h2.k1\",\"$.h1.h2.v1\",\"$.h1.h2.v2\"]"); properties.put("json_root", "$.RECORDS"); properties.put("read_json_by_line", "true"); - properties.put("num_as_string","true"); + properties.put("num_as_string", "true"); desc = new DataDescription("testTable", null, Lists.newArrayList("abc.txt"), Lists.newArrayList("col1", "col2"), new Separator(","), "json", null, false, null, null, null, LoadTask.MergeType.APPEND, null, null, properties); @@ -341,7 +341,7 @@ public void testAnalyzeSequenceColumnNormal() throws AnalysisException { tbl.hasSequenceCol(); minTimes = 0; - result =true; + result = true; } }; desc.analyze("testDb"); @@ -360,7 +360,7 @@ public void testAnalyzeSequenceColumnWithoutSourceSequence() throws AnalysisExce tbl.hasSequenceCol(); minTimes = 0; - result =true; + result = true; } }; desc.analyze("testDb"); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/DateLiteralTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/DateLiteralTest.java index 0a91c7f64c5292..64b55c43d7aa89 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/DateLiteralTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/DateLiteralTest.java @@ -93,7 +93,7 @@ public void testDateFormat() { } @Test - public void testParseDateTimeToHourORMinute() throws Exception{ + public void testParseDateTimeToHourORMinute() throws Exception { String s = "2020-12-13 12:13:14"; Type type = Type.DATETIME; DateLiteral literal = new DateLiteral(s, type); @@ -130,16 +130,16 @@ public void testCheckDate() { boolean hasException = false; try { DateLiteral dateLiteral = new DateLiteral(); - dateLiteral.fromDateFormatStr("%Y%m%d","19971007", false); + dateLiteral.fromDateFormatStr("%Y%m%d", "19971007", false); Assert.assertFalse(Deencapsulation.invoke(dateLiteral, "checkDate")); - dateLiteral.fromDateFormatStr("%Y%m%d","19970007", false); + dateLiteral.fromDateFormatStr("%Y%m%d", "19970007", false); Assert.assertFalse(Deencapsulation.invoke(dateLiteral, "checkDate")); - dateLiteral.fromDateFormatStr("%Y%m%d","19971000", false); + dateLiteral.fromDateFormatStr("%Y%m%d", "19971000", false); Assert.assertFalse(Deencapsulation.invoke(dateLiteral, "checkDate")); - dateLiteral.fromDateFormatStr("%Y%m%d","20000229", false); + dateLiteral.fromDateFormatStr("%Y%m%d", "20000229", false); Assert.assertFalse(Deencapsulation.invoke(dateLiteral, "checkDate")); } catch (InvalidFormatException e) { @@ -154,7 +154,7 @@ public void testCheckRange() { boolean hasException = false; try { DateLiteral dateLiteral = new DateLiteral(); - dateLiteral.fromDateFormatStr("%Y%m%d%H%i%s%f","20201209123456123456", false); + dateLiteral.fromDateFormatStr("%Y%m%d%H%i%s%f", "20201209123456123456", false); 
Assert.assertFalse(Deencapsulation.invoke(dateLiteral, "checkRange")); } catch (InvalidFormatException e) { diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/ExplainTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/ExplainTest.java index f94e69b142a575..ef99eb57d5d352 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/ExplainTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/ExplainTest.java @@ -33,33 +33,33 @@ public void before(ConnectContext ctx) throws Exception { CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, ctx); Catalog.getCurrentCatalog().createDb(createDbStmt); - String t1 = ("CREATE TABLE test_explain.explain_t1 (\n" + - " `dt` int(11) COMMENT \"\",\n" + - " `id` int(11) COMMENT \"\",\n" + - " `value` varchar(8) COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`dt`, `id`)\n" + - "PARTITION BY RANGE(`dt`)\n" + - "(PARTITION p1 VALUES LESS THAN (\"10\"))\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ");"); + String t1 = ("CREATE TABLE test_explain.explain_t1 (\n" + + " `dt` int(11) COMMENT \"\",\n" + + " `id` int(11) COMMENT \"\",\n" + + " `value` varchar(8) COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`dt`, `id`)\n" + + "PARTITION BY RANGE(`dt`)\n" + + "(PARTITION p1 VALUES LESS THAN (\"10\"))\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ");"); CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(t1, ctx); Catalog.getCurrentCatalog().createTable(createTableStmt); - String t2 =("CREATE TABLE test_explain.explain_t2 (\n" + - " `dt` bigint(11) COMMENT \"\",\n" + - " `id` bigint(11) COMMENT \"\",\n" + - " `value` bigint(8) COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`dt`, `id`)\n" + - "PARTITION BY RANGE(`dt`)\n" + - "(PARTITION p1 VALUES LESS THAN (\"10\"))\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ");"); + String t2 = ("CREATE TABLE test_explain.explain_t2 (\n" + + " `dt` bigint(11) COMMENT \"\",\n" + + " `id` bigint(11) COMMENT \"\",\n" + + " `value` bigint(8) COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`dt`, `id`)\n" + + "PARTITION BY RANGE(`dt`)\n" + + "(PARTITION p1 VALUES LESS THAN (\"10\"))\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ");"); createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(t2, ctx); Catalog.getCurrentCatalog().createTable(createTableStmt); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/GroupByClauseTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/GroupByClauseTest.java index 017c3e9bffdeac..34c82453e05659 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/GroupByClauseTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/GroupByClauseTest.java @@ -96,7 +96,7 @@ public void testGroupingSets() { + ".`k3`, `testdb`.`t`.`k2`), (`testdb`.`t`.`k1`, `testdb`.`t`.`k3`), (`testdb`.`t`.`k4`), (`testdb`" + ".`t`.`k1`, `testdb`.`t`.`k2`, `testdb`.`t`.`k3`, `testdb`.`t`.`k4`))", groupByClause.toSql()); List<BitSet> bitSetList = groupingInfo.getGroupingIdList(); - { + { // CHECKSTYLE IGNORE THIS LINE String[] answer = {"{0, 1, 2, 3}", "{0, 1}", "{0, 2}", "{3}"}; Set<String> answerSet = new HashSet(Arrays.asList(answer)); Set<String> resultSet = new HashSet<>(); @@ -105,7 +105,7 @@ 
public void testGroupingSets() { resultSet.add(s); } Assert.assertEquals(answerSet, resultSet); - } + } // CHECKSTYLE IGNORE THIS LINE } @Test @@ -134,7 +134,7 @@ public void testRollUp() { Assert.assertEquals("ROLLUP (`testdb`.`t`.`k2`, `testdb`.`t`.`k3`, " + "`testdb`.`t`.`k4`, `testdb`.`t`.`k3`)", groupByClause.toSql()); List<BitSet> bitSetList = groupingInfo.getGroupingIdList(); - { + { // CHECKSTYLE IGNORE THIS LINE String[] answer = {"{}", "{0}", "{0, 1}", "{0, 1, 2}"}; Set<String> answerSet = new HashSet(Arrays.asList(answer)); Set<String> resultSet = new HashSet<>(); @@ -143,7 +143,7 @@ public void testRollUp() { resultSet.add(s); } Assert.assertEquals(answerSet, resultSet); - } + } // CHECKSTYLE IGNORE THIS LINE } @Test @@ -171,7 +171,7 @@ public void testCube() { Assert.assertEquals(4, groupByClause.getGroupingExprs().size()); List<BitSet> bitSetList = groupingInfo.getGroupingIdList(); - { + { // CHECKSTYLE IGNORE THIS LINE String[] answer = {"{}", "{1}", "{0}", "{0, 1}", "{2}", "{1, 2}", "{0, 1, 2}", "{0, 2}"}; Set<String> answerSet = new HashSet(Arrays.asList(answer)); Set<String> resultSet = new HashSet<>(); @@ -181,7 +181,7 @@ public void testCube() { } Assert.assertEquals(answerSet, resultSet); - } + } // CHECKSTYLE IGNORE THIS LINE } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java index b381a77f26a860..3dca3638d2a49a 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java @@ -71,14 +71,13 @@ private static InsertStmt parseAndAnalyze(String sql) throws Exception { Analyzer analyzer = new Analyzer(connectContext.getCatalog(), connectContext); insertStmt.analyze(analyzer); return insertStmt; - }; + } @Test public void testInsertArrayStmt() throws Exception { - ExceptionChecker.expectThrowsNoException(() -> { - createTable("create table test.table1 (k1 INT, v1 Array<int>) duplicate key (k1) " + - " distributed by hash (k1) buckets 1 properties ('replication_num' = '1');"); - }); + ExceptionChecker.expectThrowsNoException( + () -> createTable("create table test.table1 (k1 INT, v1 Array<int>) duplicate key (k1) " + + " distributed by hash (k1) buckets 1 properties ('replication_num' = '1');")); connectContext.setQueryId(new TUniqueId(1, 0)); InsertStmt insertStmt = parseAndAnalyze("insert into test.table1 values (1, [1, 2, 3]);"); @@ -107,8 +106,7 @@ public void testInsertArrayStmt() throws Exception { Assert.assertSame(PrimitiveType.INT, ((ArrayType) arrayLiteral.getType()).getItemType().getPrimitiveType()); connectContext.setQueryId(new TUniqueId(3, 0)); - ExceptionChecker.expectThrowsWithMsg(AnalysisException.class, "type not match", ()-> { - parseAndAnalyze("insert into test.table1 values (1, [[1, 2], [3, 4]]);"); - }); + ExceptionChecker.expectThrowsWithMsg(AnalysisException.class, "type not match", + () -> parseAndAnalyze("insert into test.table1 values (1, [[1, 2], [3, 4]]);")); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertStmtTest.java index 9f4907cd7a441d..6ed8a5aed0764e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertStmtTest.java @@ -61,7 +61,7 @@ public static void setUp() throws Exception { dorisAssert.withDatabase("db").useDatabase("db"); dorisAssert.withTable(createTblStmtStr); - 
ConnectContext ctx = UtFrameUtils.createDefaultCtx(); + UtFrameUtils.createDefaultCtx(); } List<Column> getBaseSchema() { @@ -126,7 +126,7 @@ List<Column> getFullSchema() throws Exception { v3.setIsAllowNull(false); ArrayList<Expr> params = new ArrayList<>(); - SlotRef slotRef = new SlotRef(null , "k1"); + SlotRef slotRef = new SlotRef(null, "k1"); slotRef.setType(Type.BIGINT); params.add(slotRef.uncheckedCastTo(Type.VARCHAR)); @@ -176,11 +176,16 @@ public void testNormal() throws Exception { QueryStmt queryStmt = (QueryStmt) statementBase; - new Expectations() {{ - targetTable.getBaseSchema(); result = getBaseSchema(); - targetTable.getBaseSchema(anyBoolean); result = getBaseSchema(); - targetTable.getFullSchema(); result = getFullSchema(); - }}; + new Expectations() { + { + targetTable.getBaseSchema(); + result = getBaseSchema(); + targetTable.getBaseSchema(anyBoolean); + result = getBaseSchema(); + targetTable.getFullSchema(); + result = getFullSchema(); + } + }; InsertStmt stmt = new InsertStmt(target, "label", null, source, new ArrayList<>()); @@ -234,11 +239,16 @@ public void testInsertSelect() throws Exception { QueryStmt queryStmt = (QueryStmt) statementBase; - new Expectations() {{ - targetTable.getBaseSchema(); result = getBaseSchema(); - targetTable.getBaseSchema(anyBoolean); result = getBaseSchema(); - targetTable.getFullSchema(); result = getFullSchema(); - }}; + new Expectations() { + { + targetTable.getBaseSchema(); + result = getBaseSchema(); + targetTable.getBaseSchema(anyBoolean); + result = getBaseSchema(); + targetTable.getFullSchema(); + result = getFullSchema(); + } + }; InsertStmt stmt = new InsertStmt(target, "label", null, source, new ArrayList<>()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/InstallPluginStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/InstallPluginStmtTest.java index 23f29167c8c861..d7e09f2f2a948b 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/InstallPluginStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/InstallPluginStmtTest.java @@ -57,8 +57,8 @@ public void testNormal() throws UserException { InstallPluginStmt stmt = new InstallPluginStmt("http://test/test.zip", properties); stmt.analyze(analyzer); Assert.assertEquals("7529db41471ec72e165f96fe9fb92742", stmt.getMd5sum()); - Assert.assertEquals("INSTALL PLUGIN FROM \"http://test/test.zip\"\n" + - "PROPERTIES (\"md5sum\" = \"7529db41471ec72e165f96fe9fb92742\")", stmt.toString()); + Assert.assertEquals("INSTALL PLUGIN FROM \"http://test/test.zip\"\n" + + "PROPERTIES (\"md5sum\" = \"7529db41471ec72e165f96fe9fb92742\")", stmt.toString()); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/ListPartitionPrunerTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/ListPartitionPrunerTest.java index 763909b51433ba..d7bae60c8f564f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/ListPartitionPrunerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/ListPartitionPrunerTest.java @@ -29,52 +29,52 @@ protected void runBeforeAll() throws Exception { createDatabase("test"); String createSinglePartColWithSinglePartKey = - "create table test.t1\n" - + "(k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int)\n" - + "partition by list(k1)\n" - + "(\n" - + "partition p1 values in (\"1\"),\n" - + "partition p2 values in (\"2\")\n" - + ")\n" - + "distributed by hash(k2) buckets 1\n" - + "properties('replication_num' = '1');"; + "create table test.t1\n" + + "(k1 int not null, k2 varchar(128), k3 int, v1 
int, v2 int)\n" + + "partition by list(k1)\n" + + "(\n" + + "partition p1 values in (\"1\"),\n" + + "partition p2 values in (\"2\")\n" + + ")\n" + + "distributed by hash(k2) buckets 1\n" + + "properties('replication_num' = '1');"; String createSinglePartColWithMultiPartKey = - "create table test.t2\n" - + "(k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int)\n" - + "partition by list(k1)\n" - + "(\n" - + "partition p1 values in (\"1\", \"3\", \"5\"),\n" - + "partition p2 values in (\"2\", \"4\", \"6\"),\n" - + "partition p3 values in (\"7\", \"8\")\n" - + ")\n" - + "distributed by hash(k2) buckets 1\n" - + "properties('replication_num' = '1');"; + "create table test.t2\n" + + "(k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int)\n" + + "partition by list(k1)\n" + + "(\n" + + "partition p1 values in (\"1\", \"3\", \"5\"),\n" + + "partition p2 values in (\"2\", \"4\", \"6\"),\n" + + "partition p3 values in (\"7\", \"8\")\n" + + ")\n" + + "distributed by hash(k2) buckets 1\n" + + "properties('replication_num' = '1');"; String createMultiPartColWithSinglePartKey = - "create table test.t3\n" - + "(k1 int not null, k2 varchar(128) not null, k3 int, v1 int, v2 int)\n" - + "partition by list(k1, k2)\n" - + "(\n" - + "partition p1 values in ((\"1\", \"beijing\")),\n" - + "partition p2 values in ((\"2\", \"beijing\"))\n" - + ")\n" - + "distributed by hash(k2) buckets 1\n" - + "properties('replication_num' = '1');"; + "create table test.t3\n" + + "(k1 int not null, k2 varchar(128) not null, k3 int, v1 int, v2 int)\n" + + "partition by list(k1, k2)\n" + + "(\n" + + "partition p1 values in ((\"1\", \"beijing\")),\n" + + "partition p2 values in ((\"2\", \"beijing\"))\n" + + ")\n" + + "distributed by hash(k2) buckets 1\n" + + "properties('replication_num' = '1');"; String createMultiPartColWithMultiPartKey = - "create table test.t4\n" - + "(k1 int not null, k2 varchar(128) not null, k3 int, v1 int, v2 int)\n" - + "partition by list(k1, k2)\n" - + "(\n" - + "partition p1 values in ((\"1\", \"beijing\"), (\"2\", \"shanghai\")),\n" - + "partition p2 values in ((\"2\", \"beijing\")),\n" - + "partition p3 values in ((\"3\", \"tianjin\"), (\"1\", \"shanghai\"))\n" - + ")\n" - + "distributed by hash(k2) buckets 1\n" - + "properties('replication_num' = '1');"; + "create table test.t4\n" + + "(k1 int not null, k2 varchar(128) not null, k3 int, v1 int, v2 int)\n" + + "partition by list(k1, k2)\n" + + "(\n" + + "partition p1 values in ((\"1\", \"beijing\"), (\"2\", \"shanghai\")),\n" + + "partition p2 values in ((\"2\", \"beijing\")),\n" + + "partition p3 values in ((\"3\", \"tianjin\"), (\"1\", \"shanghai\"))\n" + + ")\n" + + "distributed by hash(k2) buckets 1\n" + + "properties('replication_num' = '1');"; createTables(createSinglePartColWithSinglePartKey, - createSinglePartColWithMultiPartKey, - createMultiPartColWithSinglePartKey, - createMultiPartColWithMultiPartKey); + createSinglePartColWithMultiPartKey, + createMultiPartColWithSinglePartKey, + createMultiPartColWithMultiPartKey); } private void initTestCases() { diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/LiteralExprCompareTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/LiteralExprCompareTest.java index b8ed2c80877497..4a0f556ea5dfb4 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/LiteralExprCompareTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/LiteralExprCompareTest.java @@ -57,13 +57,13 @@ public void boolTest() { @Test(expected = AnalysisException.class) public void 
dateFormat1Test() throws AnalysisException { - LiteralExpr date = new DateLiteral("2015-02-15 12:12:12", ScalarType.DATE); + new DateLiteral("2015-02-15 12:12:12", ScalarType.DATE); Assert.fail(); } @Test(expected = AnalysisException.class) public void dateFormat2Test() throws AnalysisException { - LiteralExpr datetime = new DateLiteral("2015-02-15", ScalarType.DATETIME); + new DateLiteral("2015-02-15", ScalarType.DATETIME); Assert.fail(); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/LoadStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/LoadStmtTest.java index 2d2a5c251817d6..f5da3d12329f89 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/LoadStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/LoadStmtTest.java @@ -84,7 +84,7 @@ public void testNormal(@Injectable DataDescription desc, @Mocked Catalog catalog String resourceName = "spark0"; SparkResource resource = new SparkResource(resourceName); - new Expectations(){ + new Expectations() { { desc.getMergeType(); result = LoadTask.MergeType.APPEND; @@ -138,7 +138,7 @@ public void testNoData() throws UserException, AnalysisException { } @Test - public void testRewrite() throws Exception{ + public void testRewrite() throws Exception { LoadTaskInfo.ImportColumnDescs columnDescs = new LoadTaskInfo.ImportColumnDescs(); List<ImportColumnDesc> columns1 = getColumns("c1,c2,c3,tmp_c4=c1 + 1, tmp_c5 = tmp_c4+1"); columnDescs.descs = columns1; @@ -180,8 +180,6 @@ public void testRewrite() throws Exception{ private List<ImportColumnDesc> getColumns(String columns) throws Exception { String columnsSQL = "COLUMNS (" + columns + ")"; return ((ImportColumnsStmt) SqlParserUtils.getFirstStmt( - new SqlParser( - new SqlScanner( - new StringReader(columnsSQL))))).getColumns(); + new SqlParser(new SqlScanner(new StringReader(columnsSQL))))).getColumns(); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/PartitionPruneTestBase.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/PartitionPruneTestBase.java index 080904cd60c323..95c4790bea917e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/PartitionPruneTestBase.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/PartitionPruneTestBase.java @@ -37,9 +37,8 @@ protected void doTest() throws Exception { } private void assertExplainContains(int version, String sql, String subString) throws Exception { - Assert.assertTrue(String.format("version=%d, sql=%s, expectResult=%s", - version, sql, subString), - getSQLPlanOrErrorMsg("explain " + sql).contains(subString)); + Assert.assertTrue(String.format("version=%d, sql=%s, expectResult=%s", version, sql, subString), + getSQLPlanOrErrorMsg("explain " + sql).contains(subString)); } protected void addCase(String sql, String v1Result, String v2Result) { diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/QueryStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/QueryStmtTest.java index ba57dc2abc6650..f21f291ceb5318 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/QueryStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/QueryStmtTest.java @@ -55,21 +55,21 @@ public static void setUp() throws Exception { + "AGGREGATE KEY(k1, k2,k3,k4) distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; String createBaseAllStmtStr = "create table db1.baseall(k1 int, k2 varchar(32)) distributed by hash(k1) " + "buckets 3 properties('replication_num' = '1');"; - String tbl1 = "CREATE TABLE db1.table1 (\n" + - " `siteid` int(11) NULL
DEFAULT \"10\" COMMENT \"\",\n" + - " `citycode` smallint(6) NULL COMMENT \"\",\n" + - " `username` varchar(32) NULL DEFAULT \"\" COMMENT \"\",\n" + - " `workDateTime` datetime NOT NULL COMMENT \"\",\n" + - " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "UNIQUE KEY(`siteid`, `citycode`, `username`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`siteid`) BUCKETS 10\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ")"; + String tbl1 = "CREATE TABLE db1.table1 (\n" + + " `siteid` int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" + + " `citycode` smallint(6) NULL COMMENT \"\",\n" + + " `username` varchar(32) NULL DEFAULT \"\" COMMENT \"\",\n" + + " `workDateTime` datetime NOT NULL COMMENT \"\",\n" + + " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "UNIQUE KEY(`siteid`, `citycode`, `username`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`siteid`) BUCKETS 10\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ")"; dorisAssert = new DorisAssert(); dorisAssert.withDatabase("db1").useDatabase("db1"); dorisAssert.withTable(createTblStmtStr) @@ -81,20 +81,20 @@ public static void setUp() throws Exception { public void testCollectExprs() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); Analyzer analyzer = new Analyzer(ctx.getCatalog(), ctx); - String sql = "SELECT CASE\n" + - " WHEN (\n" + - " SELECT COUNT(*) / 2\n" + - " FROM db1.tbl1\n" + - " ) > k4 THEN (\n" + - " SELECT AVG(k4)\n" + - " FROM db1.tbl1\n" + - " )\n" + - " ELSE (\n" + - " SELECT SUM(k4)\n" + - " FROM db1.tbl1\n" + - " )\n" + - " END AS kk4\n" + - "FROM db1.tbl1;"; + String sql = "SELECT CASE\n" + + " WHEN (\n" + + " SELECT COUNT(*) / 2\n" + + " FROM db1.tbl1\n" + + " ) > k4 THEN (\n" + + " SELECT AVG(k4)\n" + + " FROM db1.tbl1\n" + + " )\n" + + " ELSE (\n" + + " SELECT SUM(k4)\n" + + " FROM db1.tbl1\n" + + " )\n" + + " END AS kk4\n" + + "FROM db1.tbl1;"; QueryStmt stmt = (QueryStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); Map<String, Expr> exprsMap = new HashMap<>(); stmt.collectExprs(exprsMap); @@ -102,21 +102,21 @@ public void testCollectExprs() throws Exception { Map<String, Expr> constMap = getConstantExprMap(exprsMap, analyzer); Assert.assertEquals(0, constMap.size()); - sql = "SELECT username\n" + - "FROM db1.table1\n" + - "WHERE siteid in\n" + - " (SELECT abs(5+abs(0))+1)\n" + - "UNION\n" + - "SELECT CASE\n" + - " WHEN\n" + - " (SELECT count(*)+abs(8)\n" + - " FROM db1.table1\n" + - " WHERE username='helen')>1 THEN 888\n" + - " ELSE 999\n" + - " END AS ccc\n" + - "FROM\n" + - " (SELECT curdate()) a " + - "ORDER BY curdate();"; + sql = "SELECT username\n" + + "FROM db1.table1\n" + + "WHERE siteid in\n" + + " (SELECT abs(5+abs(0))+1)\n" + + "UNION\n" + + "SELECT CASE\n" + + " WHEN\n" + + " (SELECT count(*)+abs(8)\n" + + " FROM db1.table1\n" + + " WHERE username='helen')>1 THEN 888\n" + + " ELSE 999\n" + + " END AS ccc\n" + + "FROM\n" + + " (SELECT curdate()) a " + + "ORDER BY curdate();"; stmt = (QueryStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); exprsMap.clear(); stmt.collectExprs(exprsMap); @@ -125,70 +125,70 @@ public void testCollectExprs() throws Exception { constMap = getConstantExprMap(exprsMap, analyzer); Assert.assertEquals(4, constMap.size()); - sql = "select\n" + - " avg(t1.k4)\n" + - "from\n" + - " db1.tbl1 t1,\n" + - " db1.tbl1 t2,\n" + - " db1.tbl1 t3,\n" + - " db1.tbl1 
t4,\n" + - " db1.tbl1 t5,\n" + - " db1.tbl1 t6\n" + - "where\n" + - " t2.k1 = t1.k1\n" + - " and t1.k2 = t6.k2\n" + - " and t6.k4 = 2001\n" + - " and(\n" + - " (\n" + - " t1.k2 = t4.k2\n" + - " and t3.k3 = t1.k3\n" + - " and t3.k1 = 'D'\n" + - " and t4.k3 = '2 yr Degree'\n" + - " and t1.k4 between 100.00\n" + - " and 150.00\n" + - " and t4.k4 = 3\n" + - " )\n" + - " or (\n" + - " t1.k2 = t4.k2\n" + - " and t3.k3 = t1.k3\n" + - " and t3.k1 = 'S'\n" + - " and t4.k3 = 'Secondary'\n" + - " and t1.k4 between 50.00\n" + - " and 100.00\n" + - " and t4.k4 = 1\n" + - " )\n" + - " or (\n" + - " t1.k2 = t4.k2\n" + - " and t3.k3 = t1.k3\n" + - " and t3.k1 = 'W'\n" + - " and t4.k3 = 'Advanced Degree'\n" + - " and t1.k4 between 150.00\n" + - " and 200.00\n" + - " and t4.k4 = 1\n" + - " )\n" + - " )\n" + - " and(\n" + - " (\n" + - " t1.k1 = t5.k1\n" + - " and t5.k2 = 'United States'\n" + - " and t5.k3 in ('CO', 'IL', 'MN')\n" + - " and t1.k4 between 100\n" + - " and 200\n" + - " )\n" + - " or (\n" + - " t1.k1 = t5.k1\n" + - " and t5.k2 = 'United States'\n" + - " and t5.k3 in ('OH', 'MT', 'NM')\n" + - " and t1.k4 between 150\n" + - " and 300\n" + - " )\n" + - " or (\n" + - " t1.k1 = t5.k1\n" + - " and t5.k2 = 'United States'\n" + - " and t5.k3 in ('TX', 'MO', 'MI')\n" + - " and t1.k4 between 50 and 250\n" + - " )\n" + - " );"; + sql = "select\n" + + " avg(t1.k4)\n" + + "from\n" + + " db1.tbl1 t1,\n" + + " db1.tbl1 t2,\n" + + " db1.tbl1 t3,\n" + + " db1.tbl1 t4,\n" + + " db1.tbl1 t5,\n" + + " db1.tbl1 t6\n" + + "where\n" + + " t2.k1 = t1.k1\n" + + " and t1.k2 = t6.k2\n" + + " and t6.k4 = 2001\n" + + " and(\n" + + " (\n" + + " t1.k2 = t4.k2\n" + + " and t3.k3 = t1.k3\n" + + " and t3.k1 = 'D'\n" + + " and t4.k3 = '2 yr Degree'\n" + + " and t1.k4 between 100.00\n" + + " and 150.00\n" + + " and t4.k4 = 3\n" + + " )\n" + + " or (\n" + + " t1.k2 = t4.k2\n" + + " and t3.k3 = t1.k3\n" + + " and t3.k1 = 'S'\n" + + " and t4.k3 = 'Secondary'\n" + + " and t1.k4 between 50.00\n" + + " and 100.00\n" + + " and t4.k4 = 1\n" + + " )\n" + + " or (\n" + + " t1.k2 = t4.k2\n" + + " and t3.k3 = t1.k3\n" + + " and t3.k1 = 'W'\n" + + " and t4.k3 = 'Advanced Degree'\n" + + " and t1.k4 between 150.00\n" + + " and 200.00\n" + + " and t4.k4 = 1\n" + + " )\n" + + " )\n" + + " and(\n" + + " (\n" + + " t1.k1 = t5.k1\n" + + " and t5.k2 = 'United States'\n" + + " and t5.k3 in ('CO', 'IL', 'MN')\n" + + " and t1.k4 between 100\n" + + " and 200\n" + + " )\n" + + " or (\n" + + " t1.k1 = t5.k1\n" + + " and t5.k2 = 'United States'\n" + + " and t5.k3 in ('OH', 'MT', 'NM')\n" + + " and t1.k4 between 150\n" + + " and 300\n" + + " )\n" + + " or (\n" + + " t1.k1 = t5.k1\n" + + " and t5.k2 = 'United States'\n" + + " and t5.k3 in ('TX', 'MO', 'MI')\n" + + " and t1.k4 between 50 and 250\n" + + " )\n" + + " );"; stmt = (QueryStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); exprsMap.clear(); stmt.collectExprs(exprsMap); @@ -197,8 +197,8 @@ public void testCollectExprs() throws Exception { constMap = getConstantExprMap(exprsMap, analyzer); Assert.assertEquals(0, constMap.size()); - sql = "SELECT k1 FROM db1.baseall GROUP BY k1 HAVING EXISTS(SELECT k4 FROM db1.tbl1 GROUP BY k4 " + - "HAVING SUM(k4) = k4);"; + sql = "SELECT k1 FROM db1.baseall GROUP BY k1 HAVING EXISTS(SELECT k4 FROM db1.tbl1 GROUP BY k4 " + + "HAVING SUM(k4) = k4);"; stmt = (QueryStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); exprsMap.clear(); stmt.collectExprs(exprsMap); @@ -218,29 +218,29 @@ public void testCollectExprs() throws Exception { Assert.assertEquals(1, constMap.size()); // 
expr in subquery associate with column in grandparent level - sql = "WITH aa AS\n" + - " (SELECT DATE_FORMAT(workDateTime, '%Y-%m') mon,\n" + - " siteid\n" + - " FROM db1.table1\n" + - " WHERE workDateTime >= concat(year(now())-1, '-01-01 00:00:00')\n" + - " AND workDateTime < now()\n" + - " GROUP BY siteid,\n" + - " DATE_FORMAT(workDateTime, '%Y-%m')),\n" + - " bb AS\n" + - " (SELECT mon,\n" + - " count(DISTINCT siteid) total\n" + - " FROM aa\n" + - " GROUP BY mon),\n" + - " cc AS\n" + - " (SELECT mon,\n" + - " count(DISTINCT siteid) num\n" + - " FROM aa\n" + - " GROUP BY mon)\n" + - "SELECT bb.mon,\n" + - " round(cc.num / bb.total, 4) rate\n" + - "FROM bb\n" + - "LEFT JOIN cc ON cc.mon = bb.mon\n" + - "ORDER BY mon;"; + sql = "WITH aa AS\n" + + " (SELECT DATE_FORMAT(workDateTime, '%Y-%m') mon,\n" + + " siteid\n" + + " FROM db1.table1\n" + + " WHERE workDateTime >= concat(year(now())-1, '-01-01 00:00:00')\n" + + " AND workDateTime < now()\n" + + " GROUP BY siteid,\n" + + " DATE_FORMAT(workDateTime, '%Y-%m')),\n" + + " bb AS\n" + + " (SELECT mon,\n" + + " count(DISTINCT siteid) total\n" + + " FROM aa\n" + + " GROUP BY mon),\n" + + " cc AS\n" + + " (SELECT mon,\n" + + " count(DISTINCT siteid) num\n" + + " FROM aa\n" + + " GROUP BY mon)\n" + + "SELECT bb.mon,\n" + + " round(cc.num / bb.total, 4) rate\n" + + "FROM bb\n" + + "LEFT JOIN cc ON cc.mon = bb.mon\n" + + "ORDER BY mon;"; stmt = (QueryStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); exprsMap.clear(); stmt.collectExprs(exprsMap); @@ -253,20 +253,20 @@ public void testCollectExprs() throws Exception { @Test public void testPutBackExprs() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - String sql = "SELECT username, @@license, @@time_zone\n" + - "FROM db1.table1\n" + - "WHERE siteid in\n" + - " (SELECT abs(5+abs(0))+1)\n" + - "UNION\n" + - "SELECT CASE\n" + - " WHEN\n" + - " (SELECT count(*)+abs(8)\n" + - " FROM db1.table1\n" + - " WHERE username='helen')>1 THEN 888\n" + - " ELSE 999\n" + - " END AS ccc, @@language, @@storage_engine\n" + - "FROM\n" + - " (SELECT curdate()) a;"; + String sql = "SELECT username, @@license, @@time_zone\n" + + "FROM db1.table1\n" + + "WHERE siteid in\n" + + " (SELECT abs(5+abs(0))+1)\n" + + "UNION\n" + + "SELECT CASE\n" + + " WHEN\n" + + " (SELECT count(*)+abs(8)\n" + + " FROM db1.table1\n" + + " WHERE username='helen')>1 THEN 888\n" + + " ELSE 999\n" + + " END AS ccc, @@language, @@storage_engine\n" + + "FROM\n" + + " (SELECT curdate()) a;"; StatementBase stmt = UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); stmt.foldConstant(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); @@ -276,19 +276,19 @@ public void testPutBackExprs() throws Exception { Assert.assertTrue(stmt.toSql().contains("/palo/share/english/")); // test sysVariableDescs - sql = "SELECT\n" + - " avg(t1.k4)\n" + - "FROM\n" + - " db1.tbl1 t1,\n" + - " db1.tbl1 t2\n" + - "WHERE\n" + - "(\n" + - " t2.k2 = 'United States'\n" + - " AND t2.k3 in (@@license, @@version)\n" + - ")\n" + - "OR (\n" + - " t2.k2 = @@language\n" + - ")"; + sql = "SELECT\n" + + " avg(t1.k4)\n" + + "FROM\n" + + " db1.tbl1 t1,\n" + + " db1.tbl1 t2\n" + + "WHERE\n" + + "(\n" + + " t2.k2 = 'United States'\n" + + " AND t2.k3 in (@@license, @@version)\n" + + ")\n" + + "OR (\n" + + " t2.k2 = @@language\n" + + ")"; stmt = UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); stmt.foldConstant(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); // reAnalyze @@ -297,19 +297,19 @@ public void testPutBackExprs() throws Exception { 
Assert.assertTrue(stmt.toSql().contains("/palo/share/english/")); // test informationFunctions - sql = "SELECT\n" + - " avg(t1.k4)\n" + - "FROM\n" + - " db1.tbl1 t1,\n" + - " db1.tbl1 t2\n" + - "WHERE\n" + - "(\n" + - " t2.k2 = 'United States'\n" + - " AND t2.k1 in (USER(), CURRENT_USER(), SCHEMA())\n" + - ")\n" + - "OR (\n" + - " t2.k2 = CONNECTION_ID()\n" + - ")"; + sql = "SELECT\n" + + " avg(t1.k4)\n" + + "FROM\n" + + " db1.tbl1 t1,\n" + + " db1.tbl1 t2\n" + + "WHERE\n" + + "(\n" + + " t2.k2 = 'United States'\n" + + " AND t2.k1 in (USER(), CURRENT_USER(), SCHEMA())\n" + + ")\n" + + "OR (\n" + + " t2.k2 = CONNECTION_ID()\n" + + ")"; stmt = UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); stmt.foldConstant(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); // reAnalyze @@ -318,11 +318,11 @@ public void testPutBackExprs() throws Exception { Assert.assertTrue(stmt.toSql().contains("root''@''127.0.0.1")); // inline view - sql = "SELECT\n" + - " t1.k1, t2.k1\n" + - "FROM\n" + - " (select USER() k1, CURRENT_USER() k2, SCHEMA() k3) t1,\n" + - " (select @@license k1, @@version k2) t2\n"; + sql = "SELECT\n" + + " t1.k1, t2.k1\n" + + "FROM\n" + + " (select USER() k1, CURRENT_USER() k2, SCHEMA() k3) t1,\n" + + " (select @@license k1, @@version k2) t2\n"; stmt = UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); stmt.foldConstant(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); // reAnalyze @@ -335,7 +335,7 @@ public void testPutBackExprs() throws Exception { private void reAnalyze(StatementBase stmt, ConnectContext ctx) throws UserException { // reAnalyze List origResultTypes = Lists.newArrayList(); - for (Expr e: stmt.getResultExprs()) { + for (Expr e : stmt.getResultExprs()) { origResultTypes.add(e.getType()); } List origColLabels = @@ -361,7 +361,7 @@ private void reAnalyze(StatementBase stmt, ConnectContext ctx) throws UserExcept private Map getConstantExprMap(Map exprMap, Analyzer analyzer) throws AnalysisException { FoldConstantsRule rule = (FoldConstantsRule) FoldConstantsRule.INSTANCE; Map resultMap = new HashMap<>(); - for (Map.Entry entry : exprMap.entrySet()){ + for (Map.Entry entry : exprMap.entrySet()) { Map constMap = new HashMap<>(); Map oriConstMap = new HashMap<>(); Map sysVarMap = new HashMap<>(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/RangeCompareTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/RangeCompareTest.java index 95631b203f6769..0eb1606511719f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/RangeCompareTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/RangeCompareTest.java @@ -63,7 +63,7 @@ public void testWithoutIntersection() { LiteralExpr upperBoundOfRange2 = new IntLiteral(1); Range range2 = Range.lessThan(upperBoundOfRange2); try { - Range intersectionRange = range1.intersection(range2); + range1.intersection(range2); Assert.fail(); } catch (IllegalArgumentException e) { System.out.println(e); @@ -80,7 +80,7 @@ public void testIntersectionInvalidRange() throws AnalysisException { LiteralExpr upperBoundOfRange2 = new DecimalLiteral("0.1"); Range range2 = Range.lessThan(upperBoundOfRange2); try { - Range intersectionRange = range1.intersection(range2); + range1.intersection(range2); Assert.fail(); } catch (IllegalArgumentException e) { System.out.println(e); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/RangePartitionPruneTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/RangePartitionPruneTest.java index 873b814b650b5f..52b52eb74f3946 100644 --- 
a/fe/fe-core/src/test/java/org/apache/doris/analysis/RangePartitionPruneTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/RangePartitionPruneTest.java @@ -29,85 +29,85 @@ protected void runBeforeAll() throws Exception { createDatabase("test"); String singleColumnPartitionTable = - "CREATE TABLE `test`.`t1` (\n" + - " `dt` int(11) NULL COMMENT \"\",\n" + - " `k1` int(11) NULL COMMENT \"\",\n" + - " `k2` int(11) NULL COMMENT \"\",\n" + - " `k3` int(11) NULL COMMENT \"\",\n" + - " `k4` int(11) NULL COMMENT \"\"\n" + - ") " + - "DUPLICATE KEY(`dt`, `k1`, `k2`, `k3`, `k4`)\n" + - "PARTITION BY RANGE(`dt`)\n" + - "(PARTITION p20211121 VALUES LESS THAN (\"20211121\"),\n" + - "PARTITION p20211122 VALUES [(\"20211121\"), (\"20211122\")),\n" + - "PARTITION p20211123 VALUES [(\"20211122\"), (\"20211123\")),\n" + - "PARTITION p20211124 VALUES [(\"20211123\"), (\"20211124\")),\n" + - "PARTITION p20211125 VALUES [(\"20211124\"), (\"20211125\")),\n" + - "PARTITION p20211126 VALUES [(\"20211125\"), (\"20211126\")),\n" + - "PARTITION p20211127 VALUES [(\"20211126\"), (\"20211127\")),\n" + - "PARTITION p20211128 VALUES [(\"20211127\"), (\"20211128\")))\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 60\n" + - "PROPERTIES('replication_num' = '1');"; + "CREATE TABLE `test`.`t1` (\n" + + " `dt` int(11) NULL COMMENT \"\",\n" + + " `k1` int(11) NULL COMMENT \"\",\n" + + " `k2` int(11) NULL COMMENT \"\",\n" + + " `k3` int(11) NULL COMMENT \"\",\n" + + " `k4` int(11) NULL COMMENT \"\"\n" + + ") " + + "DUPLICATE KEY(`dt`, `k1`, `k2`, `k3`, `k4`)\n" + + "PARTITION BY RANGE(`dt`)\n" + + "(PARTITION p20211121 VALUES LESS THAN (\"20211121\"),\n" + + "PARTITION p20211122 VALUES [(\"20211121\"), (\"20211122\")),\n" + + "PARTITION p20211123 VALUES [(\"20211122\"), (\"20211123\")),\n" + + "PARTITION p20211124 VALUES [(\"20211123\"), (\"20211124\")),\n" + + "PARTITION p20211125 VALUES [(\"20211124\"), (\"20211125\")),\n" + + "PARTITION p20211126 VALUES [(\"20211125\"), (\"20211126\")),\n" + + "PARTITION p20211127 VALUES [(\"20211126\"), (\"20211127\")),\n" + + "PARTITION p20211128 VALUES [(\"20211127\"), (\"20211128\")))\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 60\n" + + "PROPERTIES('replication_num' = '1');"; String notNullSingleColumnPartitionTable = - "CREATE TABLE `test`.`not_null` (\n" + - " `dt` int(11) NULL COMMENT \"\",\n" + - " `k1` int(11) NULL COMMENT \"\",\n" + - " `k2` int(11) NULL COMMENT \"\",\n" + - " `k3` int(11) NULL COMMENT \"\",\n" + - " `k4` int(11) NULL COMMENT \"\"\n" + - ") " + - "DUPLICATE KEY(`dt`, `k1`, `k2`, `k3`, `k4`)\n" + - "PARTITION BY RANGE(`dt`)\n" + - "(PARTITION p20211122 VALUES [(\"20211121\"), (\"20211122\")),\n" + - "PARTITION p20211123 VALUES [(\"20211122\"), (\"20211123\")),\n" + - "PARTITION p20211124 VALUES [(\"20211123\"), (\"20211124\")),\n" + - "PARTITION p20211125 VALUES [(\"20211124\"), (\"20211125\")),\n" + - "PARTITION p20211126 VALUES [(\"20211125\"), (\"20211126\")),\n" + - "PARTITION p20211127 VALUES [(\"20211126\"), (\"20211127\")),\n" + - "PARTITION p20211128 VALUES [(\"20211127\"), (\"20211128\")))\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 60\n" + - "PROPERTIES('replication_num' = '1');"; + "CREATE TABLE `test`.`not_null` (\n" + + " `dt` int(11) NULL COMMENT \"\",\n" + + " `k1` int(11) NULL COMMENT \"\",\n" + + " `k2` int(11) NULL COMMENT \"\",\n" + + " `k3` int(11) NULL COMMENT \"\",\n" + + " `k4` int(11) NULL COMMENT \"\"\n" + + ") " + + "DUPLICATE KEY(`dt`, `k1`, `k2`, `k3`, `k4`)\n" + + "PARTITION BY RANGE(`dt`)\n" + + "(PARTITION p20211122 VALUES 
[(\"20211121\"), (\"20211122\")),\n" + + "PARTITION p20211123 VALUES [(\"20211122\"), (\"20211123\")),\n" + + "PARTITION p20211124 VALUES [(\"20211123\"), (\"20211124\")),\n" + + "PARTITION p20211125 VALUES [(\"20211124\"), (\"20211125\")),\n" + + "PARTITION p20211126 VALUES [(\"20211125\"), (\"20211126\")),\n" + + "PARTITION p20211127 VALUES [(\"20211126\"), (\"20211127\")),\n" + + "PARTITION p20211128 VALUES [(\"20211127\"), (\"20211128\")))\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 60\n" + + "PROPERTIES('replication_num' = '1');"; String multipleColumnsPartitionTable = - "CREATE TABLE `test`.`t2` (\n" + - " `k1` int(11) NULL COMMENT \"\",\n" + - " `k2` int(11) NULL COMMENT \"\",\n" + - " `k3` int(11) NULL COMMENT \"\",\n" + - " `k4` int(11) NULL COMMENT \"\",\n" + - " `k5` int(11) NULL COMMENT \"\"\n" + - ") \n" + - "PARTITION BY RANGE(`k1`, `k2`)\n" + - "(PARTITION p1 VALUES LESS THAN (\"3\", \"1\"),\n" + - "PARTITION p2 VALUES [(\"3\", \"1\"), (\"7\", \"10\")),\n" + - "PARTITION p3 VALUES [(\"7\", \"10\"), (\"8\", \"5\")),\n" + - "PARTITION p4 VALUES [(\"10\", \"10\"), (\"12\", \"5\")),\n" + - "PARTITION p5 VALUES [(\"15\", \"6\"), (\"20\", \"11\")),\n" + - "PARTITION p6 VALUES [(\"20\", \"11\"), (\"22\", \"3\")),\n" + - "PARTITION p7 VALUES [(\"23\", \"3\"), (\"23\", \"4\")),\n" + - "PARTITION p8 VALUES [(\"23\", \"4\"), (\"23\", \"20\")),\n" + - "PARTITION p9 VALUES [(\"24\", \"1\"), (\"25\", \"9\")))\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 10\n" + - "PROPERTIES ('replication_num' = '1');"; + "CREATE TABLE `test`.`t2` (\n" + + " `k1` int(11) NULL COMMENT \"\",\n" + + " `k2` int(11) NULL COMMENT \"\",\n" + + " `k3` int(11) NULL COMMENT \"\",\n" + + " `k4` int(11) NULL COMMENT \"\",\n" + + " `k5` int(11) NULL COMMENT \"\"\n" + + ") \n" + + "PARTITION BY RANGE(`k1`, `k2`)\n" + + "(PARTITION p1 VALUES LESS THAN (\"3\", \"1\"),\n" + + "PARTITION p2 VALUES [(\"3\", \"1\"), (\"7\", \"10\")),\n" + + "PARTITION p3 VALUES [(\"7\", \"10\"), (\"8\", \"5\")),\n" + + "PARTITION p4 VALUES [(\"10\", \"10\"), (\"12\", \"5\")),\n" + + "PARTITION p5 VALUES [(\"15\", \"6\"), (\"20\", \"11\")),\n" + + "PARTITION p6 VALUES [(\"20\", \"11\"), (\"22\", \"3\")),\n" + + "PARTITION p7 VALUES [(\"23\", \"3\"), (\"23\", \"4\")),\n" + + "PARTITION p8 VALUES [(\"23\", \"4\"), (\"23\", \"20\")),\n" + + "PARTITION p9 VALUES [(\"24\", \"1\"), (\"25\", \"9\")))\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 10\n" + + "PROPERTIES ('replication_num' = '1');"; String notNullMultipleColumnsPartitionTable = - "CREATE TABLE `test`.`multi_not_null` (\n" + - " `k1` int(11) NULL COMMENT \"\",\n" + - " `k2` int(11) NULL COMMENT \"\",\n" + - " `k3` int(11) NULL COMMENT \"\",\n" + - " `k4` int(11) NULL COMMENT \"\",\n" + - " `k5` int(11) NULL COMMENT \"\"\n" + - ") \n" + - "PARTITION BY RANGE(`k1`, `k2`)\n" + - "(PARTITION p1 VALUES [(\"3\", \"1\"), (\"3\", \"3\")),\n" + - "PARTITION p2 VALUES [(\"4\", \"2\"), (\"4\", \"6\")))\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 10\n" + - "PROPERTIES ('replication_num' = '1');"; + "CREATE TABLE `test`.`multi_not_null` (\n" + + " `k1` int(11) NULL COMMENT \"\",\n" + + " `k2` int(11) NULL COMMENT \"\",\n" + + " `k3` int(11) NULL COMMENT \"\",\n" + + " `k4` int(11) NULL COMMENT \"\",\n" + + " `k5` int(11) NULL COMMENT \"\"\n" + + ") \n" + + "PARTITION BY RANGE(`k1`, `k2`)\n" + + "(PARTITION p1 VALUES [(\"3\", \"1\"), (\"3\", \"3\")),\n" + + "PARTITION p2 VALUES [(\"4\", \"2\"), (\"4\", \"6\")))\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 10\n" + + "PROPERTIES ('replication_num' = '1');"; 
createTables(singleColumnPartitionTable, - notNullSingleColumnPartitionTable, - multipleColumnsPartitionTable, - notNullMultipleColumnsPartitionTable); + notNullSingleColumnPartitionTable, + multipleColumnsPartitionTable, + notNullMultipleColumnsPartitionTable); } private void initTestCases() { diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/RoutineLoadDataSourcePropertiesTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/RoutineLoadDataSourcePropertiesTest.java index 89292045567575..6ceac2c71c8e2a 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/RoutineLoadDataSourcePropertiesTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/RoutineLoadDataSourcePropertiesTest.java @@ -182,8 +182,9 @@ public void testCreateAbnormal() { dsProperties.analyze(); Assert.fail(); } catch (UserException e) { - Assert.assertTrue(e.getMessage().contains("The offset of the partition cannot be specified by the timestamp " + - "and the offset at the same time")); + Assert.assertTrue(e.getMessage().contains( + "The offset of the partition cannot be specified by the timestamp " + + "and the offset at the same time")); } // no partitions but has offset @@ -314,8 +315,9 @@ public void testAlterAbnormal() { dsProperties.analyze(); Assert.fail(); } catch (UserException e) { - Assert.assertTrue(e.getMessage().contains("The offset of the partition cannot be specified by the timestamp " + - "and the offset at the same time")); + Assert.assertTrue(e.getMessage().contains( + "The offset of the partition cannot be specified by the timestamp " + + "and the offset at the same time")); } // no partitions but has offset diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/SelectStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/SelectStmtTest.java index 0c82aed559eac7..a1d35a5b1b4ec7 100755 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/SelectStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/SelectStmtTest.java @@ -42,7 +42,7 @@ import java.util.UUID; public class SelectStmtTest { - private static String runningDir = "fe/mocked/DemoTest/" + UUID.randomUUID().toString() + "/"; + private static final String runningDir = "fe/mocked/DemoTest/" + UUID.randomUUID() + "/"; private static DorisAssert dorisAssert; @Rule @@ -58,68 +58,70 @@ public static void setUp() throws Exception { Config.enable_batch_delete_by_default = true; Config.enable_http_server_v2 = false; UtFrameUtils.createDorisCluster(runningDir); - String createTblStmtStr = "create table db1.tbl1(k1 varchar(32), k2 varchar(32), k3 varchar(32), k4 int, k5 largeint) " - + "AGGREGATE KEY(k1, k2,k3,k4,k5) distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; + String createTblStmtStr = "create table db1.tbl1(k1 varchar(32)," + + " k2 varchar(32), k3 varchar(32), k4 int, k5 largeint) " + + "AGGREGATE KEY(k1, k2,k3,k4,k5) distributed by hash(k1) buckets 3" + + " properties('replication_num' = '1');"; String createBaseAllStmtStr = "create table db1.baseall(k1 int, k2 varchar(32)) distributed by hash(k1) " + "buckets 3 properties('replication_num' = '1');"; - String createPratitionTableStr = "CREATE TABLE db1.partition_table (\n" + - "datekey int(11) NULL COMMENT \"datekey\",\n" + - "poi_id bigint(20) NULL COMMENT \"poi_id\"\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(datekey, poi_id)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE(datekey)\n" + - "(PARTITION p20200727 VALUES [(\"20200726\"), (\"20200727\")),\n" + - "PARTITION p20200728 VALUES 
[(\"20200727\"), (\"20200728\")))\n" + - "DISTRIBUTED BY HASH(poi_id) BUCKETS 2\n" + - "PROPERTIES (\n" + - "\"storage_type\" = \"COLUMN\",\n" + - "\"replication_num\" = \"1\"\n" + - ");"; - String createDatePartitionTableStr = "CREATE TABLE db1.date_partition_table (\n" + - " `dt` date NOT NULL COMMENT \"\",\n" + - " `poi_id` bigint(20) NULL COMMENT \"poi_id\",\n" + - " `uv1` bitmap BITMAP_UNION NOT NULL COMMENT \"\",\n" + - " `uv2` bitmap BITMAP_UNION NOT NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`dt`)\n" + - "( PARTITION `p201701` VALUES LESS THAN (\"2020-09-08\"),\n" + - " PARTITION `p201702` VALUES LESS THAN (\"2020-09-09\"),\n" + - " PARTITION `p201703` VALUES LESS THAN (\"2020-09-10\"))\n" + - "DISTRIBUTED BY HASH(`poi_id`) BUCKETS 20\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"DEFAULT\"\n" + - ");"; - String tbl1 = "CREATE TABLE db1.table1 (\n" + - " `siteid` int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" + - " `citycode` smallint(6) NULL COMMENT \"\",\n" + - " `username` varchar(32) NULL DEFAULT \"\" COMMENT \"\",\n" + - " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "UNIQUE KEY(`siteid`, `citycode`, `username`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`siteid`) BUCKETS 10\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ")"; - String tbl2 = "CREATE TABLE db1.table2 (\n" + - " `siteid` int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" + - " `citycode` smallint(6) NULL COMMENT \"\",\n" + - " `username` varchar(32) NOT NULL DEFAULT \"\" COMMENT \"\",\n" + - " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "UNIQUE KEY(`siteid`, `citycode`, `username`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`siteid`) BUCKETS 10\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ")"; + String createPratitionTableStr = "CREATE TABLE db1.partition_table (\n" + + "datekey int(11) NULL COMMENT \"datekey\",\n" + + "poi_id bigint(20) NULL COMMENT \"poi_id\"\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(datekey, poi_id)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE(datekey)\n" + + "(PARTITION p20200727 VALUES [(\"20200726\"), (\"20200727\")),\n" + + "PARTITION p20200728 VALUES [(\"20200727\"), (\"20200728\")))\n" + + "DISTRIBUTED BY HASH(poi_id) BUCKETS 2\n" + + "PROPERTIES (\n" + + "\"storage_type\" = \"COLUMN\",\n" + + "\"replication_num\" = \"1\"\n" + + ");"; + String createDatePartitionTableStr = "CREATE TABLE db1.date_partition_table (\n" + + " `dt` date NOT NULL COMMENT \"\",\n" + + " `poi_id` bigint(20) NULL COMMENT \"poi_id\",\n" + + " `uv1` bitmap BITMAP_UNION NOT NULL COMMENT \"\",\n" + + " `uv2` bitmap BITMAP_UNION NOT NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`dt`)\n" + + "( PARTITION `p201701` VALUES LESS THAN (\"2020-09-08\"),\n" + + " PARTITION `p201702` VALUES LESS THAN (\"2020-09-09\"),\n" + + " PARTITION `p201703` VALUES LESS THAN (\"2020-09-10\"))\n" + + "DISTRIBUTED BY HASH(`poi_id`) BUCKETS 20\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"DEFAULT\"\n" + + ");"; + String tbl1 = "CREATE TABLE db1.table1 (\n" + + " `siteid` int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" + + " `citycode` smallint(6) NULL COMMENT \"\",\n" + + " `username` 
varchar(32) NULL DEFAULT \"\" COMMENT \"\",\n" + + " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "UNIQUE KEY(`siteid`, `citycode`, `username`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`siteid`) BUCKETS 10\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ")"; + String tbl2 = "CREATE TABLE db1.table2 (\n" + + " `siteid` int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" + + " `citycode` smallint(6) NULL COMMENT \"\",\n" + + " `username` varchar(32) NOT NULL DEFAULT \"\" COMMENT \"\",\n" + + " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "UNIQUE KEY(`siteid`, `citycode`, `username`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`siteid`) BUCKETS 10\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ")"; dorisAssert = new DorisAssert(); dorisAssert.withDatabase("db1").useDatabase("db1"); dorisAssert.withTable(createTblStmtStr) @@ -150,20 +152,20 @@ public void testGroupingSets() throws Exception { @Test public void testSubqueryInCase() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - String sql1 = "SELECT CASE\n" + - " WHEN (\n" + - " SELECT COUNT(*) / 2\n" + - " FROM db1.tbl1\n" + - " ) > k4 THEN (\n" + - " SELECT AVG(k4)\n" + - " FROM db1.tbl1\n" + - " )\n" + - " ELSE (\n" + - " SELECT SUM(k4)\n" + - " FROM db1.tbl1\n" + - " )\n" + - " END AS kk4\n" + - "FROM db1.tbl1;"; + String sql1 = "SELECT CASE\n" + + " WHEN (\n" + + " SELECT COUNT(*) / 2\n" + + " FROM db1.tbl1\n" + + " ) > k4 THEN (\n" + + " SELECT AVG(k4)\n" + + " FROM db1.tbl1\n" + + " )\n" + + " ELSE (\n" + + " SELECT SUM(k4)\n" + + " FROM db1.tbl1\n" + + " )\n" + + " END AS kk4\n" + + "FROM db1.tbl1;"; SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql1, ctx); stmt.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); Assert.assertTrue(stmt.toSql().contains("`$a$1`.`$c$1` > `k4` THEN `$a$2`.`$c$2` ELSE `$a$3`.`$c$3`")); @@ -174,6 +176,7 @@ public void testSubqueryInCase() throws Exception { stmt2.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); Assert.fail("syntax not supported."); } catch (AnalysisException e) { + // CHECKSTYLE IGNORE THIS LINE } catch (Exception e) { Assert.fail("must be AnalysisException."); } @@ -183,17 +186,18 @@ public void testSubqueryInCase() throws Exception { stmt3.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); Assert.fail("syntax not supported."); } catch (AnalysisException e) { + // CHECKSTYLE IGNORE THIS LINE } catch (Exception e) { Assert.fail("must be AnalysisException."); } - String sql4 = "select case when k1 < (select max(k1) from db1.tbl1) and " + - "k1 > (select min(k1) from db1.tbl1) then \"empty\" else \"p_test\" end a from db1.tbl1"; + String sql4 = "select case when k1 < (select max(k1) from db1.tbl1) and " + + "k1 > (select min(k1) from db1.tbl1) then \"empty\" else \"p_test\" end a from db1.tbl1"; SelectStmt stmt4 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql4, ctx); stmt4.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); Assert.assertTrue(stmt4.toSql().contains("`k1` < `$a$1`.`$c$1` AND `k1` > `$a$2`.`$c$2`")); - String sql5 = "select case when k1 < (select max(k1) from db1.tbl1) is null " + - "then \"empty\" else \"p_test\" end a from db1.tbl1"; + String sql5 = "select case when k1 < (select max(k1) from db1.tbl1) is 
null " + + "then \"empty\" else \"p_test\" end a from db1.tbl1"; SelectStmt stmt5 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql5, ctx); stmt5.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); Assert.assertTrue(stmt5.toSql().contains(" `k1` < `$a$1`.`$c$1` IS NULL ")); @@ -202,203 +206,205 @@ public void testSubqueryInCase() throws Exception { @Test public void testDeduplicateOrs() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - String sql = "select\n" + - " avg(t1.k4)\n" + - "from\n" + - " db1.tbl1 t1,\n" + - " db1.tbl1 t2,\n" + - " db1.tbl1 t3,\n" + - " db1.tbl1 t4,\n" + - " db1.tbl1 t5,\n" + - " db1.tbl1 t6\n" + - "where\n" + - " t2.k1 = t1.k1\n" + - " and t1.k2 = t6.k2\n" + - " and t6.k4 = 2001\n" + - " and(\n" + - " (\n" + - " t1.k2 = t4.k2\n" + - " and t3.k3 = t1.k3\n" + - " and t3.k1 = 'D'\n" + - " and t4.k3 = '2 yr Degree'\n" + - " and t1.k4 between 100.00\n" + - " and 150.00\n" + - " and t4.k4 = 3\n" + - " )\n" + - " or (\n" + - " t1.k2 = t4.k2\n" + - " and t3.k3 = t1.k3\n" + - " and t3.k1 = 'S'\n" + - " and t4.k3 = 'Secondary'\n" + - " and t1.k4 between 50.00\n" + - " and 100.00\n" + - " and t4.k4 = 1\n" + - " )\n" + - " or (\n" + - " t1.k2 = t4.k2\n" + - " and t3.k3 = t1.k3\n" + - " and t3.k1 = 'W'\n" + - " and t4.k3 = 'Advanced Degree'\n" + - " and t1.k4 between 150.00\n" + - " and 200.00\n" + - " and t4.k4 = 1\n" + - " )\n" + - " )\n" + - " and(\n" + - " (\n" + - " t1.k1 = t5.k1\n" + - " and t5.k2 = 'United States'\n" + - " and t5.k3 in ('CO', 'IL', 'MN')\n" + - " and t1.k4 between 100\n" + - " and 200\n" + - " )\n" + - " or (\n" + - " t1.k1 = t5.k1\n" + - " and t5.k2 = 'United States'\n" + - " and t5.k3 in ('OH', 'MT', 'NM')\n" + - " and t1.k4 between 150\n" + - " and 300\n" + - " )\n" + - " or (\n" + - " t1.k1 = t5.k1\n" + - " and t5.k2 = 'United States'\n" + - " and t5.k3 in ('TX', 'MO', 'MI')\n" + - " and t1.k4 between 50 and 250\n" + - " )\n" + - " );"; + String sql = "select\n" + + " avg(t1.k4)\n" + + "from\n" + + " db1.tbl1 t1,\n" + + " db1.tbl1 t2,\n" + + " db1.tbl1 t3,\n" + + " db1.tbl1 t4,\n" + + " db1.tbl1 t5,\n" + + " db1.tbl1 t6\n" + + "where\n" + + " t2.k1 = t1.k1\n" + + " and t1.k2 = t6.k2\n" + + " and t6.k4 = 2001\n" + + " and(\n" + + " (\n" + + " t1.k2 = t4.k2\n" + + " and t3.k3 = t1.k3\n" + + " and t3.k1 = 'D'\n" + + " and t4.k3 = '2 yr Degree'\n" + + " and t1.k4 between 100.00\n" + + " and 150.00\n" + + " and t4.k4 = 3\n" + + " )\n" + + " or (\n" + + " t1.k2 = t4.k2\n" + + " and t3.k3 = t1.k3\n" + + " and t3.k1 = 'S'\n" + + " and t4.k3 = 'Secondary'\n" + + " and t1.k4 between 50.00\n" + + " and 100.00\n" + + " and t4.k4 = 1\n" + + " )\n" + + " or (\n" + + " t1.k2 = t4.k2\n" + + " and t3.k3 = t1.k3\n" + + " and t3.k1 = 'W'\n" + + " and t4.k3 = 'Advanced Degree'\n" + + " and t1.k4 between 150.00\n" + + " and 200.00\n" + + " and t4.k4 = 1\n" + + " )\n" + + " )\n" + + " and(\n" + + " (\n" + + " t1.k1 = t5.k1\n" + + " and t5.k2 = 'United States'\n" + + " and t5.k3 in ('CO', 'IL', 'MN')\n" + + " and t1.k4 between 100\n" + + " and 200\n" + + " )\n" + + " or (\n" + + " t1.k1 = t5.k1\n" + + " and t5.k2 = 'United States'\n" + + " and t5.k3 in ('OH', 'MT', 'NM')\n" + + " and t1.k4 between 150\n" + + " and 300\n" + + " )\n" + + " or (\n" + + " t1.k1 = t5.k1\n" + + " and t5.k2 = 'United States'\n" + + " and t5.k3 in ('TX', 'MO', 'MI')\n" + + " and t1.k4 between 50 and 250\n" + + " )\n" + + " );"; SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); stmt.rewriteExprs(new Analyzer(ctx.getCatalog(), 
ctx).getExprRewriter()); - String rewritedFragment1 = "((`t1`.`k2` = `t4`.`k2` AND `t3`.`k3` = `t1`.`k3` " + - "AND ((`t1`.`k4` >= 50 AND `t1`.`k4` <= 200) AND " + - "(`t3`.`k1` = 'D' OR `t3`.`k1` = 'S' OR `t3`.`k1` = 'W') " + - "AND (`t4`.`k3` = '2 yr Degree' OR `t4`.`k3` = 'Advanced Degree' OR `t4`.`k3` = 'Secondary') " + - "AND (`t4`.`k4` = 1 OR `t4`.`k4` = 3))) " + - "AND ((`t3`.`k1` = 'D' AND `t4`.`k3` = '2 yr Degree' " + - "AND `t1`.`k4` >= 100 AND `t1`.`k4` <= 150 AND `t4`.`k4` = 3) " + - "OR (`t3`.`k1` = 'S' AND `t4`.`k3` = 'Secondary' AND `t1`.`k4` >= 50 " + - "AND `t1`.`k4` <= 100 AND `t4`.`k4` = 1) OR (`t3`.`k1` = 'W' AND `t4`.`k3` = 'Advanced Degree' " + - "AND `t1`.`k4` >= 150 AND `t1`.`k4` <= 200 AND `t4`.`k4` = 1)))"; - String rewritedFragment2 = "((`t1`.`k1` = `t5`.`k1` AND `t5`.`k2` = 'United States' " + - "AND ((`t1`.`k4` >= 50 AND `t1`.`k4` <= 300) " + - "AND `t5`.`k3` IN ('CO', 'IL', 'MN', 'OH', 'MT', 'NM', 'TX', 'MO', 'MI'))) " + - "AND ((`t5`.`k3` IN ('CO', 'IL', 'MN') AND `t1`.`k4` >= 100 AND `t1`.`k4` <= 200) " + - "OR (`t5`.`k3` IN ('OH', 'MT', 'NM') AND `t1`.`k4` >= 150 AND `t1`.`k4` <= 300) OR (`t5`.`k3` IN " + - "('TX', 'MO', 'MI') AND `t1`.`k4` >= 50 AND `t1`.`k4` <= 250)))"; + String rewritedFragment1 = "((`t1`.`k2` = `t4`.`k2` AND `t3`.`k3` = `t1`.`k3` " + + "AND ((`t1`.`k4` >= 50 AND `t1`.`k4` <= 200) AND " + + "(`t3`.`k1` = 'D' OR `t3`.`k1` = 'S' OR `t3`.`k1` = 'W') " + + "AND (`t4`.`k3` = '2 yr Degree' OR `t4`.`k3` = 'Advanced Degree' OR `t4`.`k3` = 'Secondary') " + + "AND (`t4`.`k4` = 1 OR `t4`.`k4` = 3))) " + + "AND ((`t3`.`k1` = 'D' AND `t4`.`k3` = '2 yr Degree' " + + "AND `t1`.`k4` >= 100 AND `t1`.`k4` <= 150 AND `t4`.`k4` = 3) " + + "OR (`t3`.`k1` = 'S' AND `t4`.`k3` = 'Secondary' AND `t1`.`k4` >= 50 " + + "AND `t1`.`k4` <= 100 AND `t4`.`k4` = 1) OR (`t3`.`k1` = 'W' AND `t4`.`k3` = 'Advanced Degree' " + + "AND `t1`.`k4` >= 150 AND `t1`.`k4` <= 200 AND `t4`.`k4` = 1)))"; + String rewritedFragment2 = "((`t1`.`k1` = `t5`.`k1` AND `t5`.`k2` = 'United States' " + + "AND ((`t1`.`k4` >= 50 AND `t1`.`k4` <= 300) " + + "AND `t5`.`k3` IN ('CO', 'IL', 'MN', 'OH', 'MT', 'NM', 'TX', 'MO', 'MI'))) " + + "AND ((`t5`.`k3` IN ('CO', 'IL', 'MN') AND `t1`.`k4` >= 100 AND `t1`.`k4` <= 200) " + + "OR (`t5`.`k3` IN ('OH', 'MT', 'NM') AND `t1`.`k4` >= 150 AND `t1`.`k4` <= 300) OR (`t5`.`k3` IN " + + "('TX', 'MO', 'MI') AND `t1`.`k4` >= 50 AND `t1`.`k4` <= 250)))"; System.out.println(stmt.toSql()); Assert.assertTrue(stmt.toSql().contains(rewritedFragment1)); Assert.assertTrue(stmt.toSql().contains(rewritedFragment2)); - String sql2 = "select\n" + - " avg(t1.k4)\n" + - "from\n" + - " db1.tbl1 t1,\n" + - " db1.tbl1 t2\n" + - "where\n" + - "(\n" + - " t1.k1 = t2.k3\n" + - " and t2.k2 = 'United States'\n" + - " and t2.k3 in ('CO', 'IL', 'MN')\n" + - " and t1.k4 between 100\n" + - " and 200\n" + - ")\n" + - "or (\n" + - " t1.k1 = t2.k1\n" + - " and t2.k2 = 'United States1'\n" + - " and t2.k3 in ('OH', 'MT', 'NM')\n" + - " and t1.k4 between 150\n" + - " and 300\n" + - ")\n" + - "or (\n" + - " t1.k1 = t2.k1\n" + - " and t2.k2 = 'United States'\n" + - " and t2.k3 in ('TX', 'MO', 'MI')\n" + - " and t1.k4 between 50 and 250\n" + - ")"; + String sql2 = "select\n" + + " avg(t1.k4)\n" + + "from\n" + + " db1.tbl1 t1,\n" + + " db1.tbl1 t2\n" + + "where\n" + + "(\n" + + " t1.k1 = t2.k3\n" + + " and t2.k2 = 'United States'\n" + + " and t2.k3 in ('CO', 'IL', 'MN')\n" + + " and t1.k4 between 100\n" + + " and 200\n" + + ")\n" + + "or (\n" + + " t1.k1 = t2.k1\n" + + " and t2.k2 = 'United 
States1'\n" + + " and t2.k3 in ('OH', 'MT', 'NM')\n" + + " and t1.k4 between 150\n" + + " and 300\n" + + ")\n" + + "or (\n" + + " t1.k1 = t2.k1\n" + + " and t2.k2 = 'United States'\n" + + " and t2.k3 in ('TX', 'MO', 'MI')\n" + + " and t1.k4 between 50 and 250\n" + + ")"; SelectStmt stmt2 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql2, ctx); stmt2.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); - String fragment3 = "((`t1`.`k1` = `t2`.`k3` AND `t2`.`k2` = 'United States' AND `t2`.`k3` IN ('CO', 'IL', 'MN') " + - "AND `t1`.`k4` >= 100 AND `t1`.`k4` <= 200) OR (`t1`.`k1` = `t2`.`k1` AND `t2`.`k2` = 'United States1' " + - "AND `t2`.`k3` IN ('OH', 'MT', 'NM') AND `t1`.`k4` >= 150 AND `t1`.`k4` <= 300) " + - "OR (`t1`.`k1` = `t2`.`k1` AND `t2`.`k2` = 'United States' AND `t2`.`k3` IN ('TX', 'MO', 'MI') " + - "AND `t1`.`k4` >= 50 AND `t1`.`k4` <= 250))"; + String fragment3 = "((`t1`.`k1` = `t2`.`k3` AND `t2`.`k2` = 'United States'" + + " AND `t2`.`k3` IN ('CO', 'IL', 'MN') " + + "AND `t1`.`k4` >= 100 AND `t1`.`k4` <= 200) " + + "OR (`t1`.`k1` = `t2`.`k1` AND `t2`.`k2` = 'United States1' " + + "AND `t2`.`k3` IN ('OH', 'MT', 'NM') AND `t1`.`k4` >= 150 AND `t1`.`k4` <= 300) " + + "OR (`t1`.`k1` = `t2`.`k1` AND `t2`.`k2` = 'United States' AND `t2`.`k3` IN ('TX', 'MO', 'MI') " + + "AND `t1`.`k4` >= 50 AND `t1`.`k4` <= 250))"; Assert.assertTrue(stmt2.toSql().contains(fragment3)); - String sql3 = "select\n" + - " avg(t1.k4)\n" + - "from\n" + - " db1.tbl1 t1,\n" + - " db1.tbl1 t2\n" + - "where\n" + - " t1.k1 = t2.k3 or t1.k1 = t2.k3 or t1.k1 = t2.k3"; + String sql3 = "select\n" + + " avg(t1.k4)\n" + + "from\n" + + " db1.tbl1 t1,\n" + + " db1.tbl1 t2\n" + + "where\n" + + " t1.k1 = t2.k3 or t1.k1 = t2.k3 or t1.k1 = t2.k3"; SelectStmt stmt3 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql3, ctx); stmt3.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); - Assert.assertFalse(stmt3.toSql().contains("`t1`.`k1` = `t2`.`k3` OR `t1`.`k1` = `t2`.`k3` OR" + - " `t1`.`k1` = `t2`.`k3`")); - - String sql4 = "select\n" + - " avg(t1.k4)\n" + - "from\n" + - " db1.tbl1 t1,\n" + - " db1.tbl1 t2\n" + - "where\n" + - " t1.k1 = t2.k2 or t1.k1 = t2.k3 or t1.k1 = t2.k3"; + Assert.assertFalse(stmt3.toSql().contains("`t1`.`k1` = `t2`.`k3` OR `t1`.`k1` = `t2`.`k3` OR" + + " `t1`.`k1` = `t2`.`k3`")); + + String sql4 = "select\n" + + " avg(t1.k4)\n" + + "from\n" + + " db1.tbl1 t1,\n" + + " db1.tbl1 t2\n" + + "where\n" + + " t1.k1 = t2.k2 or t1.k1 = t2.k3 or t1.k1 = t2.k3"; SelectStmt stmt4 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql4, ctx); stmt4.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); Assert.assertTrue(stmt4.toSql().contains("`t1`.`k1` = `t2`.`k2` OR `t1`.`k1` = `t2`.`k3`")); - String sql5 = "select\n" + - " avg(t1.k4)\n" + - "from\n" + - " db1.tbl1 t1,\n" + - " db1.tbl1 t2\n" + - "where\n" + - " t2.k1 is not null or t1.k1 is not null or t1.k1 is not null"; + String sql5 = "select\n" + + " avg(t1.k4)\n" + + "from\n" + + " db1.tbl1 t1,\n" + + " db1.tbl1 t2\n" + + "where\n" + + " t2.k1 is not null or t1.k1 is not null or t1.k1 is not null"; SelectStmt stmt5 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql5, ctx); stmt5.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); Assert.assertTrue(stmt5.toSql().contains("`t2`.`k1` IS NOT NULL OR `t1`.`k1` IS NOT NULL")); Assert.assertEquals(2, stmt5.toSql().split(" OR ").length); - String sql6 = "select\n" + - " avg(t1.k4)\n" + - "from\n" + - " db1.tbl1 t1,\n" + - " db1.tbl1 t2\n" + - 
"where\n" + - " t2.k1 is not null or t1.k1 is not null and t1.k1 is not null"; + String sql6 = "select\n" + + " avg(t1.k4)\n" + + "from\n" + + " db1.tbl1 t1,\n" + + " db1.tbl1 t2\n" + + "where\n" + + " t2.k1 is not null or t1.k1 is not null and t1.k1 is not null"; SelectStmt stmt6 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql6, ctx); stmt6.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); Assert.assertTrue(stmt6.toSql().contains("`t2`.`k1` IS NOT NULL OR `t1`.`k1` IS NOT NULL")); Assert.assertEquals(2, stmt6.toSql().split(" OR ").length); - String sql7 = "select\n" + - " avg(t1.k4)\n" + - "from\n" + - " db1.tbl1 t1,\n" + - " db1.tbl1 t2\n" + - "where\n" + - " t2.k1 is not null or t1.k1 is not null and t1.k2 is not null"; + String sql7 = "select\n" + + " avg(t1.k4)\n" + + "from\n" + + " db1.tbl1 t1,\n" + + " db1.tbl1 t2\n" + + "where\n" + + " t2.k1 is not null or t1.k1 is not null and t1.k2 is not null"; SelectStmt stmt7 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql7, ctx); stmt7.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); - Assert.assertTrue(stmt7.toSql().contains("`t2`.`k1` IS NOT NULL OR (`t1`.`k1` IS NOT NULL " + - "AND `t1`.`k2` IS NOT NULL)")); - - String sql8 = "select\n" + - " avg(t1.k4)\n" + - "from\n" + - " db1.tbl1 t1,\n" + - " db1.tbl1 t2\n" + - "where\n" + - " t2.k1 is not null and t1.k1 is not null and t1.k1 is not null"; + Assert.assertTrue(stmt7.toSql().contains("`t2`.`k1` IS NOT NULL OR (`t1`.`k1` IS NOT NULL " + + "AND `t1`.`k2` IS NOT NULL)")); + + String sql8 = "select\n" + + " avg(t1.k4)\n" + + "from\n" + + " db1.tbl1 t1,\n" + + " db1.tbl1 t2\n" + + "where\n" + + " t2.k1 is not null and t1.k1 is not null and t1.k1 is not null"; SelectStmt stmt8 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql8, ctx); stmt8.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); - Assert.assertTrue(stmt8.toSql().contains("`t2`.`k1` IS NOT NULL AND `t1`.`k1` IS NOT NULL" + - " AND `t1`.`k1` IS NOT NULL")); + Assert.assertTrue(stmt8.toSql().contains("`t2`.`k1` IS NOT NULL AND `t1`.`k1` IS NOT NULL" + + " AND `t1`.`k1` IS NOT NULL")); String sql9 = "select * from db1.tbl1 where (k1='shutdown' and k4<1) or (k1='switchOff' and k4>=1)"; SelectStmt stmt9 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql9, ctx); stmt9.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); - Assert.assertTrue(stmt9.toSql().contains("(`k1` = 'shutdown' AND `k4` < 1)" + - " OR (`k1` = 'switchOff' AND `k4` >= 1)")); + Assert.assertTrue(stmt9.toSql().contains("(`k1` = 'shutdown' AND `k4` < 1)" + + " OR (`k1` = 'switchOff' AND `k4` >= 1)")); } @Test @@ -426,9 +432,9 @@ public void testMultrGroupByInCorrelationSubquery() throws Exception { } @Test - public void testOuterJoinNullUnionView() throws Exception{ - String sql = "WITH test_view(k) AS(SELECT NULL AS k UNION ALL SELECT NULL AS k )\n" + - "SELECT v1.k FROM test_view AS v1 LEFT OUTER JOIN test_view AS v2 ON v1.k=v2.k"; + public void testOuterJoinNullUnionView() throws Exception { + String sql = "WITH test_view(k) AS(SELECT NULL AS k UNION ALL SELECT NULL AS k )\n" + + "SELECT v1.k FROM test_view AS v1 LEFT OUTER JOIN test_view AS v2 ON v1.k=v2.k"; dorisAssert.query(sql).explainQuery(); } @@ -436,11 +442,14 @@ public void testOuterJoinNullUnionView() throws Exception{ public void testDataGripSupport() throws Exception { String sql = "select schema();"; dorisAssert.query(sql).explainQuery(); - sql = "select\n" + - "collation_name,\n" + - "character_set_name,\n" + - "is_default 
collate utf8_general_ci = 'Yes' as is_default\n" + - "from information_schema.collations"; + sql = "select\n" + + "collation_name,\n" + + "character_set_name,\n" + + "is_default collate utf8_general_ci = 'Yes' as is_default\n" + + "from information_schema.collations"; dorisAssert.query(sql).explainQuery(); } @@ -471,12 +480,12 @@ public void testImplicitConvertSupport() throws Exception { .query(sql2) .explainQuery() .contains("`datekey` = 20200730")); - String sql3= "select count() from db1.date_partition_table where dt=20200908"; + String sql3 = "select count() from db1.date_partition_table where dt=20200908"; Assert.assertTrue(dorisAssert .query(sql3) .explainQuery() .contains("`dt` = '2020-09-08 00:00:00'")); - String sql4= "select count() from db1.date_partition_table where dt='2020-09-08'"; + String sql4 = "select count() from db1.date_partition_table where dt='2020-09-08'"; Assert.assertTrue(dorisAssert .query(sql4) .explainQuery() @@ -566,8 +575,8 @@ public void testSelectHintSetVar() throws Exception { Assert.assertEquals(VariableMgr.getDefaultSessionVariable().getMaxExecMemByte(), planner.getPlannerContext().getQueryOptions().mem_limit); - sql = "select /*+ SET_VAR(exec_mem_limit = 8589934592) */ poi_id, count(*) from db1.partition_table " + - "where datekey=20200726 group by 1"; + sql = "select /*+ SET_VAR(exec_mem_limit = 8589934592) */ poi_id, count(*) from db1.partition_table " + + "where datekey=20200726 group by 1"; planner = dorisAssert.query(sql).internalExecuteOneAndGetPlan(); Assert.assertEquals(8589934592L, planner.getPlannerContext().getQueryOptions().mem_limit); @@ -589,9 +598,9 @@ public void testWithWithoutDatabase() throws Exception { dorisAssert.withoutUseDatabase(); dorisAssert.query(sql).explainQuery(); - sql = "with tmp as (select * from db1.table1) " + - "select a.siteid, b.citycode, a.siteid from (select siteid, citycode from tmp) a " + - "left join (select siteid, citycode from tmp) b on a.siteid = b.siteid;"; + sql = "with tmp as (select * from db1.table1) " + + "select a.siteid, b.citycode, a.siteid from (select siteid, citycode from tmp) a " + + "left join (select siteid, citycode from tmp) b on a.siteid = b.siteid;"; dorisAssert.withoutUseDatabase(); dorisAssert.query(sql).explainQuery(); } @@ -637,7 +646,7 @@ public void testOutfile() throws Exception { // schema cannot be empty sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET PROPERTIES (\"schema\"=\"\");"; try { - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); + SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE } catch (Exception e) { Assert.assertTrue(e.getMessage().contains("Parquet schema property should not be empty")); } @@ -645,7 +654,7 @@ // schema must contain 3 fields sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET PROPERTIES (\"schema\"=\"int32,siteid;\");"; try { - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); + SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE } catch (Exception e) { Assert.assertTrue(e.getMessage().contains("must only contains repetition type/column type/column name")); } @@ -653,7 +662,7 @@ // unknown repetition type sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET PROPERTIES (\"schema\"=\"repeat,
int32,siteid;\");"; try { - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); + SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE } catch (Exception e) { Assert.assertTrue(e.getMessage().contains("unknown repetition type")); } @@ -661,7 +670,7 @@ public void testOutfile() throws Exception { // only support required type sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET PROPERTIES (\"schema\"=\"repeated,int32,siteid;\");"; try { - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); + SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE } catch (Exception e) { Assert.assertTrue(e.getMessage().contains("currently only support required type")); } @@ -669,32 +678,35 @@ public void testOutfile() throws Exception { // unknown data type sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET PROPERTIES (\"schema\"=\"required,int128,siteid;\");"; try { - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); + SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE } catch (Exception e) { Assert.assertTrue(e.getMessage().contains("data type is not supported")); } // contains parquet properties - sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET PROPERTIES (\"schema\"=\"required,byte_array,siteid;\", 'parquet.compression'='snappy');"; + sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\"" + + " FORMAT AS PARQUET" + + " PROPERTIES (\"schema\"=\"required,byte_array,siteid;\"," + + " 'parquet.compression'='snappy');"; dorisAssert.query(sql).explainQuery(); // support parquet for broker - sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"hdfs://test/test_sql_prc_2019_02_19/\" FORMAT AS PARQUET " + - "PROPERTIES ( \"broker.name\" = \"hdfs_broker\", " + - "\"broker.hadoop.security.authentication\" = \"kerberos\", " + - "\"broker.kerberos_principal\" = \"test\", " + - "\"broker.kerberos_keytab_content\" = \"test\" , " + - "\"schema\"=\"required,byte_array,siteid;\");"; + sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"hdfs://test/test_sql_prc_2019_02_19/\" FORMAT AS PARQUET " + + "PROPERTIES ( \"broker.name\" = \"hdfs_broker\", " + + "\"broker.hadoop.security.authentication\" = \"kerberos\", " + + "\"broker.kerberos_principal\" = \"test\", " + + "\"broker.kerberos_keytab_content\" = \"test\" , " + + "\"schema\"=\"required,byte_array,siteid;\");"; dorisAssert.query(sql).explainQuery(); // do not support large int type try { - sql = "SELECT k5 FROM db1.tbl1 INTO OUTFILE \"hdfs://test/test_sql_prc_2019_02_19/\" FORMAT AS PARQUET " + - "PROPERTIES ( \"broker.name\" = \"hdfs_broker\", " + - "\"broker.hadoop.security.authentication\" = \"kerberos\", " + - "\"broker.kerberos_principal\" = \"test\", " + - "\"broker.kerberos_keytab_content\" = \"test\" ," + - " \"schema\"=\"required,int32,siteid;\");"; - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); + sql = "SELECT k5 FROM db1.tbl1 INTO OUTFILE \"hdfs://test/test_sql_prc_2019_02_19/\" FORMAT AS PARQUET " + + "PROPERTIES ( \"broker.name\" = \"hdfs_broker\", " + + "\"broker.hadoop.security.authentication\" = \"kerberos\", " + + "\"broker.kerberos_principal\" = \"test\", " + + "\"broker.kerberos_keytab_content\" = \"test\" ," + + " \"schema\"=\"required,int32,siteid;\");"; + SelectStmt stmt = (SelectStmt) 
UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE } catch (Exception e) { e.printStackTrace(); Assert.assertTrue(e.getMessage().contains("Parquet format does not support column type: LARGEINT")); @@ -702,26 +714,27 @@ public void testOutfile() throws Exception { // do not support large int type, contains function try { - sql = "SELECT sum(k5) FROM db1.tbl1 group by k5 INTO OUTFILE \"hdfs://test/test_sql_prc_2019_02_19/\" " + - "FORMAT AS PARQUET PROPERTIES ( \"broker.name\" = \"hdfs_broker\", " + - "\"broker.hadoop.security.authentication\" = \"kerberos\", " + - "\"broker.kerberos_principal\" = \"test\", " + - "\"broker.kerberos_keytab_content\" = \"test\" , " + - "\"schema\"=\"required,int32,siteid;\");"; - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); + sql = "SELECT sum(k5) FROM db1.tbl1 group by k5 INTO OUTFILE \"hdfs://test/test_sql_prc_2019_02_19/\" " + + "FORMAT AS PARQUET PROPERTIES ( \"broker.name\" = \"hdfs_broker\", " + + "\"broker.hadoop.security.authentication\" = \"kerberos\", " + + "\"broker.kerberos_principal\" = \"test\", " + + "\"broker.kerberos_keytab_content\" = \"test\" , " + + "\"schema\"=\"required,int32,siteid;\");"; + SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE } catch (Exception e) { Assert.assertTrue(e.getMessage().contains("Parquet format does not support column type: LARGEINT")); } // support cast try { - sql = "SELECT cast(sum(k5) as bigint) FROM db1.tbl1 group by k5 INTO OUTFILE \"hdfs://test/test_sql_prc_2019_02_19/\" " + - "FORMAT AS PARQUET PROPERTIES ( \"broker.name\" = \"hdfs_broker\", " + - "\"broker.hadoop.security.authentication\" = \"kerberos\", " + - "\"broker.kerberos_principal\" = \"test\", " + - "\"broker.kerberos_keytab_content\" = \"test\" , " + - "\"schema\"=\"required,int64,siteid;\");"; - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); + sql = "SELECT cast(sum(k5) as bigint) FROM db1.tbl1 group by k5" + + " INTO OUTFILE \"hdfs://test/test_sql_prc_2019_02_19/\" " + + "FORMAT AS PARQUET PROPERTIES ( \"broker.name\" = \"hdfs_broker\", " + + "\"broker.hadoop.security.authentication\" = \"kerberos\", " + + "\"broker.kerberos_principal\" = \"test\", " + + "\"broker.kerberos_keytab_content\" = \"test\" , " + + "\"schema\"=\"required,int64,siteid;\");"; + SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE } catch (Exception e) { Assert.fail(e.getMessage()); } @@ -729,13 +742,13 @@ @Test public void testSystemViewCaseInsensitive() throws Exception { - String sql1 = "SELECT ROUTINE_SCHEMA, ROUTINE_NAME FROM information_schema.ROUTINES WHERE ROUTINE_SCHEMA = " + - "'ech_dw' ORDER BY ROUTINES.ROUTINE_SCHEMA\n"; + String sql1 = "SELECT ROUTINE_SCHEMA, ROUTINE_NAME FROM information_schema.ROUTINES WHERE ROUTINE_SCHEMA = " + + "'ech_dw' ORDER BY ROUTINES.ROUTINE_SCHEMA\n"; // The system view names in information_schema are case-insensitive. dorisAssert.query(sql1).explainQuery(); - String sql2 = "SELECT ROUTINE_SCHEMA, ROUTINE_NAME FROM information_schema.ROUTINES WHERE ROUTINE_SCHEMA = " + - "'ech_dw' ORDER BY routines.ROUTINE_SCHEMA\n"; + String sql2 = "SELECT ROUTINE_SCHEMA, ROUTINE_NAME FROM information_schema.ROUTINES WHERE ROUTINE_SCHEMA = " + + "'ech_dw' ORDER BY routines.ROUTINE_SCHEMA\n"; try { // Should not refer to one of system views using different cases within the same statement.
// sql2 is wrong because 'ROUTINES' and 'routines' are used. @@ -750,38 +763,38 @@ public void testSystemViewCaseInsensitive() throws Exception { public void testWithUnionToSql() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); String sql1 = - "select \n" + - " t.k1 \n" + - "from (\n" + - " with \n" + - " v1 as (select t1.k1 from db1.tbl1 t1),\n" + - " v2 as (select t2.k1 from db1.tbl1 t2)\n" + - " select v1.k1 as k1 from v1\n" + - " union\n" + - " select v2.k1 as k1 from v2\n" + - ") t"; + "select \n" + + " t.k1 \n" + + "from (\n" + + " with \n" + + " v1 as (select t1.k1 from db1.tbl1 t1),\n" + + " v2 as (select t2.k1 from db1.tbl1 t2)\n" + + " select v1.k1 as k1 from v1\n" + + " union\n" + + " select v2.k1 as k1 from v2\n" + + ") t"; SelectStmt stmt1 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql1, ctx); stmt1.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); - Assert.assertTrue(stmt1.toSql().equals("SELECT `t`.`k1` AS `k1` " + - "FROM (WITH v1 AS (SELECT `t1`.`k1` AS `k1` FROM `default_cluster:db1`.`tbl1` t1)," + - "v2 AS (SELECT `t2`.`k1` AS `k1` FROM `default_cluster:db1`.`tbl1` t2) " + - "SELECT `v1`.`k1` AS `k1` FROM `v1` UNION SELECT `v2`.`k1` AS `k1` FROM `v2`) t")); + Assert.assertEquals("SELECT `t`.`k1` AS `k1` " + + "FROM (WITH v1 AS (SELECT `t1`.`k1` AS `k1` FROM `default_cluster:db1`.`tbl1` t1)," + + "v2 AS (SELECT `t2`.`k1` AS `k1` FROM `default_cluster:db1`.`tbl1` t2) " + + "SELECT `v1`.`k1` AS `k1` FROM `v1` UNION SELECT `v2`.`k1` AS `k1` FROM `v2`) t", stmt1.toSql()); String sql2 = - "with\n" + - " v1 as (select t1.k1 from db1.tbl1 t1),\n" + - " v2 as (select t2.k1 from db1.tbl1 t2)\n" + - "select\n" + - " t.k1\n" + - "from (\n" + - " select v1.k1 as k1 from v1\n" + - " union\n" + - " select v2.k1 as k1 from v2\n" + - ") t"; + "with\n" + + " v1 as (select t1.k1 from db1.tbl1 t1),\n" + + " v2 as (select t2.k1 from db1.tbl1 t2)\n" + + "select\n" + + " t.k1\n" + + "from (\n" + + " select v1.k1 as k1 from v1\n" + + " union\n" + + " select v2.k1 as k1 from v2\n" + + ") t"; SelectStmt stmt2 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql2, ctx); stmt2.rewriteExprs(new Analyzer(ctx.getCatalog(), ctx).getExprRewriter()); - Assert.assertTrue(stmt2.toSql().contains("WITH v1 AS (SELECT `t1`.`k1` AS `k1` FROM " + - "`default_cluster:db1`.`tbl1` t1),v2 AS (SELECT `t2`.`k1` AS `k1` FROM `default_cluster:db1`.`tbl1` t2)")); + Assert.assertTrue(stmt2.toSql().contains("WITH v1 AS (SELECT `t1`.`k1` AS `k1` FROM " + + "`default_cluster:db1`.`tbl1` t1),v2 AS (SELECT `t2`.`k1` AS `k1` FROM `default_cluster:db1`.`tbl1` t2)")); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/ShowAlterStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/ShowAlterStmtTest.java index 11f7f55ee0bec9..39a6b2f63a7031 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/ShowAlterStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/ShowAlterStmtTest.java @@ -68,8 +68,8 @@ public void setUp() { @Test public void testAlterStmt1() throws UserException, AnalysisException { - ShowAlterStmt stmt = new ShowAlterStmt(ShowAlterStmt.AlterType.COLUMN,null, null, - null,null); + ShowAlterStmt stmt = new ShowAlterStmt(ShowAlterStmt.AlterType.COLUMN, null, null, + null, null); stmt.analyzeSyntax(analyzer); Assert.assertEquals("SHOW ALTER TABLE COLUMN FROM `testDb`", stmt.toString()); } @@ -80,7 +80,7 @@ public void testAlterStmt2() throws UserException, AnalysisException { StringLiteral stringLiteral = new 
StringLiteral("abc"); BinaryPredicate binaryPredicate = new BinaryPredicate(Operator.EQ, slotRef, stringLiteral); ShowAlterStmt stmt = new ShowAlterStmt(ShowAlterStmt.AlterType.COLUMN, null, binaryPredicate, null, - new LimitElement(1,2)); + new LimitElement(1, 2)); stmt.analyzeSyntax(analyzer); Assert.assertEquals("SHOW ALTER TABLE COLUMN FROM `testDb` WHERE `TableName` = \'abc\' LIMIT 1, 2", stmt.toString()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/ShowPartitionsStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/ShowPartitionsStmtTest.java index 4af3be7daa57b4..6c40f6cc4797ef 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/ShowPartitionsStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/ShowPartitionsStmtTest.java @@ -108,8 +108,8 @@ public void testUnsupportFilter() throws UserException { BinaryPredicate binaryPredicate = new BinaryPredicate(BinaryPredicate.Operator.EQ, slotRef, stringLiteral); ShowPartitionsStmt stmt = new ShowPartitionsStmt(new TableName("testDb", "testTable"), binaryPredicate, null, null, false); expectedEx.expect(AnalysisException.class); - expectedEx.expectMessage("Only the columns of PartitionId/PartitionName/" + - "State/Buckets/ReplicationNum/LastConsistencyCheckTime are supported."); + expectedEx.expectMessage("Only the columns of PartitionId/PartitionName/" + + "State/Buckets/ReplicationNum/LastConsistencyCheckTime are supported."); stmt.analyzeImpl(analyzer); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/ShowViewStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/ShowViewStmtTest.java index cbf3315f80535f..4c5181fad34bbc 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/ShowViewStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/ShowViewStmtTest.java @@ -49,42 +49,42 @@ public static void tearDown() throws Exception { @BeforeClass public static void setUp() throws Exception { UtFrameUtils.createDorisCluster(runningDir); - String testTbl1 = "CREATE TABLE `test1` (\n" + - " `a` int(11) NOT NULL COMMENT \"\",\n" + - " `b` int(11) NOT NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "UNIQUE KEY(`a`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`a`) BUCKETS 8\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ");"; - String testTbl2 = "CREATE TABLE `test2` (\n" + - " `c` int(11) NOT NULL COMMENT \"\",\n" + - " `d` int(11) NOT NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "UNIQUE KEY(`c`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`c`) BUCKETS 8\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ");"; - String testTbl3 = "CREATE TABLE `test3` (\n" + - " `e` int(11) NOT NULL COMMENT \"\",\n" + - " `f` int(11) NOT NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "UNIQUE KEY(`e`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`e`) BUCKETS 8\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ");"; + String testTbl1 = "CREATE TABLE `test1` (\n" + + " `a` int(11) NOT NULL COMMENT \"\",\n" + + " `b` int(11) NOT NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "UNIQUE KEY(`a`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`a`) BUCKETS 8\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = 
\"V2\"\n" + + ");"; + String testTbl2 = "CREATE TABLE `test2` (\n" + + " `c` int(11) NOT NULL COMMENT \"\",\n" + + " `d` int(11) NOT NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "UNIQUE KEY(`c`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`c`) BUCKETS 8\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ");"; + String testTbl3 = "CREATE TABLE `test3` (\n" + + " `e` int(11) NOT NULL COMMENT \"\",\n" + + " `f` int(11) NOT NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "UNIQUE KEY(`e`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`e`) BUCKETS 8\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ");"; dorisAssert = new DorisAssert(); dorisAssert.withDatabase("testDb").useDatabase("testDb"); @@ -117,8 +117,8 @@ public void testNoDb() throws Exception { @Test public void testShowView() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - String testView1 = "CREATE VIEW `view1` as \n" + - "SELECT a, b FROM test1;"; + String testView1 = "CREATE VIEW `view1` as \n" + + "SELECT a, b FROM test1;"; dorisAssert.withView(testView1); ShowViewStmt stmt = new ShowViewStmt("", new TableName("testDb", "test1")); @@ -137,10 +137,10 @@ public void testShowView() throws Exception { @Test public void testShowViewWithJoin() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - String testView2 = "CREATE VIEW `view2` as \n" + - "SELECT a, c FROM test1 \n" + - "LEFT OUTER JOIN test2 \n" + - "ON test1.a = test2.c;"; + String testView2 = "CREATE VIEW `view2` as \n" + + "SELECT a, c FROM test1 \n" + + "LEFT OUTER JOIN test2 \n" + + "ON test1.a = test2.c;"; dorisAssert.withView(testView2); ShowViewStmt stmt = new ShowViewStmt("", new TableName("testDb", "test1")); @@ -169,11 +169,11 @@ public void testShowViewWithJoin() throws Exception { @Test public void testShowViewWithNestedSqlView() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - String testView3 = "CREATE VIEW `view3` as \n" + - "SELECT a, d FROM test1 \n" + - "LEFT OUTER JOIN \n" + - "(SELECT d, e FROM test3 LEFT OUTER JOIN test2 ON test3.e = test2.c) test4 \n" + - "ON test1.a = test4.e;"; + String testView3 = "CREATE VIEW `view3` as \n" + + "SELECT a, d FROM test1 \n" + + "LEFT OUTER JOIN \n" + + "(SELECT d, e FROM test3 LEFT OUTER JOIN test2 ON test3.e = test2.c) test4 \n" + + "ON test1.a = test4.e;"; dorisAssert.withView(testView3); ShowViewStmt stmt = new ShowViewStmt("", new TableName("testDb", "test1")); @@ -212,12 +212,12 @@ public void testShowViewWithNestedSqlView() throws Exception { @Test public void testShowViewWithNestedView() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - String testView4 = "CREATE VIEW `view4` as \n" + - "SELECT a, b FROM test1;"; - String testView5 = "CREATE VIEW `view5` as \n" + - "SELECT c FROM test2 \n" + - "LEFT OUTER JOIN view4 \n" + - "ON test2.c = view4.a;"; + String testView4 = "CREATE VIEW `view4` as \n" + + "SELECT a, b FROM test1;"; + String testView5 = "CREATE VIEW `view5` as \n" + + "SELECT c FROM test2 \n" + + "LEFT OUTER JOIN view4 \n" + + "ON test2.c = view4.a;"; dorisAssert.withView(testView4); dorisAssert.withView(testView5); @@ -239,11 +239,11 @@ public void testShowViewWithNestedView() throws Exception { @Test public void testGetTableRefs() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - String sql = 
"with w as (select a from testDb.test1) " + - "select c, d from testDb.test2 " + - "left outer join " + - "(select e from testDb.test3 join w on testDb.test3.e = w.a) test4 " + - "on test1.b = test4.d"; + String sql = "with w as (select a from testDb.test1) " + + "select c, d from testDb.test2 " + + "left outer join " + + "(select e from testDb.test3 join w on testDb.test3.e = w.a) test4 " + + "on test1.b = test4.d"; SqlScanner input = new SqlScanner(new StringReader(sql)); SqlParser parser = new SqlParser(input); QueryStmt queryStmt = (QueryStmt) SqlParserUtils.getFirstStmt(parser); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/StmtRewriterTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/StmtRewriterTest.java index 6ee7e44121298d..7625921f23e403 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/StmtRewriterTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/StmtRewriterTest.java @@ -40,13 +40,13 @@ public class StmtRewriterTest { private static DorisAssert dorisAssert; @BeforeClass - public static void beforeClass() throws Exception{ + public static void beforeClass() throws Exception { FeConstants.runningUnitTest = true; UtFrameUtils.createDorisCluster(runningDir); dorisAssert = new DorisAssert(); dorisAssert.withDatabase(DB_NAME).useDatabase(DB_NAME); - String createTableSQL = "create table " + DB_NAME + "." + TABLE_NAME + " (empid int, name varchar, " + - "deptno int, salary int, commission int) " + String createTableSQL = "create table " + DB_NAME + "." + TABLE_NAME + " (empid int, name varchar, " + + "deptno int, salary int, commission int) " + "distributed by hash(empid) buckets 3 properties('replication_num' = '1');"; dorisAssert.withTable(createTableSQL); } @@ -150,8 +150,8 @@ public static void beforeClass() throws Exception{ @Test public void testRewriteHavingClauseSubqueries() throws Exception { String subquery = "select avg(salary) from " + TABLE_NAME; - String query = "select empid, sum(salary) from " + TABLE_NAME + " group by empid having sum(salary) > (" + - subquery + ");"; + String query = "select empid, sum(salary) from " + TABLE_NAME + " group by empid having sum(salary) > (" + + subquery + ");"; LOG.info("EXPLAIN:{}", dorisAssert.query(query).explainQuery()); dorisAssert.query(query).explainContains("CROSS JOIN", "predicates: sum(`salary`) > avg(`salary`)"); @@ -261,8 +261,8 @@ public void testRewriteHavingClauseSubqueries() throws Exception { @Test public void testRewriteHavingClauseWithOrderBy() throws Exception { String subquery = "select avg(salary) from " + TABLE_NAME; - String query = "select empid a, sum(salary) from " + TABLE_NAME + " group by empid having sum(salary) > (" + - subquery + ") order by a;"; + String query = "select empid a, sum(salary) from " + TABLE_NAME + " group by empid having sum(salary) > (" + + subquery + ") order by a;"; LOG.info("EXPLAIN:{}", dorisAssert.query(query).explainQuery()); dorisAssert.query(query).explainContains("CROSS JOIN", "predicates: sum(`salary`) > avg(`salary`)", @@ -373,8 +373,8 @@ public void testRewriteHavingClauseWithOrderBy() throws Exception { @Test public void testRewriteHavingClauseMissingAggregationColumn() throws Exception { String subquery = "select avg(salary) from " + TABLE_NAME; - String query = "select empid a from " + TABLE_NAME + " group by empid having sum(salary) > (" + - subquery + ") order by sum(salary);"; + String query = "select empid a from " + TABLE_NAME + " group by empid having sum(salary) > (" + + subquery + ") order by 
sum(salary);"; LOG.info("EXPLAIN:{}", dorisAssert.query(query).explainQuery()); dorisAssert.query(query).explainContains("group by: `empid`", "CROSS JOIN", @@ -488,8 +488,8 @@ public void testRewriteHavingClauseMissingAggregationColumn() throws Exception { @Test public void testRewriteHavingClauseWithAlias() throws Exception { String subquery = "select avg(salary) from " + TABLE_NAME; - String query = "select empid a, sum(salary) b from " + TABLE_NAME + " group by a having b > (" + - subquery + ") order by b;"; + String query = "select empid a, sum(salary) b from " + TABLE_NAME + " group by a having b > (" + + subquery + ") order by b;"; LOG.info("EXPLAIN:{}", dorisAssert.query(query).explainQuery()); dorisAssert.query(query).explainContains("group by: `empid`", "CROSS JOIN", @@ -602,8 +602,8 @@ public void testRewriteHavingClauseWithAlias() throws Exception { @Test public void testRewriteHavingClausewWithLimit() throws Exception { String subquery = "select avg(salary) from " + TABLE_NAME; - String query = "select empid a, sum(salary) b from " + TABLE_NAME + " group by a having b > (" + - subquery + ") order by b limit 100;"; + String query = "select empid a, sum(salary) b from " + TABLE_NAME + " group by a having b > (" + + subquery + ") order by b limit 100;"; LOG.info("EXPLAIN:{}", dorisAssert.query(query).explainQuery()); dorisAssert.query(query).explainContains("group by: `empid`", "CROSS JOIN", @@ -618,13 +618,12 @@ public void testRewriteHavingClausewWithLimit() throws Exception { @Test public void testRewriteHavingClauseWithBetweenAndInSubquery() throws Exception { String subquery = "select avg(salary) from " + TABLE_NAME + " where empid between 1 and 2"; - String query = "select empid a, sum(salary) b from " + TABLE_NAME + " group by a having b > (" + - subquery + ");"; + String query = + "select empid a, sum(salary) b from " + TABLE_NAME + " group by a having b > (" + subquery + ");"; LOG.info("EXPLAIN:{}", dorisAssert.query(query).explainQuery()); - dorisAssert.query(query).explainContains( - "CROSS JOIN", - "predicates: sum(`salary`) > avg(`salary`)", - "PREDICATES: `empid` >= 1, `empid` <= 2"); + dorisAssert.query(query) + .explainContains("CROSS JOIN", "predicates: sum(`salary`) > avg(`salary`)", + "PREDICATES: `empid` >= 1, `empid` <= 2"); } @AfterClass diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameComparedLowercaseTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameComparedLowercaseTest.java index 7445467f9c7a60..6d73f9f51de278 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameComparedLowercaseTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameComparedLowercaseTest.java @@ -52,20 +52,20 @@ public static void setUp() throws Exception { Config.enable_batch_delete_by_default = true; Config.enable_http_server_v2 = false; UtFrameUtils.createDorisCluster(runningDir); - String table1 = "CREATE TABLE db1.TABLE1 (\n" + - " `siteid` int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" + - " `citycode` smallint(6) NULL COMMENT \"\",\n" + - " `username` varchar(32) NULL DEFAULT \"\" COMMENT \"\",\n" + - " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "UNIQUE KEY(`siteid`, `citycode`, `username`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`siteid`) BUCKETS 10\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ")"; + String table1 = "CREATE TABLE db1.TABLE1 (\n" + + " `siteid` 
int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" + + " `citycode` smallint(6) NULL COMMENT \"\",\n" + + " `username` varchar(32) NULL DEFAULT \"\" COMMENT \"\",\n" + + " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "UNIQUE KEY(`siteid`, `citycode`, `username`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`siteid`) BUCKETS 10\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ")"; String table2 = "create table db1.TABLE2(k1 int, k2 varchar(32), k3 varchar(32), k4 int, k5 largeint) " + "AGGREGATE KEY(k1, k2,k3,k4,k5) distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; dorisAssert = new DorisAssert(); @@ -89,12 +89,12 @@ public void testTableNameLowerCase() { @Test public void testQueryTableNameCaseInsensitive() throws Exception { - String sql1 = "select Table1.siteid, Table2.k2 from Table1 join Table2 on Table1.siteid = Table2.k1" + - " where Table2.k5 > 1000 order by Table1.siteid"; + String sql1 = "select Table1.siteid, Table2.k2 from Table1 join Table2 on Table1.siteid = Table2.k1" + + " where Table2.k5 > 1000 order by Table1.siteid"; dorisAssert.query(sql1).explainQuery(); - String sql2 = "select Table1.siteid, Table2.k2 from table1 join table2 on TAble1.siteid = TAble2.k1" + - " where TABle2.k5 > 1000 order by TABLe1.siteid"; + String sql2 = "select Table1.siteid, Table2.k2 from table1 join table2 on TAble1.siteid = TAble2.k1" + + " where TABle2.k5 > 1000 order by TABLe1.siteid"; try { dorisAssert.query(sql2).explainQuery(); Assert.fail("Different references to the same table name are used: 'table1', 'TAble1'"); @@ -109,8 +109,8 @@ public void testCreateSameTableFailed() { + "AGGREGATE KEY(k1, k2, k3) distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; try { dorisAssert.withTable(table2); - Assert.fail("The table name is case insensitive, " + - "but the tables 'TABLE2' and 'table2' were successfully created"); + Assert.fail("The table name is case insensitive, " + + "but the tables 'TABLE2' and 'table2' were successfully created"); } catch (Exception e) { System.out.println(e.getMessage()); } @@ -118,8 +118,8 @@ public void testCreateSameTableFailed() { String view2 = "create view table2 as select * from TABLE2"; try { dorisAssert.withView(view2); - Assert.fail("The table name is case insensitive, " + - "but the table 'TABLE2' and view 'table2' were successfully created"); + Assert.fail("The table name is case insensitive, " + + "but the table 'TABLE2' and view 'table2' were successfully created"); } catch (Exception e) { System.out.println(e.getMessage()); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameStoredLowercaseTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameStoredLowercaseTest.java index 8e5843712b46eb..bfd783bacfa92b 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameStoredLowercaseTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameStoredLowercaseTest.java @@ -51,20 +51,20 @@ public static void setUp() throws Exception { Config.enable_batch_delete_by_default = true; Config.enable_http_server_v2 = false; UtFrameUtils.createDorisCluster(runningDir); - String table1 = "CREATE TABLE db1.TABLE1 (\n" + - " `siteid` int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" + - " `citycode` smallint(6) NULL COMMENT \"\",\n" + - " `username` varchar(32) NULL DEFAULT \"\" COMMENT \"\",\n" + - " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT 
\"\"\n" + - ") ENGINE=OLAP\n" + - "UNIQUE KEY(`siteid`, `citycode`, `username`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`siteid`) BUCKETS 10\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ")"; + String table1 = "CREATE TABLE db1.TABLE1 (\n" + + " `siteid` int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" + + " `citycode` smallint(6) NULL COMMENT \"\",\n" + + " `username` varchar(32) NULL DEFAULT \"\" COMMENT \"\",\n" + + " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "UNIQUE KEY(`siteid`, `citycode`, `username`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`siteid`) BUCKETS 10\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ")"; String table2 = "create table db1.TABLE2(k1 int, k2 varchar(32), k3 varchar(32), k4 int, k5 largeint) " + "AGGREGATE KEY(k1, k2,k3,k4,k5) distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; dorisAssert = new DorisAssert(); @@ -89,19 +89,19 @@ public void testTableNameLowerCase() { @Test public void testQueryTableNameCaseInsensitive() throws Exception { - String sql1 = "select Table1.siteid, Table2.k2 from table1 join table2 on TAble1.siteid = TAble2.k1" + - " where TABle2.k5 > 1000 order by TABLe1.siteid"; + String sql1 = "select Table1.siteid, Table2.k2 from table1 join table2 on TAble1.siteid = TAble2.k1" + + " where TABle2.k5 > 1000 order by TABLe1.siteid"; dorisAssert.query(sql1).explainQuery(); - String sql2 = "SELECT ROUTINE_SCHEMA, ROUTINE_NAME FROM information_schema.ROUTINES WHERE ROUTINE_SCHEMA = " + - "'ech_dw' ORDER BY routines.ROUTINE_SCHEMA"; + String sql2 = "SELECT ROUTINE_SCHEMA, ROUTINE_NAME FROM information_schema.ROUTINES WHERE ROUTINE_SCHEMA = " + + "'ech_dw' ORDER BY routines.ROUTINE_SCHEMA"; dorisAssert.query(sql2).explainQuery(); } @Test public void testQueryTableAliasCaseInsensitive() throws Exception { - String sql1 = "select T1.siteid, t2.k2 from table1 T1 join table2 T2 on t1.siteid = t2.k1" + - " where T2.k5 > 1000 order by t1.siteid"; + String sql1 = "select T1.siteid, t2.k2 from table1 T1 join table2 T2 on t1.siteid = t2.k1" + + " where T2.k5 > 1000 order by t1.siteid"; dorisAssert.query(sql1).explainQuery(); String sql2 = "select t.siteid, T.username from (select * from Table1) T"; @@ -114,8 +114,8 @@ public void testCreateSameTableFailed() { + "AGGREGATE KEY(k1, k2, k3) distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; try { dorisAssert.withTable(table2); - Assert.fail("The table name is case insensitive, " + - "but the tables 'TABLE2' and 'table2' were successfully created"); + Assert.fail("The table name is case insensitive, " + + "but the tables 'TABLE2' and 'table2' were successfully created"); } catch (Exception e) { System.out.println(e.getMessage()); } @@ -123,8 +123,8 @@ public void testCreateSameTableFailed() { String view2 = "create view table2 as select * from TABLE2"; try { dorisAssert.withView(view2); - Assert.fail("The table name is case insensitive, " + - "but the table 'TABLE2' and view 'table2' were successfully created"); + Assert.fail("The table name is case insensitive, " + + "but the table 'TABLE2' and view 'table2' were successfully created"); } catch (Exception e) { System.out.println(e.getMessage()); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/backup/BrokerStorageTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/backup/BrokerStorageTest.java index 43f34f75e7b671..55342fdf240697 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/backup/BrokerStorageTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/backup/BrokerStorageTest.java @@ -30,7 +30,6 @@ import mockit.Mocked; import mockit.Tested; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.pool2.impl.GenericKeyedObjectPoolConfig; import org.apache.thrift.TServiceClient; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.protocol.TProtocol; @@ -89,31 +88,30 @@ public void setUp() throws Exception { storage = new BrokerStorage("bos_broker", properties); testFile = bucket + basePath + "/Ode_to_the_West_Wind"; content = - "O wild West Wind, thou breath of Autumn's being\n" + - "Thou, from whose unseen presence the leaves dead\n" + - "Are driven, like ghosts from an enchanter fleeing,\n" + - "Yellow, and black, and pale, and hectic red,\n" + - "Pestilence-stricken multitudes:O thou\n" + - "Who chariotest to their dark wintry bed\n" + - "The winged seeds, where they lie cold and low,\n" + - "Each like a corpse within its grave, until\n" + - "Thine azure sister of the Spring shall blow\n" + - "Her clarion o'er the dreaming earth, and fill\n" + - "(Driving sweet buds like flocks to feed in air)\n" + - "With living hues and odors plain and hill:\n" + - "Wild Spirit, which art moving everywhere;\n" + - "Destroyer and preserver; hear, oh, hear!"; + "O wild West Wind, thou breath of Autumn's being\n" + + "Thou, from whose unseen presence the leaves dead\n" + + "Are driven, like ghosts from an enchanter fleeing,\n" + + "Yellow, and black, and pale, and hectic red,\n" + + "Pestilence-stricken multitudes:O thou\n" + + "Who chariotest to their dark wintry bed\n" + + "The winged seeds, where they lie cold and low,\n" + + "Each like a corpse within its grave, until\n" + + "Thine azure sister of the Spring shall blow\n" + + "Her clarion o'er the dreaming earth, and fill\n" + + "(Driving sweet buds like flocks to feed in air)\n" + + "With living hues and odors plain and hill:\n" + + "Wild Spirit, which art moving everywhere;\n" + + "Destroyer and preserver; hear, oh, hear!"; new MockUp() { @Mock private Pair getBroker() { return pair; } }; - GenericKeyedObjectPoolConfig brokerPoolConfig = new GenericKeyedObjectPoolConfig(); new Expectations() { { pool.returnObject(withInstanceOf(TNetworkAddress.class), withInstanceOf(TServiceClient.class)); - minTimes =0; + minTimes = 0; } }; Deencapsulation.setField(ClientPool.class, "brokerPool", pool); @@ -149,7 +147,7 @@ public void upload() throws IOException { status = storage.downloadWithFileSize(remote, localFile2.getAbsolutePath(), 1024 * 1024); Assert.assertEquals(Status.OK, status); Assert.assertEquals(DigestUtils.md5Hex(new FileInputStream(localFile)), - DigestUtils.md5Hex(new FileInputStream(localFile2))); + DigestUtils.md5Hex(new FileInputStream(localFile2))); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/backup/CatalogMocker.java b/fe/fe-core/src/test/java/org/apache/doris/backup/CatalogMocker.java index 157e9d18f06fdf..4bf24e131555bd 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/backup/CatalogMocker.java +++ b/fe/fe-core/src/test/java/org/apache/doris/backup/CatalogMocker.java @@ -392,7 +392,7 @@ public static Database mockDb() throws UserException { public static Catalog fetchAdminCatalog() { try { - FakeEditLog fakeEditLog = new FakeEditLog(); + FakeEditLog fakeEditLog = new 
FakeEditLog(); // CHECKSTYLE IGNORE THIS LINE Catalog catalog = Deencapsulation.newInstance(Catalog.class); diff --git a/fe/fe-core/src/test/java/org/apache/doris/backup/RestoreJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/backup/RestoreJobTest.java index 71ad6827eaa4d9..76c5f2e918ae9b 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/backup/RestoreJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/backup/RestoreJobTest.java @@ -222,7 +222,7 @@ boolean await(long timeout, TimeUnit unit) { for (Tablet tablet : index.getTablets()) { List<String> files = Lists.newArrayList(tablet.getId() + ".dat", - tablet.getId()+ ".idx", tablet.getId()+".hdr"); + tablet.getId() + ".idx", tablet.getId() + ".hdr"); BackupTabletInfo tabletInfo = new BackupTabletInfo(tablet.getId(), files); idxInfo.sortedTabletInfoList.add(tabletInfo); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/backup/S3StorageTest.java b/fe/fe-core/src/test/java/org/apache/doris/backup/S3StorageTest.java index cdbe6d4ca258fc..7ffa32a59764d5 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/backup/S3StorageTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/backup/S3StorageTest.java @@ -63,20 +63,20 @@ public void setUp() throws Exception { testFile = bucket + basePath + "/Ode_to_the_West_Wind"; content = - "O wild West Wind, thou breath of Autumn's being\n" + - "Thou, from whose unseen presence the leaves dead\n" + - "Are driven, like ghosts from an enchanter fleeing,\n" + - "Yellow, and black, and pale, and hectic red,\n" + - "Pestilence-stricken multitudes:O thou\n" + - "Who chariotest to their dark wintry bed\n" + - "The winged seeds, where they lie cold and low,\n" + - "Each like a corpse within its grave, until\n" + - "Thine azure sister of the Spring shall blow\n" + - "Her clarion o'er the dreaming earth, and fill\n" + - "(Driving sweet buds like flocks to feed in air)\n" + - "With living hues and odors plain and hill:\n" + - "Wild Spirit, which art moving everywhere;\n" + - "Destroyer and preserver; hear, oh, hear!"; + "O wild West Wind, thou breath of Autumn's being\n" + + "Thou, from whose unseen presence the leaves dead\n" + + "Are driven, like ghosts from an enchanter fleeing,\n" + + "Yellow, and black, and pale, and hectic red,\n" + + "Pestilence-stricken multitudes:O thou\n" + + "Who chariotest to their dark wintry bed\n" + + "The winged seeds, where they lie cold and low,\n" + + "Each like a corpse within its grave, until\n" + + "Thine azure sister of the Spring shall blow\n" + + "Her clarion o'er the dreaming earth, and fill\n" + + "(Driving sweet buds like flocks to feed in air)\n" + + "With living hues and odors plain and hill:\n" + + "Wild Spirit, which art moving everywhere;\n" + + "Destroyer and preserver; hear, oh, hear!"; Assert.assertEquals(Status.OK, storage.directUpload(content, testFile)); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/blockrule/SqlBlockRuleMgrTest.java b/fe/fe-core/src/test/java/org/apache/doris/blockrule/SqlBlockRuleMgrTest.java index 885829059be5d1..0783d2661889a7 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/blockrule/SqlBlockRuleMgrTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/blockrule/SqlBlockRuleMgrTest.java @@ -69,25 +69,25 @@ public static void beforeClass() throws Exception { Catalog.getCurrentCatalog().createDb(createDbStmt); MetricRepo.init(); - createTable("create table test.table1\n" + - "(k1 int, k2 int) distributed by hash(k1) buckets 1\n" + - "properties(\"replication_num\" = \"1\");"); - - createTable("create 
table test.table2\n" + - "(k1 datetime, k2 int)\n" + - "ENGINE=OLAP\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - "PARTITION p20211213 VALUES [('2021-12-13 00:00:00'), ('2021-12-14 00:00:00')),\n" + - "PARTITION p20211214 VALUES [('2021-12-14 00:00:00'), ('2021-12-15 00:00:00')),\n" + - "PARTITION p20211215 VALUES [('2021-12-15 00:00:00'), ('2021-12-16 00:00:00')),\n" + - "PARTITION p20211216 VALUES [('2021-12-16 00:00:00'), ('2021-12-17 00:00:00'))\n" + - ")\n" + - "DISTRIBUTED BY HASH(k1)\n" + - "BUCKETS 10\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ");"); + createTable("create table test.table1\n" + + "(k1 int, k2 int) distributed by hash(k1) buckets 1\n" + + "properties(\"replication_num\" = \"1\");"); + + createTable("create table test.table2\n" + + "(k1 datetime, k2 int)\n" + + "ENGINE=OLAP\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + "PARTITION p20211213 VALUES [('2021-12-13 00:00:00'), ('2021-12-14 00:00:00')),\n" + + "PARTITION p20211214 VALUES [('2021-12-14 00:00:00'), ('2021-12-15 00:00:00')),\n" + + "PARTITION p20211215 VALUES [('2021-12-15 00:00:00'), ('2021-12-16 00:00:00')),\n" + + "PARTITION p20211216 VALUES [('2021-12-16 00:00:00'), ('2021-12-17 00:00:00'))\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1)\n" + + "BUCKETS 10\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ");"); } @@ -261,8 +261,10 @@ public void testAlterInvalid() throws Exception { @Test public void testNormalCreate() throws Exception { - String createSql = "CREATE SQL_BLOCK_RULE test_rule PROPERTIES(\"sql\"=\"select \\\\* from test_table\",\"enable\"=\"true\")"; - CreateSqlBlockRuleStmt createSqlBlockRuleStmt = (CreateSqlBlockRuleStmt) UtFrameUtils.parseAndAnalyzeStmt(createSql, connectContext); + String createSql = "CREATE SQL_BLOCK_RULE test_rule" + + " PROPERTIES(\"sql\"=\"select \\\\* from test_table\",\"enable\"=\"true\")"; + CreateSqlBlockRuleStmt createSqlBlockRuleStmt // CHECKSTYLE IGNORE THIS LINE + = (CreateSqlBlockRuleStmt) UtFrameUtils.parseAndAnalyzeStmt(createSql, connectContext); } @Test @@ -325,7 +327,7 @@ public void testUserPropertyInvalid() throws Exception { } @Test - public void testAlterSqlBlock() throws Exception{ + public void testAlterSqlBlock() throws Exception { Analyzer analyzer = new Analyzer(Catalog.getCurrentCatalog(), connectContext); SqlBlockRuleMgr mgr = Catalog.getCurrentCatalog().getSqlBlockRuleMgr(); @@ -346,9 +348,9 @@ public void testAlterSqlBlock() throws Exception{ Assert.assertEquals("select \\* from test_table", alteredSqlBlockRule.getSql()); Assert.assertEquals("NULL", alteredSqlBlockRule.getSqlHash()); - Assert.assertEquals(0L, (long)alteredSqlBlockRule.getPartitionNum()); - Assert.assertEquals(0L, (long)alteredSqlBlockRule.getTabletNum()); - Assert.assertEquals(0L, (long)alteredSqlBlockRule.getCardinality()); + Assert.assertEquals(0L, (long) alteredSqlBlockRule.getPartitionNum()); + Assert.assertEquals(0L, (long) alteredSqlBlockRule.getTabletNum()); + Assert.assertEquals(0L, (long) alteredSqlBlockRule.getCardinality()); Assert.assertEquals(false, alteredSqlBlockRule.getGlobal()); Assert.assertEquals(true, alteredSqlBlockRule.getEnable()); @@ -369,9 +371,9 @@ public void testAlterSqlBlock() throws Exception{ Assert.assertEquals("NULL", alteredSqlBlockRule2.getSql()); Assert.assertEquals("NULL", alteredSqlBlockRule2.getSqlHash()); - Assert.assertEquals(100L, (long)alteredSqlBlockRule2.getPartitionNum()); - Assert.assertEquals(500L, (long)alteredSqlBlockRule2.getTabletNum()); - Assert.assertEquals(0L, 
(long)alteredSqlBlockRule2.getCardinality()); + Assert.assertEquals(100L, (long) alteredSqlBlockRule2.getPartitionNum()); + Assert.assertEquals(500L, (long) alteredSqlBlockRule2.getTabletNum()); + Assert.assertEquals(0L, (long) alteredSqlBlockRule2.getCardinality()); Assert.assertEquals(true, alteredSqlBlockRule2.getGlobal()); Assert.assertEquals(true, alteredSqlBlockRule2.getEnable()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/AdminStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/AdminStmtTest.java index 8a0c170a904016..d3b4bbdc39ef93 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/AdminStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/AdminStmtTest.java @@ -41,15 +41,15 @@ public class AdminStmtTest extends TestWithFeService { @Override protected void runBeforeAll() throws Exception { createDatabase("test"); - createTable( "CREATE TABLE test.tbl1 (\n" + - " `id` int(11) NULL COMMENT \"\",\n" + - " `id2` bitmap bitmap_union NULL\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(`id`)\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 3\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ");"); + createTable("CREATE TABLE test.tbl1 (\n" + + " `id` int(11) NULL COMMENT \"\",\n" + + " `id2` bitmap bitmap_union NULL\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(`id`)\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 3\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ");"); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogOperationTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogOperationTest.java index ad215513777619..6d33953d9dac9c 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogOperationTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogOperationTest.java @@ -56,36 +56,36 @@ public static void beforeClass() throws Exception { CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, connectContext); Catalog.getCurrentCatalog().createDb(createDbStmt); - createTable("create table test.renameTest\n" + - "(k1 int)\n" + - "distributed by hash(k1) buckets 1\n" + - "properties(\"replication_num\" = \"1\");"); - - createResource("CREATE EXTERNAL RESOURCE \"mysql_resource\"\n" + - "PROPERTIES\n" + - "(\n" + - " \"type\" = \"odbc_catalog\",\n" + - " \"user\" = \"mysql_user\",\n" + - " \"password\" = \"mysql_passwd\",\n" + - " \"host\" = \"127.0.0.1\",\n" + - " \"port\" = \"8239\"\n" + - ");"); - - createTable("CREATE EXTERNAL TABLE test.mysqlRenameTest\n" + - "(\n" + - "k1 DATE,\n" + - "k2 INT,\n" + - "k3 SMALLINT,\n" + - "k4 VARCHAR(2048),\n" + - "k5 DATETIME\n" + - ")\n" + - "ENGINE=mysql\n" + - "PROPERTIES\n" + - "(\n" + - "\"odbc_catalog_resource\" = \"mysql_resource\",\n" + - "\"database\" = \"mysql_db_test\",\n" + - "\"table\" = \"mysql_table_test\"\n" + - ");"); + createTable("create table test.renameTest\n" + + "(k1 int)\n" + + "distributed by hash(k1) buckets 1\n" + + "properties(\"replication_num\" = \"1\");"); + + createResource("CREATE EXTERNAL RESOURCE \"mysql_resource\"\n" + + "PROPERTIES\n" + + "(\n" + + " \"type\" = \"odbc_catalog\",\n" + + " \"user\" = \"mysql_user\",\n" + + " \"password\" = \"mysql_passwd\",\n" + + " \"host\" = \"127.0.0.1\",\n" + + " \"port\" = \"8239\"\n" + + ");"); + + createTable("CREATE EXTERNAL TABLE test.mysqlRenameTest\n" + + "(\n" + + "k1 DATE,\n" + + "k2 INT,\n" + + "k3 SMALLINT,\n" + + "k4 VARCHAR(2048),\n" + + "k5 DATETIME\n" + + ")\n" + + 
"ENGINE=mysql\n" + + "PROPERTIES\n" + + "(\n" + + "\"odbc_catalog_resource\" = \"mysql_resource\",\n" + + "\"database\" = \"mysql_db_test\",\n" + + "\"table\" = \"mysql_table_test\"\n" + + ");"); } @AfterClass @@ -108,7 +108,7 @@ private static void createResource(String sql) throws Exception { public void testRenameTable() throws Exception { // rename olap table String renameTblStmt = "alter table test.renameTest rename newNewTest"; - AlterTableStmt alterTableStmt = (AlterTableStmt)UtFrameUtils.parseAndAnalyzeStmt(renameTblStmt, connectContext); + AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseAndAnalyzeStmt(renameTblStmt, connectContext); Database db = Catalog.getCurrentCatalog().getDbNullable("default_cluster:test"); Assert.assertNotNull(db); @@ -135,7 +135,7 @@ public void testRenameTable() throws Exception { Thread.sleep(1000); renameTblStmt = "alter table test.newNewTest rename r1"; - alterTableStmt = (AlterTableStmt)UtFrameUtils.parseAndAnalyzeStmt(renameTblStmt, connectContext); + alterTableStmt = (AlterTableStmt) UtFrameUtils.parseAndAnalyzeStmt(renameTblStmt, connectContext); try { Catalog.getCurrentCatalog().getAlterInstance().processAlterTable(alterTableStmt); Assert.fail(); @@ -144,14 +144,14 @@ public void testRenameTable() throws Exception { } renameTblStmt = "alter table test.newNewTest rename goodName"; - alterTableStmt = (AlterTableStmt)UtFrameUtils.parseAndAnalyzeStmt(renameTblStmt, connectContext); + alterTableStmt = (AlterTableStmt) UtFrameUtils.parseAndAnalyzeStmt(renameTblStmt, connectContext); Catalog.getCurrentCatalog().getAlterInstance().processAlterTable(alterTableStmt); Assert.assertNull(db.getTableNullable("newNewTest")); Assert.assertNotNull(db.getTableNullable("goodName")); // rename external table renameTblStmt = "alter table test.mysqlRenameTest rename newMysqlRenameTest"; - alterTableStmt = (AlterTableStmt)UtFrameUtils.parseAndAnalyzeStmt(renameTblStmt, connectContext); + alterTableStmt = (AlterTableStmt) UtFrameUtils.parseAndAnalyzeStmt(renameTblStmt, connectContext); Assert.assertNotNull(db.getTableNullable("mysqlRenameTest")); Catalog.getCurrentCatalog().getAlterInstance().processAlterTable(alterTableStmt); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogTest.java index 330794f713480c..d86c8e3455d223 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogTest.java @@ -140,7 +140,7 @@ public void testSaveLoadHeader() throws Exception { DataInputStream dis = new DataInputStream(new BufferedInputStream(new FileInputStream(file))); catalog = Catalog.getCurrentCatalog(); - long checksum2 = catalog.loadHeader(dis, MetaHeader.EMPTY_HEADER ,0); + long checksum2 = catalog.loadHeader(dis, MetaHeader.EMPTY_HEADER, 0); Assert.assertEquals(checksum1, checksum2); dis.close(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogTestUtil.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogTestUtil.java index f4b7177c43e7fb..ab6880332a041d 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogTestUtil.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogTestUtil.java @@ -173,7 +173,6 @@ public static Database createSimpleDb(long dbId, long tableId, long partitionId, Catalog.getCurrentInvertedIndex().clear(); // replica - long replicaId = 0; Replica replica1 = new Replica(testReplicaId1, testBackendId1, version, 0, 
0L, 0L, ReplicaState.NORMAL, -1, 0); Replica replica2 = new Replica(testReplicaId2, testBackendId2, version, 0, 0L, 0L, diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/ColocateTableTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/ColocateTableTest.java index b5b207b1089496..5338e8aa40e718 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/ColocateTableTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/ColocateTableTest.java @@ -66,8 +66,8 @@ public static void beforeClass() throws Exception { @AfterClass public static void tearDown() { - File file = new File(runningDir); - file.delete(); + File file = new File(runningDir); + file.delete(); } @Before @@ -97,17 +97,17 @@ private static void alterTable(String sql) throws Exception { @Test public void testCreateOneTable() throws Exception { - createTable("create table " + dbName + "." + tableName1 + " (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` varchar(10) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\",\n" + - " \"colocate_with\" = \"" + groupName + "\"\n" + - ");"); + createTable("create table " + dbName + "." + tableName1 + " (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` varchar(10) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\",\n" + + " \"colocate_with\" = \"" + groupName + "\"\n" + + ");"); ColocateTableIndex index = Catalog.getCurrentColocateIndex(); Database db = Catalog.getCurrentCatalog().getDbOrMetaException(fullDbName); @@ -140,29 +140,29 @@ public void testCreateOneTable() throws Exception { @Test public void testCreateTwoTableWithSameGroup() throws Exception { - createTable("create table " + dbName + "." + tableName1 + " (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` varchar(10) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\",\n" + - " \"colocate_with\" = \"" + groupName + "\"\n" + - ");"); - - createTable("create table " + dbName + "." + tableName2 + " (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` varchar(10) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\",\n" + - " \"colocate_with\" = \"" + groupName + "\"\n" + - ");"); + createTable("create table " + dbName + "." + tableName1 + " (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` varchar(10) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\",\n" + + " \"colocate_with\" = \"" + groupName + "\"\n" + + ");"); + + createTable("create table " + dbName + "." 
+ tableName2 + " (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` varchar(10) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\",\n" + + " \"colocate_with\" = \"" + groupName + "\"\n" + + ");"); ColocateTableIndex index = Catalog.getCurrentColocateIndex(); Database db = Catalog.getCurrentCatalog().getDbOrMetaException(fullDbName); @@ -209,135 +209,135 @@ public void testCreateTwoTableWithSameGroup() throws Exception { @Test public void testBucketNum() throws Exception { - createTable("create table " + dbName + "." + tableName1 + " (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` varchar(10) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\",\n" + - " \"colocate_with\" = \"" + groupName + "\"\n" + - ");"); + createTable("create table " + dbName + "." + tableName1 + " (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` varchar(10) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\",\n" + + " \"colocate_with\" = \"" + groupName + "\"\n" + + ");"); expectedEx.expect(DdlException.class); expectedEx.expectMessage("Colocate tables must have same bucket num: 1"); - createTable("create table " + dbName + "." + tableName2 + " (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` varchar(10) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 2\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\",\n" + - " \"colocate_with\" = \"" + groupName + "\"\n" + - ");"); + createTable("create table " + dbName + "." + tableName2 + " (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` varchar(10) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 2\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\",\n" + + " \"colocate_with\" = \"" + groupName + "\"\n" + + ");"); } @Test public void testReplicationNum() throws Exception { - createTable("create table " + dbName + "." + tableName1 + " (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` varchar(10) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\",\n" + - " \"colocate_with\" = \"" + groupName + "\"\n" + - ");"); + createTable("create table " + dbName + "." + tableName1 + " (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` varchar(10) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\",\n" + + " \"colocate_with\" = \"" + groupName + "\"\n" + + ");"); expectedEx.expect(DdlException.class); expectedEx.expectMessage("Colocate tables must have same replication allocation: tag.location.default: 1"); - createTable("create table " + dbName + "." 
+ tableName2 + " (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` varchar(10) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"2\",\n" + - " \"colocate_with\" = \"" + groupName + "\"\n" + - ");"); + createTable("create table " + dbName + "." + tableName2 + " (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` varchar(10) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"2\",\n" + + " \"colocate_with\" = \"" + groupName + "\"\n" + + ");"); } @Test public void testDistributionColumnsSize() throws Exception { - createTable("create table " + dbName + "." + tableName1 + " (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` varchar(10) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\",\n" + - " \"colocate_with\" = \"" + groupName + "\"\n" + - ");"); + createTable("create table " + dbName + "." + tableName1 + " (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` varchar(10) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\",\n" + + " \"colocate_with\" = \"" + groupName + "\"\n" + + ");"); expectedEx.expect(DdlException.class); expectedEx.expectMessage("Colocate tables distribution columns size must be same : 2"); - createTable("create table " + dbName + "." + tableName2 + " (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` varchar(10) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\",\n" + - " \"colocate_with\" = \"" + groupName + "\"\n" + - ");"); + createTable("create table " + dbName + "." + tableName2 + " (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` varchar(10) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\",\n" + + " \"colocate_with\" = \"" + groupName + "\"\n" + + ");"); } @Test public void testDistributionColumnsType() throws Exception { - createTable("create table " + dbName + "." + tableName1 + " (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\",\n" + - " \"colocate_with\" = \"" + groupName + "\"\n" + - ");"); + createTable("create table " + dbName + "." 
+ tableName1 + " (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\",\n" + + " \"colocate_with\" = \"" + groupName + "\"\n" + + ");"); expectedEx.expect(DdlException.class); expectedEx.expectMessage("Colocate tables distribution columns must have the same data type: k2 should be INT"); - createTable("create table " + dbName + "." + tableName2 + " (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` varchar(10) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\",\n" + - " \"colocate_with\" = \"" + groupName + "\"\n" + - ");"); + createTable("create table " + dbName + "." + tableName2 + " (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` varchar(10) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\",\n" + + " \"colocate_with\" = \"" + groupName + "\"\n" + + ");"); } @Test public void testModifyGroupNameForBucketSeqInconsistent() throws Exception { - createTable("create table " + dbName + "." + tableName1 + " (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` varchar(10) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\",\n" + - " \"colocate_with\" = \"" + groupName + "\"\n" + - ");"); + createTable("create table " + dbName + "." + tableName1 + " (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` varchar(10) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\",\n" + + " \"colocate_with\" = \"" + groupName + "\"\n" + + ");"); ColocateTableIndex index = Catalog.getCurrentColocateIndex(); Database db = Catalog.getCurrentCatalog().getDbOrMetaException(fullDbName); @@ -348,7 +348,8 @@ public void testModifyGroupNameForBucketSeqInconsistent() throws Exception { Assert.assertEquals(1, backendIds1.get(Tag.DEFAULT_BACKEND_TAG).get(0).size()); // set same group name - alterTable("ALTER TABLE "+ dbName + "." + tableName1 + " SET (" + "\"colocate_with\" = \"" + groupName + "\")"); + alterTable("ALTER TABLE " + dbName + "." 
+ tableName1 + + " SET (" + "\"colocate_with\" = \"" + groupName + "\")"); GroupId groupId2 = index.getGroup(tableId); // verify groupId group2BackendsPerBucketSeq diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/ColumnTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/ColumnTest.java index d9f76357284370..d17114aabfce2a 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/ColumnTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/ColumnTest.java @@ -125,7 +125,7 @@ public void testSchemaChangeIntToVarchar() throws DdlException { @Test(expected = DdlException.class) public void testSchemaChangeFloatToVarchar() throws DdlException { Column oldColumn = new Column("b", ScalarType.createType(PrimitiveType.FLOAT), false, null, true, "0", ""); - Column newColumn = new Column("b", ScalarType.createType(PrimitiveType.VARCHAR, 23 , 0, 0), false, null, true, "0", ""); + Column newColumn = new Column("b", ScalarType.createType(PrimitiveType.VARCHAR, 23, 0, 0), false, null, true, "0", ""); oldColumn.checkSchemaChangeAllowed(newColumn); Assert.fail("No exception throws."); } @@ -141,7 +141,7 @@ public void testSchemaChangeDecimalToVarchar() throws DdlException { @Test(expected = DdlException.class) public void testSchemaChangeDoubleToVarchar() throws DdlException { Column oldColumn = new Column("c", ScalarType.createType(PrimitiveType.DOUBLE), false, null, true, "0", ""); - Column newColumn = new Column("c", ScalarType.createType(PrimitiveType.VARCHAR, 31 , 0, 0), false, null, true, "0", ""); + Column newColumn = new Column("c", ScalarType.createType(PrimitiveType.VARCHAR, 31, 0, 0), false, null, true, "0", ""); oldColumn.checkSchemaChangeAllowed(newColumn); Assert.fail("No exception throws."); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateEncryptKeyTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateEncryptKeyTest.java index db88440d50251f..ed4c055e42616a 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateEncryptKeyTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateEncryptKeyTest.java @@ -86,7 +86,7 @@ public void test() throws Exception { Assert.assertEquals(1, planner.getFragments().size()); PlanFragment fragment = planner.getFragments().get(0); Assert.assertTrue(fragment.getPlanRoot() instanceof UnionNode); - UnionNode unionNode = (UnionNode)fragment.getPlanRoot(); + UnionNode unionNode = (UnionNode) fragment.getPlanRoot(); List<List<Expr>> constExprLists = Deencapsulation.getField(unionNode, "constExprLists"); Assert.assertEquals(1, constExprLists.size()); Assert.assertEquals(1, constExprLists.get(0).size()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateFunctionTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateFunctionTest.java index 55adf8afd342a6..05f25c937b9f75 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateFunctionTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateFunctionTest.java @@ -89,13 +89,13 @@ public void test() throws Exception { Database db = Catalog.getCurrentCatalog().getDbNullable("default_cluster:db1"); Assert.assertNotNull(db); - String createFuncStr = "create function db1.my_add(VARCHAR(1024)) RETURNS BOOLEAN properties\n" + - "(\n" + - "\"symbol\" = \"_ZN9doris_udf6AddUdfEPNS_15FunctionContextERKNS_9StringValE\",\n" + - "\"prepare_fn\" = \"_ZN9doris_udf13AddUdfPrepareEPNS_15FunctionContextENS0_18FunctionStateScopeE\",\n" + - "\"close_fn\" = 
\"_ZN9doris_udf11AddUdfCloseEPNS_15FunctionContextENS0_18FunctionStateScopeE\",\n" + - "\"object_file\" = \"http://127.0.0.1:8008/libcmy_udf.so\"\n" + - ");"; + String createFuncStr = "create function db1.my_add(VARCHAR(1024)) RETURNS BOOLEAN properties\n" + + "(\n" + + "\"symbol\" = \"_ZN9doris_udf6AddUdfEPNS_15FunctionContextERKNS_9StringValE\",\n" + + "\"prepare_fn\" = \"_ZN9doris_udf13AddUdfPrepareEPNS_15FunctionContextENS0_18FunctionStateScopeE\",\n" + + "\"close_fn\" = \"_ZN9doris_udf11AddUdfCloseEPNS_15FunctionContextENS0_18FunctionStateScopeE\",\n" + + "\"object_file\" = \"http://127.0.0.1:8008/libcmy_udf.so\"\n" + + ");"; CreateFunctionStmt createFunctionStmt = (CreateFunctionStmt) UtFrameUtils.parseAndAnalyzeStmt(createFuncStr, ctx); Catalog.getCurrentCatalog().createFunction(createFunctionStmt); @@ -113,7 +113,7 @@ public void test() throws Exception { Assert.assertEquals(1, planner.getFragments().size()); PlanFragment fragment = planner.getFragments().get(0); Assert.assertTrue(fragment.getPlanRoot() instanceof UnionNode); - UnionNode unionNode = (UnionNode)fragment.getPlanRoot(); + UnionNode unionNode = (UnionNode) fragment.getPlanRoot(); List> constExprLists = Deencapsulation.getField(unionNode, "constExprLists"); Assert.assertEquals(1, constExprLists.size()); Assert.assertEquals(1, constExprLists.get(0).size()); @@ -136,7 +136,7 @@ public void test() throws Exception { Assert.assertEquals(1, planner.getFragments().size()); fragment = planner.getFragments().get(0); Assert.assertTrue(fragment.getPlanRoot() instanceof UnionNode); - unionNode = (UnionNode)fragment.getPlanRoot(); + unionNode = (UnionNode) fragment.getPlanRoot(); constExprLists = Deencapsulation.getField(unionNode, "constExprLists"); Assert.assertEquals(1, constExprLists.size()); Assert.assertEquals(1, constExprLists.get(0).size()); @@ -147,8 +147,8 @@ public void test() throws Exception { // create alias function with cast // cast any type to decimal with specific precision and scale - createFuncStr = "create alias function db1.decimal(all, int, int) with parameter(col, precision, scale)" + - " as cast(col as decimal(precision, scale));"; + createFuncStr = "create alias function db1.decimal(all, int, int) with parameter(col, precision, scale)" + + " as cast(col as decimal(precision, scale));"; createFunctionStmt = (CreateFunctionStmt) UtFrameUtils.parseAndAnalyzeStmt(createFuncStr, ctx); Catalog.getCurrentCatalog().createFunction(createFunctionStmt); @@ -164,7 +164,7 @@ public void test() throws Exception { Assert.assertEquals(1, planner.getFragments().size()); fragment = planner.getFragments().get(0); Assert.assertTrue(fragment.getPlanRoot() instanceof UnionNode); - unionNode = (UnionNode)fragment.getPlanRoot(); + unionNode = (UnionNode) fragment.getPlanRoot(); constExprLists = Deencapsulation.getField(unionNode, "constExprLists"); System.out.println(constExprLists.get(0).get(0)); Assert.assertTrue(constExprLists.get(0).get(0) instanceof StringLiteral); @@ -173,8 +173,8 @@ public void test() throws Exception { Assert.assertTrue(dorisAssert.query(queryStr).explainQuery().contains("CAST(`k3` AS DECIMAL(4,1))")); // cast any type to varchar with fixed length - createFuncStr = "create alias function db1.varchar(all, int) with parameter(text, length) as " + - "cast(text as varchar(length));"; + createFuncStr = "create alias function db1.varchar(all, int) with parameter(text, length) as " + + "cast(text as varchar(length));"; createFunctionStmt = (CreateFunctionStmt) UtFrameUtils.parseAndAnalyzeStmt(createFuncStr, 
ctx); Catalog.getCurrentCatalog().createFunction(createFunctionStmt); @@ -190,7 +190,7 @@ public void test() throws Exception { Assert.assertEquals(1, planner.getFragments().size()); fragment = planner.getFragments().get(0); Assert.assertTrue(fragment.getPlanRoot() instanceof UnionNode); - unionNode = (UnionNode)fragment.getPlanRoot(); + unionNode = (UnionNode) fragment.getPlanRoot(); constExprLists = Deencapsulation.getField(unionNode, "constExprLists"); Assert.assertEquals(1, constExprLists.size()); Assert.assertEquals(1, constExprLists.get(0).size()); @@ -200,8 +200,8 @@ public void test() throws Exception { Assert.assertTrue(dorisAssert.query(queryStr).explainQuery().contains("CAST(`k1` AS CHARACTER)")); // cast any type to char with fixed length - createFuncStr = "create alias function db1.char(all, int) with parameter(text, length) as " + - "cast(text as char(length));"; + createFuncStr = "create alias function db1.char(all, int) with parameter(text, length) as " + + "cast(text as char(length));"; createFunctionStmt = (CreateFunctionStmt) UtFrameUtils.parseAndAnalyzeStmt(createFuncStr, ctx); Catalog.getCurrentCatalog().createFunction(createFunctionStmt); @@ -217,7 +217,7 @@ public void test() throws Exception { Assert.assertEquals(1, planner.getFragments().size()); fragment = planner.getFragments().get(0); Assert.assertTrue(fragment.getPlanRoot() instanceof UnionNode); - unionNode = (UnionNode)fragment.getPlanRoot(); + unionNode = (UnionNode) fragment.getPlanRoot(); constExprLists = Deencapsulation.getField(unionNode, "constExprLists"); Assert.assertEquals(1, constExprLists.size()); Assert.assertEquals(1, constExprLists.get(0).size()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateTableTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateTableTest.java index 1253f22579e1f8..93dbf9f7eae673 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateTableTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateTableTest.java @@ -67,15 +67,15 @@ private static void createTable(String sql) throws Exception { } @Test - public void testDuplicateCreateTable() throws Exception{ + public void testDuplicateCreateTable() throws Exception { // test Catalog catalog = Catalog.getCurrentCatalog(); String sql = "create table if not exists test.tbl1_colocate\n" + "(k1 int, k2 int)\n" + "duplicate key(k1)\n" + "distributed by hash(k2) buckets 1\n" + "properties('replication_num' = '1','colocate_with'='test'); "; createTable(sql); Set<Long> tabletIdSetAfterCreateFirstTable = catalog.getTabletInvertedIndex().getReplicaMetaTable().rowKeySet(); - Set<TabletMeta> tabletMetaSetBeforeCreateFirstTable = new HashSet<>(); - catalog.getTabletInvertedIndex().getTabletMetaTable().values().forEach(tabletMeta -> {tabletMetaSetBeforeCreateFirstTable.add(tabletMeta);}); + Set<TabletMeta> tabletMetaSetBeforeCreateFirstTable = + new HashSet<>(catalog.getTabletInvertedIndex().getTabletMetaTable().values()); Set<Long> colocateTableIdBeforeCreateFirstTable = catalog.getColocateTableIndex().getTable2Group().keySet(); Assert.assertTrue(colocateTableIdBeforeCreateFirstTable.size() > 0); Assert.assertTrue(tabletIdSetAfterCreateFirstTable.size() > 0); @@ -85,8 +85,8 @@ public void testDuplicateCreateTable() throws Exception{ Set<Long> tabletIdSetAfterDuplicateCreateTable1 = catalog.getTabletInvertedIndex().getReplicaMetaTable().rowKeySet(); Set<Long> tabletIdSetAfterDuplicateCreateTable2 = catalog.getTabletInvertedIndex().getBackingReplicaMetaTable().columnKeySet(); Set<Long> tabletIdSetAfterDuplicateCreateTable3 = 
catalog.getTabletInvertedIndex().getTabletMetaMap().keySet(); - Set<TabletMeta> tabletIdSetAfterDuplicateCreateTable4 = new HashSet<>(); - catalog.getTabletInvertedIndex().getTabletMetaTable().values().forEach(tabletMeta -> {tabletIdSetAfterDuplicateCreateTable4.add(tabletMeta);}); + Set<TabletMeta> tabletIdSetAfterDuplicateCreateTable4 = + new HashSet<>(catalog.getTabletInvertedIndex().getTabletMetaTable().values()); Assert.assertTrue(tabletIdSetAfterCreateFirstTable.equals(tabletIdSetAfterDuplicateCreateTable1)); Assert.assertTrue(tabletIdSetAfterCreateFirstTable.equals(tabletIdSetAfterDuplicateCreateTable2)); @@ -354,86 +354,86 @@ public void testAbnormal() throws DdlException { // list contain less than ExceptionChecker .expectThrowsWithMsg(AnalysisException.class, "You can only use in values to create list partitions", - () -> createTable("CREATE TABLE test.tbl14 (\n" + - " k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int\n" + - ")\n" + - "PARTITION BY LIST(k1)\n" + - "(\n" + - " PARTITION p1 VALUES less than (\"1\"),\n" + - " PARTITION p2 VALUES less than (\"2\"),\n" + - " partition p3 values less than (\"5\")\n" + - ")DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES(\"replication_num\" = \"1\");")); + () -> createTable("CREATE TABLE test.tbl14 (\n" + + " k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int\n" + + ")\n" + + "PARTITION BY LIST(k1)\n" + + "(\n" + + " PARTITION p1 VALUES less than (\"1\"),\n" + + " PARTITION p2 VALUES less than (\"2\"),\n" + + " partition p3 values less than (\"5\")\n" + + ")DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES(\"replication_num\" = \"1\");")); // range contain in ExceptionChecker .expectThrowsWithMsg(AnalysisException.class, "You can only use fixed or less than values to create range partitions", - () -> createTable("CREATE TABLE test.tbl15 (\n" + - " k1 int, k2 varchar(128), k3 int, v1 int, v2 int\n" + - ")\n" + - "PARTITION BY range(k1)\n" + - "(\n" + - " PARTITION p1 VALUES in (\"1\"),\n" + - " PARTITION p2 VALUES in (\"2\"),\n" + - " partition p3 values in (\"5\")\n" + - ")DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES(\"replication_num\" = \"1\");")); + () -> createTable("CREATE TABLE test.tbl15 (\n" + + " k1 int, k2 varchar(128), k3 int, v1 int, v2 int\n" + + ")\n" + + "PARTITION BY range(k1)\n" + + "(\n" + + " PARTITION p1 VALUES in (\"1\"),\n" + + " PARTITION p2 VALUES in (\"2\"),\n" + + " partition p3 values in (\"5\")\n" + + ")DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES(\"replication_num\" = \"1\");")); // list contain both ExceptionChecker .expectThrowsWithMsg(AnalysisException.class, "You can only use in values to create list partitions", - () -> createTable("CREATE TABLE test.tbl15 (\n" + - " k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int\n" + - ")\n" + - "PARTITION BY LIST(k1)\n" + - "(\n" + - " PARTITION p1 VALUES in (\"1\"),\n" + - " PARTITION p2 VALUES in (\"2\"),\n" + - " partition p3 values less than (\"5\")\n" + - ")DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES(\"replication_num\" = \"1\");")); + () -> createTable("CREATE TABLE test.tbl15 (\n" + + " k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int\n" + + ")\n" + + "PARTITION BY LIST(k1)\n" + + "(\n" + + " PARTITION p1 VALUES in (\"1\"),\n" + + " PARTITION p2 VALUES in (\"2\"),\n" + + " partition p3 values less than (\"5\")\n" + + ")DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES(\"replication_num\" = \"1\");")); // range contain both ExceptionChecker .expectThrowsWithMsg(AnalysisException.class, "You can only use fixed or 
less than values to create range partitions", - () -> createTable("CREATE TABLE test.tbl16 (\n" + - " k1 int, k2 varchar(128), k3 int, v1 int, v2 int\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 VALUES less than (\"1\"),\n" + - " PARTITION p2 VALUES less than (\"2\"),\n" + - " partition p3 values in (\"5\")\n" + - ")DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES(\"replication_num\" = \"1\");")); + () -> createTable("CREATE TABLE test.tbl16 (\n" + + " k1 int, k2 varchar(128), k3 int, v1 int, v2 int\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 VALUES less than (\"1\"),\n" + + " PARTITION p2 VALUES less than (\"2\"),\n" + + " partition p3 values in (\"5\")\n" + + ")DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES(\"replication_num\" = \"1\");")); // range: partition content != partition key type ExceptionChecker .expectThrowsWithMsg(DdlException.class, "Invalid number format: beijing", - () -> createTable("CREATE TABLE test.tbl17 (\n" + - " k1 int, k2 varchar(128), k3 int, v1 int, v2 int\n" + - ")\n" + - "PARTITION BY range(k1)\n" + - "(\n" + - " PARTITION p1 VALUES less than (\"beijing\"),\n" + - " PARTITION p2 VALUES less than (\"shanghai\"),\n" + - " partition p3 values less than (\"tianjin\")\n" + - ")DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES(\"replication_num\" = \"1\");")); + () -> createTable("CREATE TABLE test.tbl17 (\n" + + " k1 int, k2 varchar(128), k3 int, v1 int, v2 int\n" + + ")\n" + + "PARTITION BY range(k1)\n" + + "(\n" + + " PARTITION p1 VALUES less than (\"beijing\"),\n" + + " PARTITION p2 VALUES less than (\"shanghai\"),\n" + + " partition p3 values less than (\"tianjin\")\n" + + ")DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES(\"replication_num\" = \"1\");")); // list: partition content != partition key type ExceptionChecker .expectThrowsWithMsg(DdlException.class, "Invalid number format: beijing", - () -> createTable("CREATE TABLE test.tbl18 (\n" + - " k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int\n" + - ")\n" + - "PARTITION BY list(k1)\n" + - "(\n" + - " PARTITION p1 VALUES in (\"beijing\"),\n" + - " PARTITION p2 VALUES in (\"shanghai\"),\n" + - " partition p3 values in (\"tianjin\")\n" + - ")DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES(\"replication_num\" = \"1\");")); + () -> createTable("CREATE TABLE test.tbl18 (\n" + + " k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int\n" + + ")\n" + + "PARTITION BY list(k1)\n" + + "(\n" + + " PARTITION p1 VALUES in (\"beijing\"),\n" + + " PARTITION p2 VALUES in (\"shanghai\"),\n" + + " partition p3 values in (\"tianjin\")\n" + + ")DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES(\"replication_num\" = \"1\");")); /** * dynamic partition table @@ -441,39 +441,39 @@ public void testAbnormal() throws DdlException { // list partition with dynamic properties ExceptionChecker .expectThrowsWithMsg(DdlException.class, "Only support dynamic partition properties on range partition table", - () -> createTable("CREATE TABLE test.tbl19\n" + - "(\n" + - " k1 DATE not null\n" + - ")\n" + - "PARTITION BY LIST(k1) ()\n" + - "DISTRIBUTED BY HASH(k1)\n" + - "PROPERTIES\n" + - "(\n" + - " \"dynamic_partition.enable\" = \"true\",\n" + - " \"dynamic_partition.time_unit\" = \"MONTH\",\n" + - " \"dynamic_partition.end\" = \"2\",\n" + - " \"dynamic_partition.prefix\" = \"p\",\n" + - " \"dynamic_partition.buckets\" = \"8\",\n" + - " \"dynamic_partition.start_day_of_month\" = \"3\"\n" + - ");\n")); + () -> createTable("CREATE TABLE 
test.tbl19\n" + + "(\n" + + " k1 DATE not null\n" + + ")\n" + + "PARTITION BY LIST(k1) ()\n" + + "DISTRIBUTED BY HASH(k1)\n" + + "PROPERTIES\n" + + "(\n" + + " \"dynamic_partition.enable\" = \"true\",\n" + + " \"dynamic_partition.time_unit\" = \"MONTH\",\n" + + " \"dynamic_partition.end\" = \"2\",\n" + + " \"dynamic_partition.prefix\" = \"p\",\n" + + " \"dynamic_partition.buckets\" = \"8\",\n" + + " \"dynamic_partition.start_day_of_month\" = \"3\"\n" + + ");\n")); // no partition table with dynamic properties ExceptionChecker .expectThrowsWithMsg(DdlException.class, "Only support dynamic partition properties on range partition table", - () -> createTable("CREATE TABLE test.tbl20\n" + - "(\n" + - " k1 DATE\n" + - ")\n" + - "DISTRIBUTED BY HASH(k1)\n" + - "PROPERTIES\n" + - "(\n" + - " \"dynamic_partition.enable\" = \"true\",\n" + - " \"dynamic_partition.time_unit\" = \"MONTH\",\n" + - " \"dynamic_partition.end\" = \"2\",\n" + - " \"dynamic_partition.prefix\" = \"p\",\n" + - " \"dynamic_partition.buckets\" = \"8\",\n" + - " \"dynamic_partition.start_day_of_month\" = \"3\"\n" + - ");")); + () -> createTable("CREATE TABLE test.tbl20\n" + + "(\n" + + " k1 DATE\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1)\n" + + "PROPERTIES\n" + + "(\n" + + " \"dynamic_partition.enable\" = \"true\",\n" + + " \"dynamic_partition.time_unit\" = \"MONTH\",\n" + + " \"dynamic_partition.end\" = \"2\",\n" + + " \"dynamic_partition.prefix\" = \"p\",\n" + + " \"dynamic_partition.buckets\" = \"8\",\n" + + " \"dynamic_partition.start_day_of_month\" = \"3\"\n" + + ");")); } @@ -483,51 +483,51 @@ public void testZOrderTable() { ExceptionChecker.expectThrowsNoException(() -> createTable( "create table test.zorder_tbl1\n" + "(k1 varchar(40), k2 int, k3 int)\n" + "duplicate key(k1, k2, k3)\n" + "partition by range(k2)\n" + "(partition p1 values less than(\"10\"))\n" - + "distributed by hash(k1) buckets 1\n" + "properties('replication_num' = '1'," + - " 'data_sort.sort_type' = 'lexical');")); + + "distributed by hash(k1) buckets 1\n" + "properties('replication_num' = '1'," + + " 'data_sort.sort_type' = 'lexical');")); // create z-order sort table, default col_num ExceptionChecker.expectThrowsNoException(() -> createTable( "create table test.zorder_tbl2\n" + "(k1 varchar(40), k2 int, k3 int)\n" + "duplicate key(k1, k2, k3)\n" + "partition by range(k2)\n" + "(partition p1 values less than(\"10\"))\n" - + "distributed by hash(k1) buckets 1\n" + "properties('replication_num' = '1'," + - " 'data_sort.sort_type' = 'zorder');")); + + "distributed by hash(k1) buckets 1\n" + "properties('replication_num' = '1'," + + " 'data_sort.sort_type' = 'zorder');")); // create z-order sort table, define sort_col_num ExceptionChecker.expectThrowsNoException(() -> createTable( "create table test.zorder_tbl3\n" + "(k1 varchar(40), k2 int, k3 int)\n" + "duplicate key(k1, k2, k3)\n" + "partition by range(k2)\n" + "(partition p1 values less than(\"10\"))\n" - + "distributed by hash(k1) buckets 1\n" + "properties('replication_num' = '1'," + - " 'data_sort.sort_type' = 'zorder'," + - " 'data_sort.col_num' = '2');")); + + "distributed by hash(k1) buckets 1\n" + "properties('replication_num' = '1'," + + " 'data_sort.sort_type' = 'zorder'," + + " 'data_sort.col_num' = '2');")); // create z-order sort table, only 1 sort column ExceptionChecker .expectThrowsWithMsg(AnalysisException.class, "z-order needs 2 columns at least, 3 columns at most", () -> createTable("create table test.zorder_tbl4\n" + "(k1 varchar(40), k2 int, k3 int)\n" + "duplicate key(k1, k2, k3)\n" 
+ "partition by range(k2)\n" + "(partition p1 values less than(\"10\"))\n" - + "distributed by hash(k1) buckets 1\n" + "properties('replication_num' = '1'," + - " 'data_sort.sort_type' = 'zorder'," + - " 'data_sort.col_num' = '1');")); + + "distributed by hash(k1) buckets 1\n" + "properties('replication_num' = '1'," + + " 'data_sort.sort_type' = 'zorder'," + + " 'data_sort.col_num' = '1');")); // create z-order sort table, sort column is empty ExceptionChecker .expectThrowsWithMsg(AnalysisException.class, "param data_sort.col_num error", () -> createTable("create table test.zorder_tbl4\n" + "(k1 varchar(40), k2 int, k3 int)\n" + "duplicate key(k1, k2, k3)\n" + "partition by range(k2)\n" + "(partition p1 values less than(\"10\"))\n" - + "distributed by hash(k1) buckets 1\n" + "properties('replication_num' = '1'," + - " 'data_sort.sort_type' = 'zorder'," + - " 'data_sort.col_num' = '');")); + + "distributed by hash(k1) buckets 1\n" + "properties('replication_num' = '1'," + + " 'data_sort.sort_type' = 'zorder'," + + " 'data_sort.col_num' = '');")); } @Test public void testCreateTableWithArrayType() throws Exception { Config.enable_complex_type_support = true; ExceptionChecker.expectThrowsNoException(() -> { - createTable("create table test.table1(k1 INT, k2 Array) duplicate key (k1) " + - "distributed by hash(k1) buckets 1 properties('replication_num' = '1');"); + createTable("create table test.table1(k1 INT, k2 Array) duplicate key (k1) " + + "distributed by hash(k1) buckets 1 properties('replication_num' = '1');"); }); ExceptionChecker.expectThrowsNoException(() -> { - createTable("create table test.table2(k1 INT, k2 Array>) duplicate key (k1) " + - "distributed by hash(k1) buckets 1 properties('replication_num' = '1');"); + createTable("create table test.table2(k1 INT, k2 Array>) duplicate key (k1) " + + "distributed by hash(k1) buckets 1 properties('replication_num' = '1');"); }); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateViewTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateViewTest.java index 364cf13f31a0e6..3a895b06271ffb 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateViewTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateViewTest.java @@ -84,8 +84,8 @@ public void testNormal() throws DdlException { () -> createView("create view test.view4 as select abs(-1) as s1;")); ExceptionChecker.expectThrowsNoException( - () -> createView("create view test.view5 as select * from test.tbl1 where hour(now()) > 3" + - " and curdate() > '2021-06-26';")); + () -> createView("create view test.view5 as select * from test.tbl1 where hour(now()) > 3" + + " and curdate() > '2021-06-26';")); Database db = Catalog.getCurrentCatalog().getDbOrDdlException("default_cluster:test"); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/DatabaseTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/DatabaseTest.java index 9e96b8a509390d..bb3831861fd989 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/DatabaseTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/DatabaseTest.java @@ -231,7 +231,7 @@ public void testSerialization() throws Exception { OlapTable table = new OlapTable(1000, "table", columns, KeysType.AGG_KEYS, new SinglePartitionInfo(), new RandomDistributionInfo(10)); short shortKeyColumnCount = 1; - table.setIndexMeta(1000, "group1", columns, 1,1,shortKeyColumnCount,TStorageType.COLUMN, KeysType.AGG_KEYS); + table.setIndexMeta(1000, "group1", columns, 1, 1, 
shortKeyColumnCount, TStorageType.COLUMN, KeysType.AGG_KEYS); List column = Lists.newArrayList(); column.add(column2); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/DynamicPartitionTableTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/DynamicPartitionTableTest.java index be2e5a014fd455..c2a6098b11dd7a 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/DynamicPartitionTableTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/DynamicPartitionTableTest.java @@ -90,31 +90,31 @@ private static void alterTable(String sql) throws Exception { @Test public void testNormal() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_normal` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_normal` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); Database db = Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test"); OlapTable table = (OlapTable) db.getTableOrAnalysisException("dynamic_partition_normal"); @@ -123,30 +123,30 @@ public void testNormal() throws Exception { @Test public void testMissPrefix() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_prefix` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - 
"DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_prefix` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; expectedException.expect(DdlException.class); expectedException.expectMessage("errCode = 2, detailMessage = Must assign dynamic_partition.prefix properties"); createTable(createOlapTblStmt); @@ -154,30 +154,30 @@ public void testMissPrefix() throws Exception { @Test public void testMissTimeUnit() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_time_unit` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_time_unit` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; expectedException.expect(DdlException.class); 
expectedException.expectMessage("errCode = 2, detailMessage = Must assign dynamic_partition.time_unit properties"); createTable(createOlapTblStmt); @@ -185,59 +185,59 @@ public void testMissTimeUnit() throws Exception { @Test public void testMissStart() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_start` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_start` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); } @Test public void testMissEnd() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_end` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_end` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + 
"PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; expectedException.expect(DdlException.class); expectedException.expectMessage("errCode = 2, detailMessage = Must assign dynamic_partition.end properties"); createTable(createOlapTblStmt); @@ -245,30 +245,30 @@ public void testMissEnd() throws Exception { @Test public void testMissBuckets() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_buckets` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_buckets` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\"\n" + + ");"; expectedException.expect(DdlException.class); expectedException.expectMessage("errCode = 2, detailMessage = Must assign dynamic_partition.buckets properties"); createTable(createOlapTblStmt); @@ -276,25 +276,25 @@ public void testMissBuckets() throws Exception { @Test public void testNotAllowed() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_buckets` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - 
"\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_buckets` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; expectedException.expect(DdlException.class); expectedException.expectMessage("errCode = 2, detailMessage = Only support dynamic partition properties on range partition table"); createTable(createOlapTblStmt); @@ -302,31 +302,31 @@ public void testNotAllowed() throws Exception { @Test public void testNotAllowedInMultiPartitions() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_normal` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1, k2)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\", \"100\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\", \"200\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\", \"300\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_normal` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1, k2)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\", \"100\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\", \"200\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\", \"300\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; expectedException.expect(DdlException.class); expectedException.expectMessage("errCode = 2, detailMessage = Dynamic 
partition only support single-column range partition"); createTable(createOlapTblStmt); @@ -334,93 +334,93 @@ public void testNotAllowedInMultiPartitions() throws Exception { @Test public void testMissTimeZone() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_miss_time_zone` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.buckets\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_miss_time_zone` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.buckets\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\"\n" + + ");"; createTable(createOlapTblStmt); } @Test public void testNormalTimeZone() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_time_zone` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.buckets\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\",\n" + - "\"dynamic_partition.prefix\" = \"p\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_time_zone` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT 
\"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.buckets\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\",\n" + + "\"dynamic_partition.prefix\" = \"p\"\n" + + ");"; createTable(createOlapTblStmt); } @Test public void testInvalidTimeZone() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_invalid_time_zone` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.buckets\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.time_zone\" = \"invalid\",\n" + - "\"dynamic_partition.prefix\" = \"p\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_invalid_time_zone` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.buckets\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.time_zone\" = \"invalid\",\n" + + "\"dynamic_partition.prefix\" = \"p\"\n" + + ");"; expectedException.expect(DdlException.class); expectedException.expectMessage("errCode = 2, detailMessage = Unknown or incorrect time zone: 'invalid'"); createTable(createOlapTblStmt); @@ -429,32 +429,32 @@ public void testInvalidTimeZone() throws Exception { @Test public void testSetDynamicPartitionReplicationNum() throws Exception { String tableName = "dynamic_partition_replication_num"; - String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" + - " `k1` date NULL COMMENT 
\"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.replication_num\" = \"2\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.replication_num\" = \"2\"\n" + + ");"; createTable(createOlapTblStmt); Database db = Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test"); OlapTable table = (OlapTable) db.getTableOrAnalysisException(tableName); @@ -471,27 +471,27 @@ public void testSetDynamicPartitionReplicationNum() throws Exception { @Test public void testCreateDynamicPartitionImmediately() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`empty_dynamic_partition` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`empty_dynamic_partition` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" 
+ + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); OlapTable emptyDynamicTable = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("empty_dynamic_partition"); Assert.assertTrue(emptyDynamicTable.getAllPartitions().size() == 4); @@ -524,28 +524,28 @@ public void testCreateDynamicPartitionImmediately() throws Exception { @Test public void testFillHistoryDynamicPartition() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`histo_dynamic_partition` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`histo_dynamic_partition` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); OlapTable emptyDynamicTable = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("histo_dynamic_partition"); Assert.assertEquals(7, emptyDynamicTable.getAllPartitions().size()); @@ -578,28 +578,28 @@ public void testFillHistoryDynamicPartition() throws Exception { @Test(expected = DdlException.class) public void testFillHistoryDynamicPartition2() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`histo_dynamic_partition2` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - 
"\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3000\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`histo_dynamic_partition2` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3000\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; // exceed the max dynamic partition limit Config.max_dynamic_partition_num = 1000; createTable(createOlapTblStmt); @@ -607,81 +607,81 @@ public void testFillHistoryDynamicPartition2() throws Exception { @Test public void testFillHistoryDynamicPartition3() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition3` (\n" + - " `k1` date NULL COMMENT \"\"\n" + - ")\n" + - "PARTITION BY RANGE (k1)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition3` (\n" + + " `k1` date NULL COMMENT \"\"\n" + + ")\n" + + "PARTITION BY RANGE (k1)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\"\n" + + ");"; // start and history_partition_num are not set, can not create history partition ExceptionChecker.expectThrowsWithMsg(DdlException.class, "Provide start or history_partition_num property when creating history partition", () -> createTable(createOlapTblStmt)); - String createOlapTblStmt2 = "CREATE TABLE test.`dynamic_partition3` (\n" + - " `k1` date NULL COMMENT \"\"\n" + - ")\n" + - "PARTITION BY RANGE (k1)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.history_partition_num\" = \"1000\",\n" + - 
"\"dynamic_partition.create_history_partition\" = \"true\"\n" + - ");"; + String createOlapTblStmt2 = "CREATE TABLE test.`dynamic_partition3` (\n" + + " `k1` date NULL COMMENT \"\"\n" + + ")\n" + + "PARTITION BY RANGE (k1)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.history_partition_num\" = \"1000\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\"\n" + + ");"; // start is not set, but history_partition_num is set too large, can not create history partition ExceptionChecker.expectThrowsWithMsg(DdlException.class, "Too many dynamic partitions", () -> createTable(createOlapTblStmt2)); - String createOlapTblStmt3 = "CREATE TABLE test.`dynamic_partition3` (\n" + - " `k1` date NULL COMMENT \"\"\n" + - ")\n" + - "PARTITION BY RANGE (k1)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.start\" = \"-1000\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\"\n" + - ");"; + String createOlapTblStmt3 = "CREATE TABLE test.`dynamic_partition3` (\n" + + " `k1` date NULL COMMENT \"\"\n" + + ")\n" + + "PARTITION BY RANGE (k1)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.start\" = \"-1000\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\"\n" + + ");"; // start is set but too small,history_partition_num is not set, can not create history partition ExceptionChecker.expectThrowsWithMsg(DdlException.class, "Too many dynamic partitions", () -> createTable(createOlapTblStmt3)); - String createOlapTblStmt4 = "CREATE TABLE test.`dynamic_partition3` (\n" + - " `k1` date NULL COMMENT \"\"\n" + - ")\n" + - "PARTITION BY RANGE (k1)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.start\" = \"-10\",\n" + - "\"dynamic_partition.history_partition_num\" = \"5\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\"\n" + - ");"; + String createOlapTblStmt4 = "CREATE TABLE test.`dynamic_partition3` (\n" + + " `k1` date NULL COMMENT \"\"\n" + + ")\n" + + "PARTITION BY RANGE (k1)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = 
\"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.start\" = \"-10\",\n" + + "\"dynamic_partition.history_partition_num\" = \"5\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\"\n" + + ");"; // start and history_partition_num are set, create ok ExceptionChecker.expectThrowsNoException(() -> createTable(createOlapTblStmt4)); Database db = Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test"); @@ -710,23 +710,23 @@ public void testFillHistoryDynamicPartition3() throws Exception { @Test public void testFillHistoryDynamicPartitionWithHistoryPartitionNum() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`history_dynamic_partition_day` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.history_partition_num\" = \"10\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`history_dynamic_partition_day` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.history_partition_num\" = \"10\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); OlapTable emptyDynamicTable = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("history_dynamic_partition_day"); Map tableProperties = emptyDynamicTable.getTableProperty().getProperties(); @@ -737,23 +737,23 @@ public void testFillHistoryDynamicPartitionWithHistoryPartitionNum() throws Exce @Test public void testAllTypeDynamicPartition() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`hour_dynamic_partition` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"hour\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`hour_dynamic_partition` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + 
"\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"hour\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); OlapTable emptyDynamicTable = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("hour_dynamic_partition"); Assert.assertEquals(7, emptyDynamicTable.getAllPartitions().size()); @@ -764,23 +764,23 @@ public void testAllTypeDynamicPartition() throws Exception { Assert.assertEquals(11, partitionName.length()); } - createOlapTblStmt = "CREATE TABLE test.`week_dynamic_partition` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"week\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + createOlapTblStmt = "CREATE TABLE test.`week_dynamic_partition` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"week\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); emptyDynamicTable = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("week_dynamic_partition"); Assert.assertEquals(7, emptyDynamicTable.getAllPartitions().size()); @@ -791,23 +791,23 @@ public void testAllTypeDynamicPartition() throws Exception { Assert.assertEquals(8, partitionName.length()); } - createOlapTblStmt = "CREATE TABLE test.`month_dynamic_partition` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"month\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + createOlapTblStmt = "CREATE TABLE test.`month_dynamic_partition` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = 
\"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"month\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); emptyDynamicTable = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("month_dynamic_partition"); Assert.assertEquals(7, emptyDynamicTable.getAllPartitions().size()); @@ -818,23 +818,23 @@ public void testAllTypeDynamicPartition() throws Exception { Assert.assertEquals(7, partitionName.length()); } - createOlapTblStmt = "CREATE TABLE test.`int_dynamic_partition_day` (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + createOlapTblStmt = "CREATE TABLE test.`int_dynamic_partition_day` (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); emptyDynamicTable = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("int_dynamic_partition_day"); Assert.assertEquals(7, emptyDynamicTable.getAllPartitions().size()); @@ -845,23 +845,23 @@ public void testAllTypeDynamicPartition() throws Exception { Assert.assertEquals(9, partitionName.length()); } - createOlapTblStmt = "CREATE TABLE test.`int_dynamic_partition_week` (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"week\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + createOlapTblStmt = "CREATE TABLE test.`int_dynamic_partition_week` (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = 
\"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"week\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); emptyDynamicTable = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("int_dynamic_partition_week"); Assert.assertEquals(7, emptyDynamicTable.getAllPartitions().size()); @@ -872,23 +872,23 @@ public void testAllTypeDynamicPartition() throws Exception { Assert.assertEquals(8, partitionName.length()); } - createOlapTblStmt = "CREATE TABLE test.`int_dynamic_partition_month` (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"month\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + createOlapTblStmt = "CREATE TABLE test.`int_dynamic_partition_month` (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"month\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); emptyDynamicTable = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("int_dynamic_partition_month"); Assert.assertEquals(7, emptyDynamicTable.getAllPartitions().size()); @@ -902,23 +902,23 @@ public void testAllTypeDynamicPartition() throws Exception { @Test(expected = DdlException.class) public void testHourDynamicPartitionWithIntType() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`int_dynamic_partition_hour` (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"hour\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`int_dynamic_partition_hour` (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + 
"\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"hour\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); } @@ -926,26 +926,26 @@ public void testHourDynamicPartitionWithIntType() throws Exception { public void testHotPartitionNum() throws Exception { Database testDb = Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test"); // 1. hour - String createOlapTblStmt = "CREATE TABLE test.`hot_partition_hour_tbl1` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"hour\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.hot_partition_num\" = \"1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`hot_partition_hour_tbl1` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"hour\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.hot_partition_num\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); - OlapTable tbl = (OlapTable)testDb.getTableOrAnalysisException("hot_partition_hour_tbl1"); + OlapTable tbl = (OlapTable) testDb.getTableOrAnalysisException("hot_partition_hour_tbl1"); RangePartitionInfo partitionInfo = (RangePartitionInfo) tbl.getPartitionInfo(); Map idToDataProperty = new TreeMap<>(partitionInfo.idToDataProperty); Assert.assertEquals(7, idToDataProperty.size()); @@ -959,26 +959,26 @@ public void testHotPartitionNum() throws Exception { ++count; } - createOlapTblStmt = "CREATE TABLE test.`hot_partition_hour_tbl2` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"hour\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.hot_partition_num\" = \"0\"\n" + - ");"; + createOlapTblStmt = "CREATE TABLE test.`hot_partition_hour_tbl2` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY 
HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"hour\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.hot_partition_num\" = \"0\"\n" + + ");"; createTable(createOlapTblStmt); - tbl = (OlapTable)testDb.getTableOrAnalysisException("hot_partition_hour_tbl2"); + tbl = (OlapTable) testDb.getTableOrAnalysisException("hot_partition_hour_tbl2"); partitionInfo = (RangePartitionInfo) tbl.getPartitionInfo(); idToDataProperty = new TreeMap<>(partitionInfo.idToDataProperty); Assert.assertEquals(7, idToDataProperty.size()); @@ -986,26 +986,26 @@ public void testHotPartitionNum() throws Exception { Assert.assertEquals(TStorageMedium.HDD, dataProperty.getStorageMedium()); } - createOlapTblStmt = "CREATE TABLE test.`hot_partition_hour_tbl3` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"hour\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.hot_partition_num\" = \"3\"\n" + - ");"; + createOlapTblStmt = "CREATE TABLE test.`hot_partition_hour_tbl3` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"hour\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.hot_partition_num\" = \"3\"\n" + + ");"; createTable(createOlapTblStmt); - tbl = (OlapTable)testDb.getTableOrAnalysisException("hot_partition_hour_tbl3"); + tbl = (OlapTable) testDb.getTableOrAnalysisException("hot_partition_hour_tbl3"); partitionInfo = (RangePartitionInfo) tbl.getPartitionInfo(); idToDataProperty = new TreeMap<>(partitionInfo.idToDataProperty); Assert.assertEquals(7, idToDataProperty.size()); @@ -1020,25 +1020,25 @@ public void testHotPartitionNum() throws Exception { } // 2. 
day - createOlapTblStmt = "CREATE TABLE test.`hot_partition_day_tbl1` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.hot_partition_num\" = \"2\"\n" + - ");"; + createOlapTblStmt = "CREATE TABLE test.`hot_partition_day_tbl1` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.hot_partition_num\" = \"2\"\n" + + ");"; createTable(createOlapTblStmt); - tbl = (OlapTable)testDb.getTableOrAnalysisException("hot_partition_day_tbl1"); + tbl = (OlapTable) testDb.getTableOrAnalysisException("hot_partition_day_tbl1"); partitionInfo = (RangePartitionInfo) tbl.getPartitionInfo(); idToDataProperty = new TreeMap<>(partitionInfo.idToDataProperty); Assert.assertEquals(4, idToDataProperty.size()); @@ -1046,26 +1046,26 @@ public void testHotPartitionNum() throws Exception { Assert.assertEquals(TStorageMedium.SSD, dataProperty.getStorageMedium()); } - createOlapTblStmt = "CREATE TABLE test.`hot_partition_day_tbl2` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"4\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.hot_partition_num\" = \"2\"\n" + - ");"; + createOlapTblStmt = "CREATE TABLE test.`hot_partition_day_tbl2` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"4\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.hot_partition_num\" = \"2\"\n" + + ");"; createTable(createOlapTblStmt); - tbl = (OlapTable)testDb.getTableOrAnalysisException("hot_partition_day_tbl2"); + tbl = (OlapTable) testDb.getTableOrAnalysisException("hot_partition_day_tbl2"); partitionInfo = (RangePartitionInfo) 
tbl.getPartitionInfo(); idToDataProperty = new TreeMap<>(partitionInfo.idToDataProperty); Assert.assertEquals(8, idToDataProperty.size()); @@ -1079,26 +1079,26 @@ public void testHotPartitionNum() throws Exception { ++count; } // 3. week - createOlapTblStmt = "CREATE TABLE test.`hot_partition_week_tbl1` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"4\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"week\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.hot_partition_num\" = \"1\"\n" + - ");"; + createOlapTblStmt = "CREATE TABLE test.`hot_partition_week_tbl1` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"4\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"week\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.hot_partition_num\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); - tbl = (OlapTable)testDb.getTableOrAnalysisException("hot_partition_week_tbl1"); + tbl = (OlapTable) testDb.getTableOrAnalysisException("hot_partition_week_tbl1"); partitionInfo = (RangePartitionInfo) tbl.getPartitionInfo(); idToDataProperty = new TreeMap<>(partitionInfo.idToDataProperty); Assert.assertEquals(8, idToDataProperty.size()); @@ -1112,26 +1112,26 @@ public void testHotPartitionNum() throws Exception { ++count; } // 4. 
month - createOlapTblStmt = "CREATE TABLE test.`hot_partition_month_tbl1` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"4\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"month\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.hot_partition_num\" = \"4\"\n" + - ");"; + createOlapTblStmt = "CREATE TABLE test.`hot_partition_month_tbl1` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"4\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"month\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.hot_partition_num\" = \"4\"\n" + + ");"; createTable(createOlapTblStmt); - tbl = (OlapTable)testDb.getTableOrAnalysisException("hot_partition_month_tbl1"); + tbl = (OlapTable) testDb.getTableOrAnalysisException("hot_partition_month_tbl1"); partitionInfo = (RangePartitionInfo) tbl.getPartitionInfo(); idToDataProperty = new TreeMap<>(partitionInfo.idToDataProperty); Assert.assertEquals(8, idToDataProperty.size()); @@ -1143,24 +1143,24 @@ public void testHotPartitionNum() throws Exception { @Test(expected = DdlException.class) public void testHotPartitionNumAbnormal() throws Exception { // dynamic_partition.hot_partition_num must larger than 0. 
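Every hunk in this test file, and in the test files that follow, applies the same three mechanical whitespace fixes: the string-concatenation `+` moves from the end of a wrapped line to the head of its continuation line, casts such as `(OlapTable)testDb` gain a trailing space, and lambda arrows written `()->` become `() ->`. A minimal sketch of the rejected and accepted shapes, using made-up names rather than code from this patch (the rule names in the comments are the stock Checkstyle checks that would typically enforce these patterns):

```java
public class WhitespaceFixSketch {
    public static void main(String[] args) {
        // Rejected shape: trailing '+', no space after the cast, '()->':
        //   String stmt = "CREATE TABLE t (\n" +
        //           "k1 int\n" +
        //           ");";
        //   Object o = (Object)stmt;
        //   Runnable r = ()->System.out.println(o);

        // Accepted shape: '+' leads each continuation line (OperatorWrap),
        // one space follows the cast (WhitespaceAfter on TYPECAST), and the
        // lambda arrow is padded on both sides (WhitespaceAround on LAMBDA).
        String stmt = "CREATE TABLE t (\n"
                + "k1 int\n"
                + ");";
        Object o = (Object) stmt;
        Runnable r = () -> System.out.println(o);
        r.run();
    }
}
```

Putting the operator at the head of the continuation line also keeps the long SQL literals in these tests easier to review: every continuation line of a CREATE TABLE statement starts with the same visual marker instead of ending with a dangling `+`.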
- String createOlapTblStmt = "CREATE TABLE test.`hot_partition_hour_tbl1` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "PARTITION BY RANGE(`k1`)\n" + - "()\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.create_history_partition\" = \"true\",\n" + - "\"dynamic_partition.time_unit\" = \"hour\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.hot_partition_num\" = \"-1\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`hot_partition_hour_tbl1` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "PARTITION BY RANGE(`k1`)\n" + + "()\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.create_history_partition\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"hour\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.hot_partition_num\" = \"-1\"\n" + + ");"; createTable(createOlapTblStmt); } @@ -1189,31 +1189,31 @@ public void testRuntimeInfo() throws Exception { @Test public void testMissReservedHistoryPeriods() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_miss_reserved_history_periods` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.buckets\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_miss_reserved_history_periods` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + 
"\"dynamic_partition.buckets\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\"\n" + + ");"; createTable(createOlapTblStmt); OlapTable table = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("dynamic_partition_miss_reserved_history_periods"); Assert.assertEquals("NULL", table.getTableProperty().getDynamicPartitionProperty().getReservedHistoryPeriods()); @@ -1221,102 +1221,102 @@ public void testMissReservedHistoryPeriods() throws Exception { @Test public void testNormalReservedHisrotyPeriods() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_normal_reserved_history_periods` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\"),\n" + - "PARTITION p4 VALUES LESS THAN (\"2020-06-01\"),\n" + - "PARTITION p5 VALUES LESS THAN (\"2020-06-20\"),\n" + - "PARTITION p6 VALUES LESS THAN (\"2020-10-25\"),\n" + - "PARTITION p7 VALUES LESS THAN (\"2020-11-01\"),\n" + - "PARTITION p8 VALUES LESS THAN (\"2020-11-11\"),\n" + - "PARTITION p9 VALUES LESS THAN (\"2020-11-21\"),\n" + - "PARTITION p10 VALUES LESS THAN (\"2021-04-20\"),\n" + - "PARTITION p11 VALUES LESS THAN (\"2021-05-20\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.buckets\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.reserved_history_periods\" = \"[2020-06-01,2020-06-20],[2020-10-25,2020-11-15],[2021-06-01,2021-06-20]\"\n" + - ");"; + String createOlapTblStmt = "CREATE TABLE test.`dynamic_partition_normal_reserved_history_periods` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\"),\n" + + "PARTITION p4 VALUES LESS THAN (\"2020-06-01\"),\n" + + "PARTITION p5 VALUES LESS THAN (\"2020-06-20\"),\n" + + "PARTITION p6 VALUES LESS THAN (\"2020-10-25\"),\n" + + "PARTITION p7 VALUES LESS THAN (\"2020-11-01\"),\n" + + "PARTITION p8 VALUES LESS THAN (\"2020-11-11\"),\n" + + "PARTITION p9 VALUES LESS THAN (\"2020-11-21\"),\n" + + "PARTITION p10 VALUES LESS THAN (\"2021-04-20\"),\n" + + "PARTITION p11 VALUES LESS THAN (\"2021-05-20\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + 
"\"dynamic_partition.buckets\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.reserved_history_periods\" = \"[2020-06-01,2020-06-20],[2020-10-25,2020-11-15],[2021-06-01,2021-06-20]\"\n" + + ");"; createTable(createOlapTblStmt); OlapTable table = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("dynamic_partition_normal_reserved_history_periods"); Assert.assertEquals("[2020-06-01,2020-06-20],[2020-10-25,2020-11-15],[2021-06-01,2021-06-20]", table.getTableProperty().getDynamicPartitionProperty().getReservedHistoryPeriods()); Assert.assertEquals(table.getAllPartitions().size(), 9); - String createOlapTblStmt2 = "CREATE TABLE test.`dynamic_partition_normal_reserved_history_periods2` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01 00:00:00\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-01-01 03:00:00\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-01-01 04:00:00\"),\n" + - "PARTITION p4 VALUES LESS THAN (\"2020-01-01 08:00:00\"),\n" + - "PARTITION p5 VALUES LESS THAN (\"2020-06-20 00:00:00\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.buckets\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"hour\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.reserved_history_periods\" = \"[2014-01-01 00:00:00,2014-01-01 03:00:00]\"\n" + - ");"; + String createOlapTblStmt2 = "CREATE TABLE test.`dynamic_partition_normal_reserved_history_periods2` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01 00:00:00\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-01-01 03:00:00\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-01-01 04:00:00\"),\n" + + "PARTITION p4 VALUES LESS THAN (\"2020-01-01 08:00:00\"),\n" + + "PARTITION p5 VALUES LESS THAN (\"2020-06-20 00:00:00\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.buckets\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"hour\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.reserved_history_periods\" = \"[2014-01-01 00:00:00,2014-01-01 03:00:00]\"\n" + + ");"; createTable(createOlapTblStmt2); OlapTable table2 = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("dynamic_partition_normal_reserved_history_periods2"); Assert.assertEquals("[2014-01-01 
00:00:00,2014-01-01 03:00:00]", table2.getTableProperty().getDynamicPartitionProperty().getReservedHistoryPeriods()); Assert.assertEquals(table2.getAllPartitions().size(), 6); - String createOlapTblStmt3 = "CREATE TABLE test.`dynamic_partition_normal_reserved_history_periods3` (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p202127 VALUES [(\"20200527\"), (\"20200628\"))\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k2`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.buckets\" = \"1\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.reserved_history_periods\" = \"[2020-06-01,2020-06-30]\"\n" + - ");"; + String createOlapTblStmt3 = "CREATE TABLE test.`dynamic_partition_normal_reserved_history_periods3` (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p202127 VALUES [(\"20200527\"), (\"20200628\"))\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k2`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.buckets\" = \"1\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.reserved_history_periods\" = \"[2020-06-01,2020-06-30]\"\n" + + ");"; createTable(createOlapTblStmt3); OlapTable table3 = (OlapTable) Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:test").getTableOrAnalysisException("dynamic_partition_normal_reserved_history_periods3"); Assert.assertEquals("[2020-06-01,2020-06-30]", table3.getTableProperty().getDynamicPartitionProperty().getReservedHistoryPeriods()); @@ -1325,201 +1325,202 @@ public void testNormalReservedHisrotyPeriods() throws Exception { @Test public void testInvalidReservedHistoryPeriods() throws Exception { - String createOlapTblStmt1 = "CREATE TABLE test.`dynamic_partition_invalid_reserved_history_periods1` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - 
"\"dynamic_partition.buckets\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.reserved_history_periods\" = \"[20210101,2021-10-10]\"\n" + - ");"; + String createOlapTblStmt1 = "CREATE TABLE test.`dynamic_partition_invalid_reserved_history_periods1` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.buckets\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.reserved_history_periods\" = \"[20210101,2021-10-10]\"\n" + + ");"; ExceptionChecker.expectThrowsWithMsg(DdlException.class, - "errCode = 2, detailMessage = Invalid \" dynamic_partition.reserved_history_periods \" value [20210101,2021-10-10]. " + - "It must be like \"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH or " + - "\"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR.", + "errCode = 2, detailMessage = Invalid \" dynamic_partition.reserved_history_periods \" value [20210101,2021-10-10]. 
" + + "It must be like \"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH or " + + "\"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR.", () -> createTable(createOlapTblStmt1)); - String createOlapTblStmt2 = "CREATE TABLE test.`dynamic_partition_invalid_reserved_history_periods2` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.buckets\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.reserved_history_periods\" = \"[0000-00-00,2021-10-10]\"\n" + - ");"; + String createOlapTblStmt2 = "CREATE TABLE test.`dynamic_partition_invalid_reserved_history_periods2` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.buckets\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.reserved_history_periods\" = \"[0000-00-00,2021-10-10]\"\n" + + ");"; ExceptionChecker.expectThrowsWithMsg(DdlException.class, - "errCode = 2, detailMessage = Invalid dynamic_partition.reserved_history_periods value. " + - "It must be correct DATE value " + - "\"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH or " + - "\"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR.", + "errCode = 2, detailMessage = Invalid dynamic_partition.reserved_history_periods value. 
" + + "It must be correct DATE value " + + "\"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH or " + + "\"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR.", () -> createTable(createOlapTblStmt2)); } @Test public void testReservedHistoryPeriodsValidate() throws Exception { - String createOlapTblStmt1 = "CREATE TABLE test.`dynamic_partition_reserved_history_periods_validate1` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.buckets\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.reserved_history_periods\" = \"[2021-01-01,]\"\n" + - ");"; + String createOlapTblStmt1 = "CREATE TABLE test.`dynamic_partition_reserved_history_periods_validate1` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.buckets\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.reserved_history_periods\" = \"[2021-01-01,]\"\n" + + ");"; ExceptionChecker.expectThrowsWithMsg(DdlException.class, - "errCode = 2, detailMessage = Invalid \" dynamic_partition.reserved_history_periods \" value [2021-01-01,]. " + - "It must be like " + - "\"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH " + - "or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR.", + "errCode = 2, detailMessage = Invalid \" dynamic_partition.reserved_history_periods \" value [2021-01-01,]. 
" + + "It must be like " + + "\"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH " + + "or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR.", () -> createTable(createOlapTblStmt1)); - String createOlapTblStmt2 = "CREATE TABLE test.`dynamic_partition_reserved_history_periods_validate2` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.buckets\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.reserved_history_periods\" = \"[,2021-01-01]\"\n" + - ");"; + String createOlapTblStmt2 = "CREATE TABLE test.`dynamic_partition_reserved_history_periods_validate2` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.buckets\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.reserved_history_periods\" = \"[,2021-01-01]\"\n" + + ");"; ExceptionChecker.expectThrowsWithMsg(DdlException.class, - "errCode = 2, detailMessage = Invalid \" dynamic_partition.reserved_history_periods \" value [,2021-01-01]. " + - "It must be like " + - "\"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH or " + - "\"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR.", + "errCode = 2, detailMessage = Invalid \" dynamic_partition.reserved_history_periods \" value [,2021-01-01]. 
" + + "It must be like " + + "\"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH or " + + "\"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR.", () -> createTable(createOlapTblStmt2)); - String createOlapTblStmt3 = "CREATE TABLE test.`dynamic_partition_reserved_history_periods_validate3` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.buckets\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.reserved_history_periods\" = \"[2020-01-01,2020-03-01],[2021-10-01,2021-09-01]\"\n" + - ");"; + String createOlapTblStmt3 = "CREATE TABLE test.`dynamic_partition_reserved_history_periods_validate3` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.buckets\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.reserved_history_periods\" = \"[2020-01-01,2020-03-01],[2021-10-01,2021-09-01]\"\n" + + ");"; ExceptionChecker.expectThrowsWithMsg(DdlException.class, "errCode = 2, detailMessage = The first date is larger than the second date, [2021-10-01,2021-09-01] is invalid.", () -> createTable(createOlapTblStmt3)); - String createOlapTblStmt4 = "CREATE TABLE test.`dynamic_partition_reserved_history_periods_validate4` (\n" + - " `k1` datetime NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01 00:00:00\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01 00:00:00\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01 00:00:00\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - 
"\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.buckets\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"hour\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.reserved_history_periods\" = \"[2020-01-01,2020-03-01]\"\n" + - ");"; + String createOlapTblStmt4 = "CREATE TABLE test.`dynamic_partition_reserved_history_periods_validate4` (\n" + + " `k1` datetime NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01 00:00:00\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01 00:00:00\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01 00:00:00\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.buckets\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"hour\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.reserved_history_periods\" = \"[2020-01-01,2020-03-01]\"\n" + + ");"; ExceptionChecker.expectThrowsWithMsg(DdlException.class, - "errCode = 2, detailMessage = Invalid \" dynamic_partition.reserved_history_periods \" value [2020-01-01,2020-03-01]. " + - "It must be like " + - "\"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH " + - "or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR.", + "errCode = 2, detailMessage = Invalid \" dynamic_partition.reserved_history_periods \"" + + " value [2020-01-01,2020-03-01]. 
" + + "It must be like " + + "\"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH " + + "or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR.", () -> createTable(createOlapTblStmt4)); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/ModifyBackendTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/ModifyBackendTest.java index 78a613bf6130cf..62f7e487a231a4 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/ModifyBackendTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/ModifyBackendTest.java @@ -78,40 +78,40 @@ public void testModifyBackendTag() throws Exception { Assert.assertEquals(1, backends.size()); // create table - String createStr = "create table test.tbl1(\n" + - "k1 int\n" + - ") distributed by hash(k1)\n" + - "buckets 3 properties(\n" + - "\"replication_num\" = \"1\"\n" + - ");"; + String createStr = "create table test.tbl1(\n" + + "k1 int\n" + + ") distributed by hash(k1)\n" + + "buckets 3 properties(\n" + + "\"replication_num\" = \"1\"\n" + + ");"; CreateTableStmt createStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createStr, connectContext); ExceptionChecker.expectThrowsWithMsg(DdlException.class, "Failed to find 1 backends for policy:", () -> DdlExecutor.execute(Catalog.getCurrentCatalog(), createStmt)); - createStr = "create table test.tbl1(\n" + - "k1 int\n" + - ") distributed by hash(k1)\n" + - "buckets 3 properties(\n" + - "\"replication_allocation\" = \"tag.location.zone1: 1\"\n" + - ");"; + createStr = "create table test.tbl1(\n" + + "k1 int\n" + + ") distributed by hash(k1)\n" + + "buckets 3 properties(\n" + + "\"replication_allocation\" = \"tag.location.zone1: 1\"\n" + + ");"; CreateTableStmt createStmt2 = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createStr, connectContext); ExceptionChecker.expectThrowsNoException(() -> DdlExecutor.execute(Catalog.getCurrentCatalog(), createStmt2)); // create dynamic partition tbl - createStr = "create table test.tbl3(\n" + - "k1 date, k2 int\n" + - ") partition by range(k1)()\n" + - "distributed by hash(k1)\n" + - "buckets 3 properties(\n" + - " \"dynamic_partition.enable\" = \"true\",\n" + - " \"dynamic_partition.time_unit\" = \"DAY\",\n" + - " \"dynamic_partition.start\" = \"-3\",\n" + - " \"dynamic_partition.end\" = \"3\",\n" + - " \"dynamic_partition.prefix\" = \"p\",\n" + - " \"dynamic_partition.buckets\" = \"1\",\n" + - " \"dynamic_partition.replication_num\" = \"1\"\n" + - ");"; + createStr = "create table test.tbl3(\n" + + "k1 date, k2 int\n" + + ") partition by range(k1)()\n" + + "distributed by hash(k1)\n" + + "buckets 3 properties(\n" + + " \"dynamic_partition.enable\" = \"true\",\n" + + " \"dynamic_partition.time_unit\" = \"DAY\",\n" + + " \"dynamic_partition.start\" = \"-3\",\n" + + " \"dynamic_partition.end\" = \"3\",\n" + + " \"dynamic_partition.prefix\" = \"p\",\n" + + " \"dynamic_partition.buckets\" = \"1\",\n" + + " \"dynamic_partition.replication_num\" = \"1\"\n" + + ");"; CreateTableStmt createStmt3 = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createStr, connectContext); // although there is no exception throw, but partition create failed, because there is no BE // with "default" tag @@ -121,22 +121,21 @@ public void testModifyBackendTag() throws Exception { String err = Catalog.getCurrentCatalog().getDynamicPartitionScheduler().getRuntimeInfo(tbl3.getId(), DynamicPartitionScheduler.CREATE_PARTITION_MSG); Assert.assertTrue(err.contains("Failed to find 1 backends for policy:")); - 
createStr = "create table test.tbl4(\n" + - "k1 date, k2 int\n" + - ") partition by range(k1)()\n" + - "distributed by hash(k1)\n" + - "buckets 3 properties(\n" + - " \"dynamic_partition.enable\" = \"true\",\n" + - " \"dynamic_partition.time_unit\" = \"DAY\",\n" + - " \"dynamic_partition.start\" = \"-3\",\n" + - " \"dynamic_partition.end\" = \"3\",\n" + - " \"dynamic_partition.prefix\" = \"p\",\n" + - " \"dynamic_partition.buckets\" = \"1\",\n" + - " \"dynamic_partition.replication_allocation\" = \"tag.location.zone1:1\"\n" + - ");"; + createStr = "create table test.tbl4(\n" + + "k1 date, k2 int\n" + + ") partition by range(k1)()\n" + + "distributed by hash(k1)\n" + + "buckets 3 properties(\n" + + " \"dynamic_partition.enable\" = \"true\",\n" + + " \"dynamic_partition.time_unit\" = \"DAY\",\n" + + " \"dynamic_partition.start\" = \"-3\",\n" + + " \"dynamic_partition.end\" = \"3\",\n" + + " \"dynamic_partition.prefix\" = \"p\",\n" + + " \"dynamic_partition.buckets\" = \"1\",\n" + + " \"dynamic_partition.replication_allocation\" = \"tag.location.zone1:1\"\n" + + ");"; CreateTableStmt createStmt4 = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createStr, connectContext); ExceptionChecker.expectThrowsNoException(() -> DdlExecutor.execute(Catalog.getCurrentCatalog(), createStmt4)); - DynamicPartitionScheduler scheduler = Catalog.getCurrentCatalog().getDynamicPartitionScheduler(); OlapTable tbl = (OlapTable) db.getTableNullable("tbl4"); PartitionInfo partitionInfo = tbl.getPartitionInfo(); Assert.assertEquals(4, partitionInfo.idToItem.size()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/PartitionKeyTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/PartitionKeyTest.java index 995afd164a402f..02a13ac5e1f723 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/PartitionKeyTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/PartitionKeyTest.java @@ -198,7 +198,7 @@ public void compareTest() throws AnalysisException { @Test public void testSerialization() throws Exception { - FakeCatalog fakeCatalog = new FakeCatalog(); + FakeCatalog fakeCatalog = new FakeCatalog(); // CHECKSTYLE IGNORE THIS LINE FakeCatalog.setMetaVersion(FeConstants.meta_version); // 1. 
Write objects to file diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/RecoverTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/RecoverTest.java index bf5230e5e2d555..cafc8236159459 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/RecoverTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/RecoverTest.java @@ -120,32 +120,32 @@ private static boolean checkPartitionExist(String dbName, String tblName, String @Test public void testRecover() throws Exception { createDb("test"); - createTable("CREATE TABLE test.`table1` (\n" + - " `event_date` date NOT NULL COMMENT \"\",\n" + - " `app_name` varchar(64) NOT NULL COMMENT \"\",\n" + - " `package_name` varchar(64) NOT NULL COMMENT \"\",\n" + - " `age` varchar(32) NOT NULL COMMENT \"\",\n" + - " `gender` varchar(32) NOT NULL COMMENT \"\",\n" + - " `level` varchar(64) NOT NULL COMMENT \"\",\n" + - " `city` varchar(64) NOT NULL COMMENT \"\",\n" + - " `model` varchar(64) NOT NULL COMMENT \"\",\n" + - " `brand` varchar(64) NOT NULL COMMENT \"\",\n" + - " `hours` varchar(16) NOT NULL COMMENT \"\",\n" + - " `use_num` int(11) SUM NOT NULL COMMENT \"\",\n" + - " `use_time` double SUM NOT NULL COMMENT \"\",\n" + - " `start_times` bigint(20) SUM NOT NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(`event_date`, `app_name`, `package_name`, `age`, `gender`, `level`, `city`, `model`, `brand`, `hours`)\n" - + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE(`event_date`)\n" + - "(PARTITION p1 VALUES [('2020-02-27'), ('2020-03-02')),\n" + - "PARTITION p2 VALUES [('2020-03-02'), ('2020-03-07')))\n" + - "DISTRIBUTED BY HASH(`event_date`, `app_name`, `package_name`, `age`, `gender`, `level`, `city`, `model`, `brand`, `hours`) BUCKETS 1\n" - + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ");"); + createTable("CREATE TABLE test.`table1` (\n" + + " `event_date` date NOT NULL COMMENT \"\",\n" + + " `app_name` varchar(64) NOT NULL COMMENT \"\",\n" + + " `package_name` varchar(64) NOT NULL COMMENT \"\",\n" + + " `age` varchar(32) NOT NULL COMMENT \"\",\n" + + " `gender` varchar(32) NOT NULL COMMENT \"\",\n" + + " `level` varchar(64) NOT NULL COMMENT \"\",\n" + + " `city` varchar(64) NOT NULL COMMENT \"\",\n" + + " `model` varchar(64) NOT NULL COMMENT \"\",\n" + + " `brand` varchar(64) NOT NULL COMMENT \"\",\n" + + " `hours` varchar(16) NOT NULL COMMENT \"\",\n" + + " `use_num` int(11) SUM NOT NULL COMMENT \"\",\n" + + " `use_time` double SUM NOT NULL COMMENT \"\",\n" + + " `start_times` bigint(20) SUM NOT NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(`event_date`, `app_name`, `package_name`, `age`," + + " `gender`, `level`, `city`, `model`, `brand`, `hours`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE(`event_date`)\n" + + "(PARTITION p1 VALUES [('2020-02-27'), ('2020-03-02')),\n" + + "PARTITION p2 VALUES [('2020-03-02'), ('2020-03-07')))\n" + + "DISTRIBUTED BY HASH(`event_date`, `app_name`, `package_name`," + + " `age`, `gender`, `level`, `city`, `model`, `brand`, `hours`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ");"); Assert.assertTrue(checkDbExist("test")); Assert.assertTrue(checkTableExist("test", "table1")); @@ -170,32 +170,32 @@ public void testRecover() throws Exception { Assert.assertTrue(checkDbExist("test")); Assert.assertFalse(checkTableExist("test", "table1")); - createTable("CREATE TABLE test.`table1` (\n" + - " `event_date` date NOT NULL COMMENT \"\",\n" + - " `app_name` varchar(64) NOT NULL COMMENT \"\",\n" + - " 
`package_name` varchar(64) NOT NULL COMMENT \"\",\n" + - " `age` varchar(32) NOT NULL COMMENT \"\",\n" + - " `gender` varchar(32) NOT NULL COMMENT \"\",\n" + - " `level` varchar(64) NOT NULL COMMENT \"\",\n" + - " `city` varchar(64) NOT NULL COMMENT \"\",\n" + - " `model` varchar(64) NOT NULL COMMENT \"\",\n" + - " `brand` varchar(64) NOT NULL COMMENT \"\",\n" + - " `hours` varchar(16) NOT NULL COMMENT \"\",\n" + - " `use_num` int(11) SUM NOT NULL COMMENT \"\",\n" + - " `use_time` double SUM NOT NULL COMMENT \"\",\n" + - " `start_times` bigint(20) SUM NOT NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(`event_date`, `app_name`, `package_name`, `age`, `gender`, `level`, `city`, `model`, `brand`, `hours`)\n" - + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE(`event_date`)\n" + - "(PARTITION p1 VALUES [('2020-02-27'), ('2020-03-02')),\n" + - "PARTITION p2 VALUES [('2020-03-02'), ('2020-03-07')))\n" + - "DISTRIBUTED BY HASH(`event_date`, `app_name`, `package_name`, `age`, `gender`, `level`, `city`, `model`, `brand`, `hours`) BUCKETS 1\n" - + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ");"); + createTable("CREATE TABLE test.`table1` (\n" + + " `event_date` date NOT NULL COMMENT \"\",\n" + + " `app_name` varchar(64) NOT NULL COMMENT \"\",\n" + + " `package_name` varchar(64) NOT NULL COMMENT \"\",\n" + + " `age` varchar(32) NOT NULL COMMENT \"\",\n" + + " `gender` varchar(32) NOT NULL COMMENT \"\",\n" + + " `level` varchar(64) NOT NULL COMMENT \"\",\n" + + " `city` varchar(64) NOT NULL COMMENT \"\",\n" + + " `model` varchar(64) NOT NULL COMMENT \"\",\n" + + " `brand` varchar(64) NOT NULL COMMENT \"\",\n" + + " `hours` varchar(16) NOT NULL COMMENT \"\",\n" + + " `use_num` int(11) SUM NOT NULL COMMENT \"\",\n" + + " `use_time` double SUM NOT NULL COMMENT \"\",\n" + + " `start_times` bigint(20) SUM NOT NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(`event_date`, `app_name`, `package_name`," + + " `age`, `gender`, `level`, `city`, `model`, `brand`, `hours`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE(`event_date`)\n" + + "(PARTITION p1 VALUES [('2020-02-27'), ('2020-03-02')),\n" + + "PARTITION p2 VALUES [('2020-03-02'), ('2020-03-07')))\n" + + "DISTRIBUTED BY HASH(`event_date`, `app_name`, `package_name`," + + " `age`, `gender`, `level`, `city`, `model`, `brand`, `hours`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ");"); Assert.assertTrue(checkDbExist("test")); Assert.assertTrue(checkTableExist("test", "table1")); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/ReplicaAllocationTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/ReplicaAllocationTest.java index 900e7ec3dabc69..5aa93cf299a183 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/ReplicaAllocationTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/ReplicaAllocationTest.java @@ -119,12 +119,12 @@ public void testAbnormal() { final Map properties = Maps.newHashMap(); properties.put(PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION, "3"); ExceptionChecker.expectThrowsWithMsg(AnalysisException.class, "Invalid replication allocation property: 3", - ()->PropertyAnalyzer.analyzeReplicaAllocation(properties, "")); + () -> PropertyAnalyzer.analyzeReplicaAllocation(properties, "")); properties.clear(); properties.put(PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION, "tag.location.12321:1"); ExceptionChecker.expectThrowsWithMsg(AnalysisException.class, "Invalid tag format: location:12321", - 
()->PropertyAnalyzer.analyzeReplicaAllocation(properties, "")); + () -> PropertyAnalyzer.analyzeReplicaAllocation(properties, "")); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/S3ResourceTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/S3ResourceTest.java index 9e1f88817ba3ec..68133da2e65959 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/S3ResourceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/S3ResourceTest.java @@ -145,7 +145,7 @@ public void testAbnormalResource(@Mocked Catalog catalog, @Injectable PaloAuth a } @Test - public void testSerialization() throws Exception{ + public void testSerialization() throws Exception { MetaContext metaContext = new MetaContext(); metaContext.setMetaVersion(FeMetaVersion.VERSION_CURRENT); metaContext.setThreadLocalInfo(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/TableTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/TableTest.java index b3f412d51efd27..98cb2febe7906f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/TableTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/TableTest.java @@ -131,7 +131,8 @@ public void testSerialization() throws Exception { OlapTable table1 = new OlapTable(1000L, "group1", columns, KeysType.AGG_KEYS, new SinglePartitionInfo(), new RandomDistributionInfo(10)); short shortKeyColumnCount = 1; - table1.setIndexMeta(1000, "group1", columns, 1,1,shortKeyColumnCount,TStorageType.COLUMN, KeysType.AGG_KEYS); + table1.setIndexMeta(1000, "group1", columns, 1, 1, + shortKeyColumnCount, TStorageType.COLUMN, KeysType.AGG_KEYS); List column = Lists.newArrayList(); column.add(column2); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/TempPartitionTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/TempPartitionTest.java index 0a30e175f645fd..33dfc86ce578de 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/TempPartitionTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/TempPartitionTest.java @@ -208,15 +208,15 @@ public void testForMultiPartitionTable() throws Exception { System.out.println(Catalog.getCurrentCatalog().getDbNames()); // create table tbl2 - String createTblStmtStr1 = "create table db2.tbl2 (k1 int, k2 int)\n" + - "partition by range(k1)\n" + - "(\n" + - "partition p1 values less than('10'),\n" + - "partition p2 values less than('20'),\n" + - "partition p3 values less than('30')\n" + - ")\n" + - "distributed by hash(k2) buckets 1\n" + - "properties('replication_num' = '1');"; + String createTblStmtStr1 = "create table db2.tbl2 (k1 int, k2 int)\n" + + "partition by range(k1)\n" + + "(\n" + + "partition p1 values less than('10'),\n" + + "partition p2 values less than('20'),\n" + + "partition p3 values less than('30')\n" + + ")\n" + + "distributed by hash(k2) buckets 1\n" + + "properties('replication_num' = '1');"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr1, ctx); Catalog.getCurrentCatalog().createTable(createTableStmt); @@ -338,7 +338,7 @@ public void testForMultiPartitionTable() throws Exception { checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p1"), originPartitionTabletIds2.get("p2")), false); String truncateStr = "truncate table db2.tbl2 partition (p3);"; - TruncateTableStmt truncateTableStmt = (TruncateTableStmt)UtFrameUtils.parseAndAnalyzeStmt(truncateStr, ctx); + TruncateTableStmt truncateTableStmt = (TruncateTableStmt) 
UtFrameUtils.parseAndAnalyzeStmt(truncateStr, ctx); Catalog.getCurrentCatalog().truncateTable(truncateTableStmt); checkShowPartitionsResultNum("db2.tbl2", true, 1); checkShowPartitionsResultNum("db2.tbl2", false, 3); @@ -495,20 +495,19 @@ public void testForStrictRangeCheck() throws Exception { System.out.println(Catalog.getCurrentCatalog().getDbNames()); // create table tbl3 - String createTblStmtStr1 = "create table db3.tbl3 (k1 int, k2 int)\n" + - "partition by range(k1)\n" + - "(\n" + - "partition p1 values less than('10'),\n" + - "partition p2 values less than('20'),\n" + - "partition p3 values less than('30')\n" + - ")\n" + - "distributed by hash(k2) buckets 1\n" + - "properties('replication_num' = '1');"; + String createTblStmtStr1 = "create table db3.tbl3 (k1 int, k2 int)\n" + + "partition by range(k1)\n" + + "(\n" + + "partition p1 values less than('10'),\n" + + "partition p2 values less than('20'),\n" + + "partition p3 values less than('30')\n" + + ")\n" + + "distributed by hash(k2) buckets 1\n" + + "properties('replication_num' = '1');"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr1, ctx); Catalog.getCurrentCatalog().createTable(createTableStmt); - Database db3 = Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:db3"); - OlapTable tbl3 = (OlapTable) db3.getTableOrAnalysisException("tbl3"); + Catalog.getCurrentCatalog().getDbOrAnalysisException("default_cluster:db3"); // base range is [min, 10), [10, 20), [20, 30) @@ -571,15 +570,15 @@ public void testForListPartitionTable() throws Exception { System.out.println(Catalog.getCurrentCatalog().getDbNames()); // create table tbl4 - String createTblStmtStr1 = "create table db4.tbl4 (k1 int not null, k2 int)\n" + - "partition by list(k1)\n" + - "(\n" + - "partition p1 values in ('1', '2', '3'),\n" + - "partition p2 values in ('4', '5', '6'),\n" + - "partition p3 values in ('7', '8', '9')\n" + - ")\n" + - "distributed by hash(k2) buckets 1\n" + - "properties('replication_num' = '1');"; + String createTblStmtStr1 = "create table db4.tbl4 (k1 int not null, k2 int)\n" + + "partition by list(k1)\n" + + "(\n" + + "partition p1 values in ('1', '2', '3'),\n" + + "partition p2 values in ('4', '5', '6'),\n" + + "partition p3 values in ('7', '8', '9')\n" + + ")\n" + + "distributed by hash(k2) buckets 1\n" + + "properties('replication_num' = '1');"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr1, ctx); Catalog.getCurrentCatalog().createTable(createTableStmt); @@ -694,14 +693,14 @@ public void testForListPartitionTable() throws Exception { stmtStr = "alter table db4.tbl4 replace partition(p1, p2) with temporary partition(tp1, tp2) properties('use_temp_partition_name' = 'true');"; alterTable(stmtStr, false); checkShowPartitionsResultNum("db4.tbl4", true, 1); // tp3 - checkShowPartitionsResultNum("db4.tbl4", false, 3);// tp1, tp2, p3 + checkShowPartitionsResultNum("db4.tbl4", false, 3); // tp1, tp2, p3 checkTabletExists(tempPartitionTabletIds2.values(), true); checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p3")), true); checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p1"), originPartitionTabletIds2.get("p2")), false); String truncateStr = "truncate table db4.tbl4 partition (p3);"; - TruncateTableStmt truncateTableStmt = (TruncateTableStmt)UtFrameUtils.parseAndAnalyzeStmt(truncateStr, ctx); + TruncateTableStmt truncateTableStmt = (TruncateTableStmt) 
UtFrameUtils.parseAndAnalyzeStmt(truncateStr, ctx); Catalog.getCurrentCatalog().truncateTable(truncateTableStmt); checkShowPartitionsResultNum("db4.tbl4", true, 1); checkShowPartitionsResultNum("db4.tbl4", false, 3); @@ -909,15 +908,15 @@ public void testForMultiListPartitionTable() throws Exception { System.out.println(Catalog.getCurrentCatalog().getDbNames()); // create table tbl5 - String createTblStmtStr1 = "create table db5.tbl5 (k1 int not null, k2 varchar not null)\n" + - "partition by list(k1, k2)\n" + - "(\n" + - "partition p1 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\")),\n" + - "partition p2 values in ((\"2\",\"beijing\"), (\"2\", \"shanghai\")),\n" + - "partition p3 values in ((\"3\",\"beijing\"), (\"3\", \"shanghai\"))\n" + - ")\n" + - "distributed by hash(k2) buckets 1\n" + - "properties('replication_num' = '1');"; + String createTblStmtStr1 = "create table db5.tbl5 (k1 int not null, k2 varchar not null)\n" + + "partition by list(k1, k2)\n" + + "(\n" + + "partition p1 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\")),\n" + + "partition p2 values in ((\"2\",\"beijing\"), (\"2\", \"shanghai\")),\n" + + "partition p3 values in ((\"3\",\"beijing\"), (\"3\", \"shanghai\"))\n" + + ")\n" + + "distributed by hash(k2) buckets 1\n" + + "properties('replication_num' = '1');"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr1, ctx); Catalog.getCurrentCatalog().createTable(createTableStmt); @@ -1032,14 +1031,14 @@ public void testForMultiListPartitionTable() throws Exception { stmtStr = "alter table db5.tbl5 replace partition(p1, p2) with temporary partition(tp1, tp2) properties('use_temp_partition_name' = 'true');"; alterTable(stmtStr, false); checkShowPartitionsResultNum("db5.tbl5", true, 1); // tp3 - checkShowPartitionsResultNum("db5.tbl5", false, 3);// tp1, tp2, p3 + checkShowPartitionsResultNum("db5.tbl5", false, 3); // tp1, tp2, p3 checkTabletExists(tempPartitionTabletIds2.values(), true); checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p3")), true); checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p1"), originPartitionTabletIds2.get("p2")), false); String truncateStr = "truncate table db5.tbl5 partition (p3);"; - TruncateTableStmt truncateTableStmt = (TruncateTableStmt)UtFrameUtils.parseAndAnalyzeStmt(truncateStr, ctx); + TruncateTableStmt truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, ctx); Catalog.getCurrentCatalog().truncateTable(truncateTableStmt); checkShowPartitionsResultNum("db5.tbl5", true, 1); checkShowPartitionsResultNum("db5.tbl5", false, 3); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/TruncateTableTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/TruncateTableTest.java index 80aec2011f2826..09732b6a5f6fad 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/TruncateTableTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/TruncateTableTest.java @@ -48,30 +48,30 @@ public static void setup() throws Exception { connectContext = UtFrameUtils.createDefaultCtx(); // create database String createDbStmtStr = "create database test;"; - String createTableStr = "create table test.tbl(d1 date, k1 int, k2 bigint)" + - "duplicate key(d1, k1) " + - "PARTITION BY RANGE(d1)" + - "(PARTITION p20210901 VALUES [('2021-09-01'), ('2021-09-02')))" + - "distributed by hash(k1) buckets 2 " + - "properties('replication_num' = '1');"; + String createTableStr = "create table test.tbl(d1 date, k1 int, 
k2 bigint)" + + "duplicate key(d1, k1) " + + "PARTITION BY RANGE(d1)" + + "(PARTITION p20210901 VALUES [('2021-09-01'), ('2021-09-02')))" + + "distributed by hash(k1) buckets 2 " + + "properties('replication_num' = '1');"; createDb(createDbStmtStr); createTable(createTableStr); - String createTable2 = "CREATE TABLE test.case_sensitive_table (\n" + - " `date_id` date NULL COMMENT \"\",\n" + - " `column2` tinyint(4) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`date_id`, `column2`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE(`date_id`)\n" + - "(\n" + - "PARTITION p20211006 VALUES [('2021-10-06'), ('2021-10-07')),\n" + - "PARTITION P20211007 VALUES [('2021-10-07'), ('2021-10-08')),\n" + - "PARTITION P20211008 VALUES [('2021-10-08'), ('2021-10-09')))\n" + - "DISTRIBUTED BY HASH(`column2`) BUCKETS 1\n" + - "PROPERTIES (\n" + - "\"replication_allocation\" = \"tag.location.default: 1\"\n" + - ");"; + String createTable2 = "CREATE TABLE test.case_sensitive_table (\n" + + " `date_id` date NULL COMMENT \"\",\n" + + " `column2` tinyint(4) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`date_id`, `column2`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE(`date_id`)\n" + + "(\n" + + "PARTITION p20211006 VALUES [('2021-10-06'), ('2021-10-07')),\n" + + "PARTITION P20211007 VALUES [('2021-10-07'), ('2021-10-08')),\n" + + "PARTITION P20211008 VALUES [('2021-10-08'), ('2021-10-09')))\n" + + "DISTRIBUTED BY HASH(`column2`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_allocation\" = \"tag.location.default: 1\"\n" + + ");"; createTable(createTable2); } @@ -119,7 +119,7 @@ public void testTruncateTable() throws Exception { checkShowTabletResultNum("test.tbl", "p20210904", 5); String truncateStr = "truncate table test.tbl;"; - TruncateTableStmt truncateTableStmt = (TruncateTableStmt)UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); + TruncateTableStmt truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); Catalog.getCurrentCatalog().truncateTable(truncateTableStmt); checkShowTabletResultNum("test.tbl", "p20210901", 2); checkShowTabletResultNum("test.tbl", "p20210902", 3); @@ -127,7 +127,7 @@ public void testTruncateTable() throws Exception { checkShowTabletResultNum("test.tbl", "p20210904", 5); truncateStr = "truncate table test.tbl partition(p20210901, p20210902, p20210903, p20210904);"; - truncateTableStmt = (TruncateTableStmt)UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); + truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); Catalog.getCurrentCatalog().truncateTable(truncateTableStmt); checkShowTabletResultNum("test.tbl", "p20210901", 2); checkShowTabletResultNum("test.tbl", "p20210902", 3); @@ -135,22 +135,22 @@ public void testTruncateTable() throws Exception { checkShowTabletResultNum("test.tbl", "p20210904", 5); truncateStr = "truncate table test.tbl partition (p20210901);"; - truncateTableStmt = (TruncateTableStmt)UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); + truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); Catalog.getCurrentCatalog().truncateTable(truncateTableStmt); checkShowTabletResultNum("test.tbl", "p20210901", 2); truncateStr = "truncate table test.tbl partition (p20210902);"; - truncateTableStmt = (TruncateTableStmt)UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); + truncateTableStmt = (TruncateTableStmt) 
UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); Catalog.getCurrentCatalog().truncateTable(truncateTableStmt); checkShowTabletResultNum("test.tbl", "p20210902", 3); truncateStr = "truncate table test.tbl partition (p20210903);"; - truncateTableStmt = (TruncateTableStmt)UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); + truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); Catalog.getCurrentCatalog().truncateTable(truncateTableStmt); checkShowTabletResultNum("test.tbl", "p20210903", 4); truncateStr = "truncate table test.tbl partition (p20210904);"; - truncateTableStmt = (TruncateTableStmt)UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); + truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); Catalog.getCurrentCatalog().truncateTable(truncateTableStmt); checkShowTabletResultNum("test.tbl", "p20210904", 5); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/DiskRebalanceTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/DiskRebalanceTest.java index 517caca9d575c4..1fbd3a8dd4b68e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/DiskRebalanceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/DiskRebalanceTest.java @@ -152,7 +152,8 @@ private void createPartitionsForTable(OlapTable olapTable, MaterializedIndex ind public void testDiskRebalancerWithSameUsageDisk() { // init system List beIds = Lists.newArrayList(10001L, 10002L, 10003L); - beIds.forEach(id -> systemInfoService.addBackend(RebalancerTestUtil.createBackend(id, 2048, Lists.newArrayList(512L,512L), 2))); + beIds.forEach(id -> systemInfoService.addBackend(RebalancerTestUtil.createBackend( + id, 2048, Lists.newArrayList(512L, 512L), 2))); olapTable = new OlapTable(2, "fake table", new ArrayList<>(), KeysType.DUP_KEYS, new RangePartitionInfo(), new HashDistributionInfo()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/RebalanceTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/RebalanceTest.java index e1bf4a6e7c254c..9d762752e323e2 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/RebalanceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/RebalanceTest.java @@ -185,14 +185,14 @@ private void createPartitionsForTable(OlapTable olapTable, MaterializedIndex ind public void testPrioBackends() { Rebalancer rebalancer = new DiskRebalancer(Catalog.getCurrentSystemInfo(), Catalog.getCurrentInvertedIndex()); // add - { + { // CHECKSTYLE IGNORE THIS LINE List backends = Lists.newArrayList(); for (int i = 0; i < 3; i++) { backends.add(RebalancerTestUtil.createBackend(10086 + i, 2048, 0)); } rebalancer.addPrioBackends(backends, 1000); Assert.assertTrue(rebalancer.hasPrioBackends()); - } + } // CHECKSTYLE IGNORE THIS LINE // remove for (int i = 0; i < 3; i++) { @@ -259,12 +259,6 @@ public void testPartitionRebalancer() { LOG.info("created tasks for tablet: {}", needCheckTablets); needCheckTablets.forEach(t -> Assert.assertEquals(4, invertedIndex.getReplicasByTabletId(t).size())); -// // If clone task execution is too slow, tabletChecker may want to delete the CLONE replica. 
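The hunks above are mechanical applications of a handful of checkstyle rules: string concatenations are re-wrapped so the "+" starts the continuation line, casts and lambda arrows gain surrounding whitespace, and over-long argument lists are split and indented. A minimal sketch of the target style, using hypothetical names rather than code from this patch (the comments name the standard checkstyle modules these changes appear to satisfy):

public class StyleSketch {
    public static void main(String[] args) {
        Object raw = "select 1";
        // WhitespaceAfter: one space between the cast and its operand
        String stmt = (String) raw;
        // WhitespaceAround: spaces on both sides of "->"
        Runnable task = () -> System.out.println(stmt);
        // OperatorWrap (nl policy): "+" begins each continuation line
        String sql = "create table t (k1 int)\n"
                + "distributed by hash(k1) buckets 1;";
        task.run();
        System.out.println(sql);
    }
}

Wrapping with the operator at the head of the line keeps continuations visually distinct, which is why every multi-line SQL literal in these tests moves its "+" from line end to line start.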
-// tabletChecker.runAfterCatalogReady(); -// Assert.assertTrue(tabletScheduler.containsTablet(50000)); -// // tabletScheduler handle redundant -// tabletScheduler.runAfterCatalogReady(); - for (Long tabletId : needCheckTablets) { TabletSchedCtx tabletSchedCtx = alternativeTablets.stream() .filter(ctx -> ctx.getTabletId() == tabletId) diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/RebalancerTestUtil.java b/fe/fe-core/src/test/java/org/apache/doris/clone/RebalancerTestUtil.java index 5bd8ef9e08454f..8f6f9f6f58d37e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/RebalancerTestUtil.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/RebalancerTestUtil.java @@ -75,8 +75,8 @@ public static void createTablet(TabletInvertedIndex invertedIndex, Database db, MaterializedIndex baseIndex = partition.getBaseIndex(); int schemaHash = olapTable.getSchemaHashByIndexId(baseIndex.getId()); - TabletMeta tabletMeta = new TabletMeta(db.getId(), olapTable.getId(), partition.getId(), baseIndex.getId(), - schemaHash, medium); + TabletMeta tabletMeta = new TabletMeta(db.getId(), olapTable.getId(), + partition.getId(), baseIndex.getId(), schemaHash, medium); Tablet tablet = new Tablet(tabletId); // add tablet to olapTable diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/TabletRepairAndBalanceTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/TabletRepairAndBalanceTest.java index a32d21ee6aa0bb..f60e5013b6d1f6 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/TabletRepairAndBalanceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/TabletRepairAndBalanceTest.java @@ -218,47 +218,47 @@ public void test() throws Exception { // create table // 1. no default tag, create will fail - String createStr = "create table test.tbl1\n" + - "(k1 date, k2 int)\n" + - "partition by range(k1)\n" + - "(\n" + - " partition p1 values less than(\"2021-06-01\"),\n" + - " partition p2 values less than(\"2021-07-01\"),\n" + - " partition p3 values less than(\"2021-08-01\")\n" + - ")\n" + - "distributed by hash(k2) buckets 10;"; + String createStr = "create table test.tbl1\n" + + "(k1 date, k2 int)\n" + + "partition by range(k1)\n" + + "(\n" + + " partition p1 values less than(\"2021-06-01\"),\n" + + " partition p2 values less than(\"2021-07-01\"),\n" + + " partition p3 values less than(\"2021-08-01\")\n" + + ")\n" + + "distributed by hash(k2) buckets 10;"; ExceptionChecker.expectThrows(DdlException.class, () -> createTable(createStr)); // nodes of zone2 not enough, create will fail - String createStr2 = "create table test.tbl1\n" + - "(k1 date, k2 int)\n" + - "partition by range(k1)\n" + - "(\n" + - " partition p1 values less than(\"2021-06-01\"),\n" + - " partition p2 values less than(\"2021-07-01\"),\n" + - " partition p3 values less than(\"2021-08-01\")\n" + - ")\n" + - "distributed by hash(k2) buckets 10\n" + - "properties\n" + - "(\n" + - " \"replication_allocation\" = \"tag.location.zone1: 2, tag.location.zone2: 3\"\n" + - ")"; + String createStr2 = "create table test.tbl1\n" + + "(k1 date, k2 int)\n" + + "partition by range(k1)\n" + + "(\n" + + " partition p1 values less than(\"2021-06-01\"),\n" + + " partition p2 values less than(\"2021-07-01\"),\n" + + " partition p3 values less than(\"2021-08-01\")\n" + + ")\n" + + "distributed by hash(k2) buckets 10\n" + + "properties\n" + + "(\n" + + " \"replication_allocation\" = \"tag.location.zone1: 2, tag.location.zone2: 3\"\n" + + ")"; ExceptionChecker.expectThrows(DdlException.class, () -> 
createTable(createStr2)); // normal, create success - String createStr3 = "create table test.tbl1\n" + - "(k1 date, k2 int)\n" + - "partition by range(k1)\n" + - "(\n" + - " partition p1 values less than(\"2021-06-01\"),\n" + - " partition p2 values less than(\"2021-07-01\"),\n" + - " partition p3 values less than(\"2021-08-01\")\n" + - ")\n" + - "distributed by hash(k2) buckets 10\n" + - "properties\n" + - "(\n" + - " \"replication_allocation\" = \"tag.location.zone1: 2, tag.location.zone2: 1\"\n" + - ")"; + String createStr3 = "create table test.tbl1\n" + + "(k1 date, k2 int)\n" + + "partition by range(k1)\n" + + "(\n" + + " partition p1 values less than(\"2021-06-01\"),\n" + + " partition p2 values less than(\"2021-07-01\"),\n" + + " partition p3 values less than(\"2021-08-01\")\n" + + ")\n" + + "distributed by hash(k2) buckets 10\n" + + "properties\n" + + "(\n" + + " \"replication_allocation\" = \"tag.location.zone1: 2, tag.location.zone2: 1\"\n" + + ")"; ExceptionChecker.expectThrowsNoException(() -> createTable(createStr3)); Database db = Catalog.getCurrentCatalog().getDbNullable("default_cluster:test"); OlapTable tbl = (OlapTable) db.getTableNullable("tbl1"); @@ -321,35 +321,35 @@ public void test() throws Exception { // [0, 1]: zone1 // [2, 3, 4]: zone2 // begin to test colocation table - String createStr4 = "create table test.col_tbl1\n" + - "(k1 date, k2 int)\n" + - "partition by range(k1)\n" + - "(\n" + - " partition p1 values less than(\"2021-06-01\"),\n" + - " partition p2 values less than(\"2021-07-01\"),\n" + - " partition p3 values less than(\"2021-08-01\")\n" + - ")\n" + - "distributed by hash(k2) buckets 10\n" + - "properties\n" + - "(\n" + - " \"replication_allocation\" = \"tag.location.zone1: 2, tag.location.zone2: 1\",\n" + - " \"colocate_with\" = \"g1\"\n" + - ")"; + String createStr4 = "create table test.col_tbl1\n" + + "(k1 date, k2 int)\n" + + "partition by range(k1)\n" + + "(\n" + + " partition p1 values less than(\"2021-06-01\"),\n" + + " partition p2 values less than(\"2021-07-01\"),\n" + + " partition p3 values less than(\"2021-08-01\")\n" + + ")\n" + + "distributed by hash(k2) buckets 10\n" + + "properties\n" + + "(\n" + + " \"replication_allocation\" = \"tag.location.zone1: 2, tag.location.zone2: 1\",\n" + + " \"colocate_with\" = \"g1\"\n" + + ")"; ExceptionChecker.expectThrowsNoException(() -> createTable(createStr4)); - String createStr5 = "create table test.col_tbl2\n" + - "(k1 date, k2 int)\n" + - "partition by range(k1)\n" + - "(\n" + - " partition p1 values less than(\"2021-06-01\"),\n" + - " partition p2 values less than(\"2021-07-01\"),\n" + - " partition p3 values less than(\"2021-08-01\")\n" + - ")\n" + - "distributed by hash(k2) buckets 10\n" + - "properties\n" + - "(\n" + - " \"replication_allocation\" = \"tag.location.zone1: 2, tag.location.zone2: 1\",\n" + - " \"colocate_with\" = \"g1\"\n" + - ")"; + String createStr5 = "create table test.col_tbl2\n" + + "(k1 date, k2 int)\n" + + "partition by range(k1)\n" + + "(\n" + + " partition p1 values less than(\"2021-06-01\"),\n" + + " partition p2 values less than(\"2021-07-01\"),\n" + + " partition p3 values less than(\"2021-08-01\")\n" + + ")\n" + + "distributed by hash(k2) buckets 10\n" + + "properties\n" + + "(\n" + + " \"replication_allocation\" = \"tag.location.zone1: 2, tag.location.zone2: 1\",\n" + + " \"colocate_with\" = \"g1\"\n" + + ")"; ExceptionChecker.expectThrowsNoException(() -> createTable(createStr5)); OlapTable colTbl1 = (OlapTable) db.getTableNullable("col_tbl1"); @@ -426,15 +426,15 
@@ public void test() throws Exception { Assert.assertEquals(Tag.DEFAULT_BACKEND_TAG, backends.get(4).getTag()); // create table tbl2 with "replication_num" property - String createStmt = "create table test.tbl2\n" + - "(k1 date, k2 int)\n" + - "partition by range(k1)\n" + - "(\n" + - " partition p1 values less than(\"2021-06-01\"),\n" + - " partition p2 values less than(\"2021-07-01\"),\n" + - " partition p3 values less than(\"2021-08-01\")\n" + - ")\n" + - "distributed by hash(k2) buckets 10;"; + String createStmt = "create table test.tbl2\n" + + "(k1 date, k2 int)\n" + + "partition by range(k1)\n" + + "(\n" + + " partition p1 values less than(\"2021-06-01\"),\n" + + " partition p2 values less than(\"2021-07-01\"),\n" + + " partition p3 values less than(\"2021-08-01\")\n" + + ")\n" + + "distributed by hash(k2) buckets 10;"; ExceptionChecker.expectThrowsNoException(() -> createTable(createStmt)); OlapTable tbl2 = (OlapTable) db.getTableNullable("tbl2"); ReplicaAllocation defaultAlloc = new ReplicaAllocation((short) 3); @@ -460,14 +460,14 @@ public void test() throws Exception { ExceptionChecker.expectThrowsNoException(() -> testColocateTableIndexSerialization(colocateTableIndex)); // test colocate tablet repair - String createStr6 = "create table test.col_tbl3\n" + - "(k1 date, k2 int)\n" + - "distributed by hash(k2) buckets 1\n" + - "properties\n" + - "(\n" + - " \"replication_num\" = \"3\",\n" + - " \"colocate_with\" = \"g3\"\n" + - ")"; + String createStr6 = "create table test.col_tbl3\n" + + "(k1 date, k2 int)\n" + + "distributed by hash(k2) buckets 1\n" + + "properties\n" + + "(\n" + + " \"replication_num\" = \"3\",\n" + + " \"colocate_with\" = \"g3\"\n" + + ")"; ExceptionChecker.expectThrowsNoException(() -> createTable(createStr6)); OlapTable tbl3 = db.getOlapTableOrDdlException("col_tbl3"); diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/TabletReplicaTooSlowTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/TabletReplicaTooSlowTest.java index 61eebfc3de71ae..2a929d8f838714 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/TabletReplicaTooSlowTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/TabletReplicaTooSlowTest.java @@ -156,13 +156,13 @@ private static void updateReplicaVersionCount() { @Test public void test() throws Exception { // test colocate tablet repair - String createStr = "create table test.tbl1\n" + - "(k1 date, k2 int)\n" + - "distributed by hash(k2) buckets 1\n" + - "properties\n" + - "(\n" + - " \"replication_num\" = \"3\"\n" + - ")"; + String createStr = "create table test.tbl1\n" + + "(k1 date, k2 int)\n" + + "distributed by hash(k2) buckets 1\n" + + "properties\n" + + "(\n" + + " \"replication_num\" = \"3\"\n" + + ")"; ExceptionChecker.expectThrowsNoException(() -> createTable(createStr)); int maxLoop = 300; diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgoTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgoTest.java index 6acb26a5b430c7..539ce997d6792f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgoTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgoTest.java @@ -174,9 +174,11 @@ public void testInvalidClusterBalanceInfo() { } try { - algo.getNextMoves(new ClusterBalanceInfo() {{ - beByTotalReplicaCount.put(0L, 10001L); - }}, 0); + algo.getNextMoves(new ClusterBalanceInfo() { + { + beByTotalReplicaCount.put(0L, 10001L); + } + }, 0); } 
catch (Exception e) { Assert.fail(); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/CidrTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/CidrTest.java index a63b33df7bee6e..ccb07b756bb476 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/CidrTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/CidrTest.java @@ -25,27 +25,27 @@ public class CidrTest { public void testWrongFormat() { // no mask try { - CIDR cidr = new CIDR("192.168.17.0/"); + new CIDR("192.168.17.0/"); // should not be here - Assert.assertTrue(false); + Assert.fail(); } catch (Exception e) { Assert.assertTrue(e instanceof IllegalArgumentException); } // mask is too big try { - CIDR cidr = new CIDR("192.168.17.0/88"); + new CIDR("192.168.17.0/88"); // should not be here - Assert.assertTrue(false); + Assert.fail(); } catch (Exception e) { Assert.assertTrue(e instanceof IllegalArgumentException); } // ip is too short try { - CIDR cidr = new CIDR("192.168./88"); + new CIDR("192.168./88"); // should not be here - Assert.assertTrue(false); + Assert.fail(); } catch (Exception e) { Assert.assertTrue(e instanceof IllegalArgumentException); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/MarkDownParserTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/MarkDownParserTest.java index b0c4cfb1c9ab63..bcdbf3e23c9622 100755 --- a/fe/fe-core/src/test/java/org/apache/doris/common/MarkDownParserTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/MarkDownParserTest.java @@ -166,7 +166,7 @@ public void testEmptyTitle() throws UserException { lines.add("### url"); lines.add("http://www.baidu.com"); MarkDownParser parser = new MarkDownParser(lines); - Map> map = parser.parse(); + parser.parse(); } // no valid topic diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/PatternMatcherTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/PatternMatcherTest.java index adeda52c7db93a..dd4708cc7da73a 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/PatternMatcherTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/PatternMatcherTest.java @@ -104,23 +104,23 @@ public void testNormal() { } @Test - public void testAbnormal(){ + public void testAbnormal() { try { - PatternMatcher matcher = PatternMatcher.createMysqlPattern("^abc", false); + PatternMatcher.createMysqlPattern("^abc", false); Assert.fail(); } catch (AnalysisException e) { System.out.println(e.getMessage()); } try { - PatternMatcher matcher = PatternMatcher.createMysqlPattern("\\\\(abc", false); + PatternMatcher.createMysqlPattern("\\\\(abc", false); Assert.fail(); } catch (AnalysisException e) { System.out.println(e.getMessage()); } try { - PatternMatcher matcher = PatternMatcher.createMysqlPattern("\\*abc", false); + PatternMatcher.createMysqlPattern("\\*abc", false); Assert.fail(); } catch (AnalysisException e) { System.out.println(e.getMessage()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/PropertyAnalyzerTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/PropertyAnalyzerTest.java index 71192c6953e90e..73f329fd281f81 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/PropertyAnalyzerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/PropertyAnalyzerTest.java @@ -84,8 +84,8 @@ public void testBfColumnsError() { // no bf columns properties.put(PropertyAnalyzer.PROPERTIES_BF_COLUMNS, ""); try { - Assert.assertEquals(Sets.newHashSet(), PropertyAnalyzer.analyzeBloomFilterColumns(properties, columns, - 
KeysType.AGG_KEYS)); + Assert.assertEquals(Sets.newHashSet(), PropertyAnalyzer.analyzeBloomFilterColumns( + properties, columns, KeysType.AGG_KEYS)); } catch (AnalysisException e) { Assert.fail(); } @@ -164,8 +164,8 @@ public void testStorageFormat() throws AnalysisException { Assert.assertEquals(TStorageFormat.V2, PropertyAnalyzer.analyzeStorageFormat(propertiesV2)); Assert.assertEquals(TStorageFormat.V2, PropertyAnalyzer.analyzeStorageFormat(propertiesDefault)); expectedEx.expect(AnalysisException.class); - expectedEx.expectMessage("Storage format V1 has been deprecated since version 0.14," + - " please use V2 instead"); + expectedEx.expectMessage("Storage format V1 has been deprecated since version 0.14," + + " please use V2 instead"); PropertyAnalyzer.analyzeStorageFormat(propertiesV1); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/parquet/ParquetReaderTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/parquet/ParquetReaderTest.java index fa0ae61c09231d..d797a9c1b26ed5 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/parquet/ParquetReaderTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/parquet/ParquetReaderTest.java @@ -51,7 +51,7 @@ public void testWrongFormat() { properties.put("bos_secret_accesskey", "2"); BrokerDesc brokerDesc = new BrokerDesc("dummy", properties); - ParquetReader reader2 = ParquetReader.create(file2, brokerDesc,"127.0.0.1", 8118); + ParquetReader reader2 = ParquetReader.create(file2, brokerDesc, "127.0.0.1", 8118); LOG.info(reader2.getSchema(false)); } catch (Exception e) { LOG.info("error: ", e); diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/proc/BackendsProcDirTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/proc/BackendsProcDirTest.java index 6e0ae4ccb5aded..971a1f5fc55ba5 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/proc/BackendsProcDirTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/proc/BackendsProcDirTest.java @@ -158,17 +158,16 @@ public void testLookupNormal() throws AnalysisException { @Test public void testLookupInvalid() { BackendsProcDir dir; - ProcNodeInterface node; dir = new BackendsProcDir(systemInfoService); try { - node = dir.lookup(null); + dir.lookup(null); } catch (AnalysisException e) { e.printStackTrace(); } try { - node = dir.lookup(""); + dir.lookup(""); } catch (AnalysisException e) { e.printStackTrace(); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/proc/DbsProcDirTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/proc/DbsProcDirTest.java index e9c4d47cdc0db9..f2ede843985a06 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/proc/DbsProcDirTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/proc/DbsProcDirTest.java @@ -123,18 +123,17 @@ public void testLookupNormal() throws AnalysisException { @Test public void testLookupInvalid() { DbsProcDir dir; - ProcNodeInterface node; dir = new DbsProcDir(catalog); try { - node = dir.lookup(null); + dir.lookup(null); } catch (AnalysisException e) { // TODO Auto-generated catch block e.printStackTrace(); } try { - node = dir.lookup(""); + dir.lookup(""); } catch (AnalysisException e) { // TODO Auto-generated catch block e.printStackTrace(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/util/BrokerUtilTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/util/BrokerUtilTest.java index 928c1374a7a18c..3508d3b71a12c2 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/util/BrokerUtilTest.java +++ 
b/fe/fe-core/src/test/java/org/apache/doris/common/util/BrokerUtilTest.java @@ -73,16 +73,18 @@ public void parseColumnsFromPath() { path = "/path/to/dir/k1/xxx.csv"; try { - List columns = BrokerUtil.parseColumnsFromPath(path, Collections.singletonList("k1")); + BrokerUtil.parseColumnsFromPath(path, Collections.singletonList("k1")); Assert.fail(); } catch (UserException ignored) { + // CHECKSTYLE IGNORE THIS LINE } path = "/path/to/dir/k1=v1/xxx.csv"; try { - List columns = BrokerUtil.parseColumnsFromPath(path, Collections.singletonList("k2")); + BrokerUtil.parseColumnsFromPath(path, Collections.singletonList("k2")); Assert.fail(); } catch (UserException ignored) { + // CHECKSTYLE IGNORE THIS LINE } path = "/path/to/dir/k1=v2/k1=v1/xxx.csv"; @@ -105,16 +107,18 @@ public void parseColumnsFromPath() { path = "/path/to/dir/k2=v2/a/k1=v1/xxx.csv"; try { - List columns = BrokerUtil.parseColumnsFromPath(path, Lists.newArrayList("k1", "k2")); + BrokerUtil.parseColumnsFromPath(path, Lists.newArrayList("k1", "k2")); Assert.fail(); } catch (UserException ignored) { + // CHECKSTYLE IGNORE THIS LINE } path = "/path/to/dir/k2=v2/k1=v1/xxx.csv"; try { - List columns = BrokerUtil.parseColumnsFromPath(path, Lists.newArrayList("k1", "k2", "k3")); + BrokerUtil.parseColumnsFromPath(path, Lists.newArrayList("k1", "k2", "k3")); Assert.fail(); } catch (UserException ignored) { + // CHECKSTYLE IGNORE THIS LINE } path = "/path/to/dir/k2=v2//k1=v1//xxx.csv"; @@ -137,14 +141,15 @@ public void parseColumnsFromPath() { path = "/path/to/dir/k2==v2=//k1=v1/"; try { - List columns = BrokerUtil.parseColumnsFromPath(path, Lists.newArrayList("k1", "k2")); + BrokerUtil.parseColumnsFromPath(path, Lists.newArrayList("k1", "k2")); Assert.fail(); } catch (UserException ignored) { + // CHECKSTYLE IGNORE THIS LINE } path = "/path/to/dir/k1=2/a/xxx.csv"; try { - List columns = BrokerUtil.parseColumnsFromPath(path, Collections.singletonList("k1")); + BrokerUtil.parseColumnsFromPath(path, Collections.singletonList("k1")); Assert.fail(); } catch (UserException ignored) { ignored.printStackTrace(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/util/ListUtilTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/util/ListUtilTest.java index 9889d093556c76..21652d4f186194 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/util/ListUtilTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/util/ListUtilTest.java @@ -100,9 +100,9 @@ public void testSplitBySizeWithLargeExpectSize() { List> splitLists = ListUtil.splitBySize(lists, expectSize); Assert.assertEquals(splitLists.size(), lists.size()); - Assert.assertTrue( splitLists.get(0).get(0) == 1); - Assert.assertTrue( splitLists.get(1).get(0) == 2); - Assert.assertTrue( splitLists.get(2).get(0) == 3); + Assert.assertEquals(1, (int) splitLists.get(0).get(0)); + Assert.assertEquals(2, (int) splitLists.get(1).get(0)); + Assert.assertEquals(3, (int) splitLists.get(2).get(0)); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/external/elasticsearch/EsNodeInfoTest.java b/fe/fe-core/src/test/java/org/apache/doris/external/elasticsearch/EsNodeInfoTest.java index 6192caa5b58867..59b9d8757ec69e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/external/elasticsearch/EsNodeInfoTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/external/elasticsearch/EsNodeInfoTest.java @@ -22,17 +22,15 @@ import org.junit.Assert; import org.junit.Test; -import java.util.HashMap; import java.util.Map; -public class EsNodeInfoTest extends EsTestCase{ 
+public class EsNodeInfoTest extends EsTestCase { @Test public void parsePublishAddressTest() throws Exception { ObjectMapper mapper = new ObjectMapper(); JsonParser jsonParser = mapper.getJsonFactory().createJsonParser(loadJsonFromFile("data/es/test_nodes_http.json")); Map<String, Map<String, Object>> nodesData = (Map<String, Map<String, Object>>) mapper.readValue(jsonParser, Map.class).get("nodes"); - Map<String, EsNodeInfo> nodesMap = new HashMap<>(); for (Map.Entry<String, Map<String, Object>> entry : nodesData.entrySet()) { EsNodeInfo node = new EsNodeInfo(entry.getKey(), entry.getValue(), false); if ("node-A".equals(node.getName())) { diff --git a/fe/fe-core/src/test/java/org/apache/doris/external/elasticsearch/MappingPhaseTest.java b/fe/fe-core/src/test/java/org/apache/doris/external/elasticsearch/MappingPhaseTest.java index e29613b52958a6..40dc9eaf6f3f02 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/external/elasticsearch/MappingPhaseTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/external/elasticsearch/MappingPhaseTest.java @@ -80,7 +80,7 @@ public void testTypeNotExist() throws Exception { } @Test - public void testWorkFlow(@Injectable EsRestClient client) throws Exception{ + public void testWorkFlow(@Injectable EsRestClient client) throws Exception { EsTable table = fakeEsTable("fake", "test", "doc", columns); SearchContext searchContext1 = new SearchContext(table); String jsonMapping = loadJsonFromFile("data/es/test_index_mapping.json"); diff --git a/fe/fe-core/src/test/java/org/apache/doris/http/DorisHttpTestCase.java b/fe/fe-core/src/test/java/org/apache/doris/http/DorisHttpTestCase.java index 768e0e8cc6c2fa..840b2a1672d20e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/http/DorisHttpTestCase.java +++ b/fe/fe-core/src/test/java/org/apache/doris/http/DorisHttpTestCase.java @@ -306,6 +306,7 @@ public static void initHttpServer() throws IllegalArgException, InterruptedExcep try { socket.close(); } catch (Exception e) { + // CHECKSTYLE IGNORE THIS LINE } } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/http/HttpAuthManagerTest.java b/fe/fe-core/src/test/java/org/apache/doris/http/HttpAuthManagerTest.java index ad41c61160d975..bde7dc96cad770 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/http/HttpAuthManagerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/http/HttpAuthManagerTest.java @@ -39,14 +39,14 @@ public void testNormal() { authMgr.addSessionValue(sessionId, sessionValue); Assert.assertEquals(1, authMgr.getAuthSessions().size()); List<String> sessionIds = new ArrayList<>(); - sessionIds.add (sessionId); + sessionIds.add(sessionId); System.out.println("username in test: " + authMgr.getSessionValue(sessionIds).currentUser); Assert.assertEquals(username, authMgr.getSessionValue(sessionIds).currentUser.getQualifiedUser()); String noExistSession = "no-exist-session-id"; sessionIds.clear(); - sessionIds.add (noExistSession); + sessionIds.add(noExistSession); Assert.assertNull(authMgr.getSessionValue(sessionIds)); Assert.assertEquals(1, authMgr.getAuthSessions().size()); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/DeleteHandlerTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/DeleteHandlerTest.java index 4b1360c2ce67a0..b050b633f12ebe 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/DeleteHandlerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/DeleteHandlerTest.java @@ -196,6 +196,7 @@ public void testUnQuorumTimeout() throws DdlException, QueryStateException { try { globalTransactionMgr.abortTransaction(db.getId(), anyLong, anyString); } catch (UserException e) { + // CHECKSTYLE IGNORE
THIS LINE } minTimes = 0; } @@ -246,7 +247,8 @@ public TransactionState getTransactionState(long transactionId) { } try { deleteHandler.process(deleteStmt); - }catch (QueryStateException e) { + } catch (QueryStateException e) { + // CHECKSTYLE IGNORE THIS LINE } Map idToDeleteJob = Deencapsulation.getField(deleteHandler, "idToDeleteJob"); @@ -297,6 +299,7 @@ public TransactionState getTransactionState(long transactionId) { try { deleteHandler.process(deleteStmt); } catch (QueryStateException e) { + // CHECKSTYLE IGNORE THIS LINE } Map idToDeleteJob = Deencapsulation.getField(deleteHandler, "idToDeleteJob"); @@ -334,6 +337,7 @@ public Collection getTabletDeleteInfo() { try { countDownLatch.await(anyLong, (TimeUnit) any); } catch (InterruptedException e) { + // CHECKSTYLE IGNORE THIS LINE } result = false; } @@ -344,6 +348,7 @@ public Collection getTabletDeleteInfo() { try { globalTransactionMgr.commitTransaction(anyLong, (List
) any, anyLong, (List) any, (TxnCommitAttachment) any); } catch (UserException e) { + // CHECKSTYLE IGNORE THIS LINE } result = new UserException("commit fail"); } @@ -365,6 +370,7 @@ public Collection getTabletDeleteInfo() { } throw e; } catch (QueryStateException e) { + // CHECKSTYLE IGNORE THIS LINE } Assert.fail(); } @@ -396,6 +402,7 @@ public Collection getTabletDeleteInfo() { try { countDownLatch.await(anyLong, (TimeUnit) any); } catch (InterruptedException e) { + // CHECKSTYLE IGNORE THIS LINE } result = false; } @@ -416,6 +423,7 @@ public Collection getTabletDeleteInfo() { try { deleteHandler.process(deleteStmt); } catch (QueryStateException e) { + // CHECKSTYLE IGNORE THIS LINE } Map idToDeleteJob = Deencapsulation.getField(deleteHandler, "idToDeleteJob"); @@ -453,6 +461,7 @@ public Collection getTabletDeleteInfo() { try { countDownLatch.await(anyLong, (TimeUnit) any); } catch (InterruptedException e) { + // CHECKSTYLE IGNORE THIS LINE } result = false; } @@ -466,6 +475,7 @@ public Collection getTabletDeleteInfo() { try { deleteHandler.process(deleteStmt); } catch (QueryStateException e) { + // CHECKSTYLE IGNORE THIS LINE } Map idToDeleteJob = Deencapsulation.getField(deleteHandler, "idToDeleteJob"); diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/DppSchedulerTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/DppSchedulerTest.java index 971748ad5719b5..9e0fdbc9b6addf 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/DppSchedulerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/DppSchedulerTest.java @@ -46,11 +46,6 @@ public class DppSchedulerTest { @Before public void setUp() { - // mock palo home env -// PowerMock.mockStatic(System.class); -// EasyMock.expect(System.getenv("DORIS_HOME")).andReturn(".").anyTimes(); -// PowerMock.replay(System.class); - UnitTestUtil.initDppConfig(); dppScheduler = new DppScheduler(Load.dppDefaultConfig); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/LoadJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/LoadJobTest.java index 846ea6163babfc..32a3c156575867 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/LoadJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/LoadJobTest.java @@ -135,7 +135,7 @@ public LoadJob getLoadJob() { @Test public void testSerialization() throws Exception { // mock meta version - FakeCatalog fakeCatalog = new FakeCatalog(); + FakeCatalog fakeCatalog = new FakeCatalog(); // CHECKSTYLE IGNORE THIS LINE FakeCatalog.setMetaVersion(FeConstants.meta_version); File file = new File("./loadJobTest" + System.currentTimeMillis()); @@ -202,10 +202,10 @@ public void testClear() throws Exception { @Test public void testEqual() throws Exception { - LoadJob job1 = getLoadJob(); - LoadJob job2 = new LoadJob(); + getLoadJob(); + new LoadJob(); Thread.sleep(10); - LoadJob job3 = getLoadJob(); + getLoadJob(); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/BrokerLoadJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/BrokerLoadJobTest.java index 082be4552cc19c..4baf3fe09b7340 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/BrokerLoadJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/BrokerLoadJobTest.java @@ -343,7 +343,7 @@ public void testPendingTaskOnFinishedWithUserInfo(@Mocked BrokerPendingTaskAttac @Mocked OlapTable olapTable, @Mocked PlanFragment sinkFragment, @Mocked OlapTableSink olapTableSink, - @Mocked BrokerScanNode scanNode) throws Exception{ + @Mocked 
BrokerScanNode scanNode) throws Exception { List schema = new ArrayList<>(); schema.add(new Column("a", PrimitiveType.BIGINT)); Map properties = new HashMap<>(); @@ -359,7 +359,7 @@ public void testPendingTaskOnFinishedWithUserInfo(@Mocked BrokerPendingTaskAttac UUID uuid = UUID.randomUUID(); TUniqueId loadId = new TUniqueId(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits()); RuntimeProfile jobProfile = new RuntimeProfile("test"); - LoadLoadingTask task = new LoadLoadingTask(database, olapTable,brokerDesc, fileGroups, + LoadLoadingTask task = new LoadLoadingTask(database, olapTable, brokerDesc, fileGroups, 100, 100, false, 100, callback, "", 100, 1, 1, true, jobProfile, false); try { diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/LoadJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/LoadJobTest.java index d466a41b140bea..30cf65b44e12bc 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/LoadJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/LoadJobTest.java @@ -63,6 +63,7 @@ public void testSetJobPropertiesWithErrorTimeout() { loadJob.setJobProperties(jobProperties); Assert.fail(); } catch (DdlException e) { + // CHECKSTYLE IGNORE THIS LINE } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/LoadManagerTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/LoadManagerTest.java index db17f2b5be972b..6a28a33c507b9c 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/LoadManagerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/LoadManagerTest.java @@ -113,7 +113,7 @@ public void testCreateHadoopJob(@Injectable LoadStmt stmt, public void testSerializationNormal(@Mocked Catalog catalog, @Injectable Database database, @Injectable Table table) throws Exception { - new Expectations(){ + new Expectations() { { catalog.getDbNullable(anyLong); minTimes = 0; @@ -148,7 +148,7 @@ public void testSerializationWithJobRemoved(@Mocked MetaContext metaContext, @Mocked Catalog catalog, @Injectable Database database, @Injectable Table table) throws Exception { - new Expectations(){ + new Expectations() { { catalog.getDbNullable(anyLong); minTimes = 0; diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkEtlJobHandlerTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkEtlJobHandlerTest.java index d05d59668a71bc..63e3c87491ba35 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkEtlJobHandlerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkEtlJobHandlerTest.java @@ -70,50 +70,50 @@ public class SparkEtlJobHandlerTest { private String remoteArchivePath; private SparkRepository.SparkArchive archive; - private final String runningReport = "Application Report :\n" + - "Application-Id : application_15888888888_0088\n" + - "Application-Name : label0\n" + - "Application-Type : SPARK-2.4.1\n" + - "User : test\n" + - "Queue : test-queue\n" + - "Start-Time : 1597654469958\n" + - "Finish-Time : 0\n" + - "Progress : 50%\n" + - "State : RUNNING\n" + - "Final-State : UNDEFINED\n" + - "Tracking-URL : http://127.0.0.1:8080/proxy/application_1586619723848_0088/\n" + - "RPC Port : 40236\n" + - "AM Host : host-name"; - - private final String failedReport = "Application Report :\n" + - "Application-Id : application_15888888888_0088\n" + - "Application-Name : label0\n" + - "Application-Type : SPARK-2.4.1\n" + - "User : test\n" + - "Queue : test-queue\n" + - "Start-Time : 1597654469958\n" + - 
"Finish-Time : 1597654801939\n" + - "Progress : 100%\n" + - "State : FINISHED\n" + - "Final-State : FAILED\n" + - "Tracking-URL : http://127.0.0.1:8080/proxy/application_1586619723848_0088/\n" + - "RPC Port : 40236\n" + - "AM Host : host-name"; - - private final String finishReport = "Application Report :\n" + - "Application-Id : application_15888888888_0088\n" + - "Application-Name : label0\n" + - "Application-Type : SPARK-2.4.1\n" + - "User : test\n" + - "Queue : test-queue\n" + - "Start-Time : 1597654469958\n" + - "Finish-Time : 1597654801939\n" + - "Progress : 100%\n" + - "State : FINISHED\n" + - "Final-State : SUCCEEDED\n" + - "Tracking-URL : http://127.0.0.1:8080/proxy/application_1586619723848_0088/\n" + - "RPC Port : 40236\n" + - "AM Host : host-name"; + private final String runningReport = "Application Report :\n" + + "Application-Id : application_15888888888_0088\n" + + "Application-Name : label0\n" + + "Application-Type : SPARK-2.4.1\n" + + "User : test\n" + + "Queue : test-queue\n" + + "Start-Time : 1597654469958\n" + + "Finish-Time : 0\n" + + "Progress : 50%\n" + + "State : RUNNING\n" + + "Final-State : UNDEFINED\n" + + "Tracking-URL : http://127.0.0.1:8080/proxy/application_1586619723848_0088/\n" + + "RPC Port : 40236\n" + + "AM Host : host-name"; + + private final String failedReport = "Application Report :\n" + + "Application-Id : application_15888888888_0088\n" + + "Application-Name : label0\n" + + "Application-Type : SPARK-2.4.1\n" + + "User : test\n" + + "Queue : test-queue\n" + + "Start-Time : 1597654469958\n" + + "Finish-Time : 1597654801939\n" + + "Progress : 100%\n" + + "State : FINISHED\n" + + "Final-State : FAILED\n" + + "Tracking-URL : http://127.0.0.1:8080/proxy/application_1586619723848_0088/\n" + + "RPC Port : 40236\n" + + "AM Host : host-name"; + + private final String finishReport = "Application Report :\n" + + "Application-Id : application_15888888888_0088\n" + + "Application-Name : label0\n" + + "Application-Type : SPARK-2.4.1\n" + + "User : test\n" + + "Queue : test-queue\n" + + "Start-Time : 1597654469958\n" + + "Finish-Time : 1597654801939\n" + + "Progress : 100%\n" + + "State : FINISHED\n" + + "Final-State : SUCCEEDED\n" + + "Tracking-URL : http://127.0.0.1:8080/proxy/application_1586619723848_0088/\n" + + "RPC Port : 40236\n" + + "AM Host : host-name"; @Before @@ -137,8 +137,11 @@ public void setUp() { } @Test - public void testSubmitEtlJob(@Mocked BrokerUtil brokerUtil, @Mocked SparkLauncher launcher, @Injectable Process process, - @Mocked SparkLoadAppHandle handle ) throws IOException, LoadException { + public void testSubmitEtlJob( + @Mocked BrokerUtil brokerUtil, + @Mocked SparkLauncher launcher, + @Injectable Process process, + @Mocked SparkLoadAppHandle handle) throws IOException, LoadException { new Expectations() { { launcher.launch(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkLauncherMonitorTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkLauncherMonitorTest.java index 8f9761896a3ef0..f9874c33c7c5d2 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkLauncherMonitorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkLauncherMonitorTest.java @@ -63,6 +63,7 @@ public void testLogMonitorNormal() { try { logMonitor.join(); } catch (InterruptedException e) { + // CHECKSTYLE IGNORE THIS LINE } } catch (IOException e) { Assert.fail(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkLoadPendingTaskTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkLoadPendingTaskTest.java index b5462289c96218..840d305d895973 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkLoadPendingTaskTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkLoadPendingTaskTest.java @@ -73,7 +73,6 @@ public void testExecuteTask(@Injectable SparkLoadJob sparkLoadJob, @Mocked Catalog catalog, @Injectable SparkLoadAppHandle handle, @Injectable Database database, @Injectable OlapTable table) throws UserException { - long dbId = 0L; long tableId = 1L; // columns @@ -150,7 +149,6 @@ public void testRangePartitionHashDistribution(@Injectable SparkLoadJob sparkLoa @Mocked Catalog catalog, @Injectable Database database, @Injectable OlapTable table) throws LoadException, DdlException, AnalysisException { - long dbId = 0L; long tableId = 1L; // c1 is partition column, c2 is distribution column diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkRepositoryTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkRepositoryTest.java index f4787a8e1c67df..c0623538981b06 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkRepositoryTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkRepositoryTest.java @@ -80,11 +80,15 @@ public void testNormal() { new MockUp() { @Mock - boolean checkPathExist(String remotePath, BrokerDesc brokerDesc) - throws UserException { return true; } + boolean checkPathExist(String remotePath, BrokerDesc brokerDesc) throws UserException { + return true; + } + @Mock void parseFile(String path, BrokerDesc brokerDesc, List fileStatuses) - throws UserException { fileStatuses.addAll(files); } + throws UserException { + fileStatuses.addAll(files); + } }; BrokerDesc brokerDesc = new BrokerDesc("broker", Maps.newHashMap()); @@ -131,14 +135,19 @@ void parseFile(String path, BrokerDesc brokerDesc, List fileS public void testArchiveNotExists() { new MockUp() { @Mock - boolean checkPathExist(String remotePath, BrokerDesc brokerDesc) - throws UserException { return false; } + boolean checkPathExist(String remotePath, BrokerDesc brokerDesc) throws UserException { + return false; + } + @Mock - void writeFile(String srcFilePath, String destFilePath, BrokerDesc brokerDesc) - throws UserException { return; } + void writeFile(String srcFilePath, String destFilePath, BrokerDesc brokerDesc) throws UserException { + return; + } + @Mock - void rename(String origFilePath, String destFilePath, BrokerDesc brokerDesc) - throws UserException { return; } + void rename(String origFilePath, String destFilePath, BrokerDesc brokerDesc) throws UserException { + return; + } }; BrokerDesc brokerDesc = new BrokerDesc("broker", Maps.newHashMap()); @@ -188,20 +197,30 @@ void rename(String origFilePath, String destFilePath, BrokerDesc brokerDesc) public void testLibraryMd5MissMatch() { new MockUp() { @Mock - boolean checkPathExist(String remotePath, BrokerDesc brokerDesc) - throws UserException { return true; } + boolean checkPathExist(String remotePath, BrokerDesc brokerDesc) throws UserException { + return true; + } + @Mock void parseFile(String path, BrokerDesc brokerDesc, List fileStatuses) - throws UserException { fileStatuses.addAll(files); } + throws UserException { + fileStatuses.addAll(files); + } + @Mock - void deletePath(String path, BrokerDesc brokerDesc) - throws UserException { return; } + void deletePath(String path, BrokerDesc brokerDesc) throws UserException { + return; + } + @Mock - void 
writeFile(String srcFilePath, String destFilePath, BrokerDesc brokerDesc) - throws UserException { return; } + void writeFile(String srcFilePath, String destFilePath, BrokerDesc brokerDesc) throws UserException { + return; + } + @Mock - void rename(String origFilePath, String destFilePath, BrokerDesc brokerDesc) - throws UserException { return; } + void rename(String origFilePath, String destFilePath, BrokerDesc brokerDesc) throws UserException { + return; + } }; // new md5dum of local library diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/YarnApplicationReportTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/YarnApplicationReportTest.java index 6a0523b25e054e..e600f512900c96 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/YarnApplicationReportTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/YarnApplicationReportTest.java @@ -26,20 +26,20 @@ import org.junit.Test; public class YarnApplicationReportTest { - private final String runningReport = "Application Report :\n" + - "Application-Id : application_15888888888_0088\n" + - "Application-Name : label0\n" + - "Application-Type : SPARK-2.4.1\n" + - "User : test\n" + - "Queue : test-queue\n" + - "Start-Time : 1597654469958\n" + - "Finish-Time : 0\n" + - "Progress : 50%\n" + - "State : RUNNING\n" + - "Final-State : UNDEFINED\n" + - "Tracking-URL : http://127.0.0.1:8080/proxy/application_1586619723848_0088/\n" + - "RPC Port : 40236\n" + - "AM Host : host-name"; + private final String runningReport = "Application Report :\n" + + "Application-Id : application_15888888888_0088\n" + + "Application-Name : label0\n" + + "Application-Type : SPARK-2.4.1\n" + + "User : test\n" + + "Queue : test-queue\n" + + "Start-Time : 1597654469958\n" + + "Finish-Time : 0\n" + + "Progress : 50%\n" + + "State : RUNNING\n" + + "Final-State : UNDEFINED\n" + + "Tracking-URL : http://127.0.0.1:8080/proxy/application_1586619723848_0088/\n" + + "RPC Port : 40236\n" + + "AM Host : host-name"; @Test public void testParseToReport() { diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/KafkaProducerTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/KafkaProducerTest.java index 94b81eb3c6509e..4e89a8ee37ac1f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/KafkaProducerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/KafkaProducerTest.java @@ -51,8 +51,8 @@ public static void main(String[] args) throws InterruptedException { ProducerRecord<String, String> record = new ProducerRecord<>("miaoling", value); try { RecordMetadata metadata = kafkaProducer.send(record).get(); - System.out.println("Record send with value " + value + " to partition " + - metadata.partition() + " with offset " + metadata.offset()); + System.out.println("Record send with value " + value + " to partition " + + metadata.partition() + " with offset " + metadata.offset()); } catch (ExecutionException e) { System.out.println("Error in sending record " + value); System.out.println(e); diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/KafkaRoutineLoadJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/KafkaRoutineLoadJobTest.java index 436a93de7a435b..52b49623bfc34f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/KafkaRoutineLoadJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/KafkaRoutineLoadJobTest.java @@ -48,7 +48,12 @@ import com.google.common.base.Joiner; import
com.google.common.collect.Lists; import com.google.common.collect.Maps; - +import mockit.Expectations; +import mockit.Injectable; +import mockit.Mock; +import mockit.MockUp; +import mockit.Mocked; +import mockit.Verifications; import org.apache.kafka.common.PartitionInfo; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -61,12 +66,6 @@ import java.util.List; import java.util.Map; import java.util.UUID; -import mockit.Expectations; -import mockit.Injectable; -import mockit.Mock; -import mockit.MockUp; -import mockit.Mocked; -import mockit.Verifications; public class KafkaRoutineLoadJobTest { private static final Logger LOG = LogManager.getLogger(KafkaRoutineLoadJobTest.class); diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadJobTest.java index f78f5a563b453a..e4b0282cb44ab8 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadJobTest.java @@ -35,19 +35,18 @@ import com.google.common.base.Strings; import com.google.common.collect.Lists; import com.google.common.collect.Maps; - -import org.apache.kafka.common.PartitionInfo; -import org.junit.Assert; -import org.junit.Test; - -import java.util.List; -import java.util.Map; import java_cup.runtime.Symbol; import mockit.Expectations; import mockit.Injectable; import mockit.Mock; import mockit.MockUp; import mockit.Mocked; +import org.apache.kafka.common.PartitionInfo; +import org.junit.Assert; +import org.junit.Test; + +import java.util.List; +import java.util.Map; public class RoutineLoadJobTest { @@ -166,7 +165,6 @@ void writeUnlock() { } }; - String txnStatusChangeReasonString = "no data"; RoutineLoadJob routineLoadJob = new KafkaRoutineLoadJob(); Deencapsulation.setField(routineLoadJob, "state", RoutineLoadJob.JobState.RUNNING); Deencapsulation.setField(routineLoadJob, "routineLoadTaskInfoList", routineLoadTaskInfoList); @@ -323,28 +321,28 @@ public void testGetShowCreateInfo() throws UserException { Deencapsulation.setField(routineLoadJob, "maxBatchRows", 10); Deencapsulation.setField(routineLoadJob, "maxBatchRows", 10); String showCreateInfo = routineLoadJob.getShowCreateInfo(); - String expect = "CREATE ROUTINE LOAD test_load ON 11\n" + - "WITH APPEND\n" + - "PROPERTIES\n" + - "(\n" + - "\"desired_concurrent_number\" = \"0\",\n" + - "\"max_error_number\" = \"10\",\n" + - "\"max_batch_interval\" = \"10\",\n" + - "\"max_batch_rows\" = \"10\",\n" + - "\"max_batch_size\" = \"104857600\",\n" + - "\"format\" = \"csv\",\n" + - "\"strip_outer_array\" = \"false\",\n" + - "\"num_as_string\" = \"false\",\n" + - "\"fuzzy_parse\" = \"false\",\n" + - "\"strict_mode\" = \"false\",\n" + - "\"timezone\" = \"Asia/Shanghai\",\n" + - "\"exec_mem_limit\" = \"2147483648\"\n" + - ")\n" + - "FROM KAFKA\n" + - "(\n" + - "\"kafka_broker_list\" = \"localhost:9092\",\n" + - "\"kafka_topic\" = \"test_topic\"\n" + - ");"; + String expect = "CREATE ROUTINE LOAD test_load ON 11\n" + + "WITH APPEND\n" + + "PROPERTIES\n" + + "(\n" + + "\"desired_concurrent_number\" = \"0\",\n" + + "\"max_error_number\" = \"10\",\n" + + "\"max_batch_interval\" = \"10\",\n" + + "\"max_batch_rows\" = \"10\",\n" + + "\"max_batch_size\" = \"104857600\",\n" + + "\"format\" = \"csv\",\n" + + "\"strip_outer_array\" = \"false\",\n" + + "\"num_as_string\" = \"false\",\n" + + "\"fuzzy_parse\" = \"false\",\n" + + "\"strict_mode\" 
= \"false\",\n" + + "\"timezone\" = \"Asia/Shanghai\",\n" + + "\"exec_mem_limit\" = \"2147483648\"\n" + + ")\n" + + "FROM KAFKA\n" + + "(\n" + + "\"kafka_broker_list\" = \"localhost:9092\",\n" + + "\"kafka_topic\" = \"test_topic\"\n" + + ");"; System.out.println(showCreateInfo); Assert.assertEquals(expect, showCreateInfo); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadManagerTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadManagerTest.java index 402205fae0fdf6..a0b0a89fde890b 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadManagerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadManagerTest.java @@ -24,7 +24,6 @@ import org.apache.doris.analysis.ResumeRoutineLoadStmt; import org.apache.doris.analysis.Separator; import org.apache.doris.analysis.StopRoutineLoadStmt; -import org.apache.doris.analysis.TableName; import org.apache.doris.analysis.UserIdentity; import org.apache.doris.catalog.Catalog; import org.apache.doris.catalog.Database; @@ -50,7 +49,11 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; - +import mockit.Expectations; +import mockit.Injectable; +import mockit.Mock; +import mockit.MockUp; +import mockit.Mocked; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.Assert; @@ -60,11 +63,6 @@ import java.util.List; import java.util.Map; import java.util.UUID; -import mockit.Expectations; -import mockit.Injectable; -import mockit.Mock; -import mockit.MockUp; -import mockit.Mocked; public class RoutineLoadManagerTest { @@ -82,7 +80,6 @@ public void testAddJobByStmt(@Injectable PaloAuth paloAuth, String dbName = "db1"; LabelName labelName = new LabelName(dbName, jobName); String tableNameString = "table1"; - TableName tableName = new TableName(dbName, tableNameString); List<ParseNode> loadPropertyList = new ArrayList<>(); Separator columnSeparator = new Separator(","); loadPropertyList.add(columnSeparator); @@ -152,7 +149,6 @@ public void testCreateJobAuthDeny(@Injectable PaloAuth paloAuth, String dbName = "db1"; LabelName labelName = new LabelName(dbName, jobName); String tableNameString = "table1"; - TableName tableName = new TableName(dbName, tableNameString); List<ParseNode> loadPropertyList = new ArrayList<>(); Separator columnSeparator = new Separator(","); loadPropertyList.add(columnSeparator); diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadSchedulerTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadSchedulerTest.java index 97afc556afbdda..3d697f8d09be5a 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadSchedulerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadSchedulerTest.java @@ -34,16 +34,15 @@ import com.google.common.collect.Lists; import com.google.common.collect.Sets; - +import mockit.Expectations; +import mockit.Injectable; +import mockit.Mocked; import org.junit.Assert; import org.junit.Test; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ExecutorService; -import mockit.Expectations; -import mockit.Injectable; -import mockit.Mocked; public class RoutineLoadSchedulerTest { @@ -77,7 +76,7 @@ public void testNormalRunOneCycle(@Mocked Catalog catalog, KafkaRoutineLoadJob kafkaRoutineLoadJob = new KafkaRoutineLoadJob(1L, "test", clusterName, 1L, 1L, "xxx", "test", UserIdentity.ADMIN); -
Deencapsulation.setField(kafkaRoutineLoadJob,"state", RoutineLoadJob.JobState.NEED_SCHEDULE); + Deencapsulation.setField(kafkaRoutineLoadJob, "state", RoutineLoadJob.JobState.NEED_SCHEDULE); List<RoutineLoadJob> routineLoadJobList = new ArrayList<>(); routineLoadJobList.add(kafkaRoutineLoadJob); @@ -130,7 +129,7 @@ public void testNormalRunOneCycle(@Mocked Catalog catalog, public void functionTest(@Mocked Catalog catalog, @Mocked SystemInfoService systemInfoService, @Injectable Database database) throws DdlException, InterruptedException { - new Expectations(){ + new Expectations() { { connectContext.toResourceCtx(); minTimes = 0; @@ -146,7 +145,7 @@ public void functionTest(@Mocked Catalog catalog, List<Long> backendIds = new ArrayList<>(); backendIds.add(1L); - new Expectations(){ + new Expectations() { { catalog.getRoutineLoadManager(); minTimes = 0; diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/sync/canal/CanalTestUtil.java b/fe/fe-core/src/test/java/org/apache/doris/load/sync/canal/CanalTestUtil.java index 797e88ee1d2ff6..e29f58b1daf3f4 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/sync/canal/CanalTestUtil.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/sync/canal/CanalTestUtil.java @@ -84,7 +84,7 @@ public static Message fetchEOFMessage() { public static Message fetchMessage(long id, boolean isRaw, int batchSize, String binlogFile, long offset, String schemaName, String tableName) { List<CanalEntry.Entry> entries = Lists.newArrayList(); - for (int i = 0 ; i < batchSize; i++) { + for (int i = 0; i < batchSize; i++) { entries.add(buildEntry(binlogFile, offset++, 1024, schemaName, tableName)); } return new Message(id, isRaw, entries); diff --git a/fe/fe-core/src/test/java/org/apache/doris/mysql/MysqlChannelTest.java b/fe/fe-core/src/test/java/org/apache/doris/mysql/MysqlChannelTest.java index 17c406579ffab5..c3e65b65449205 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/mysql/MysqlChannelTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/mysql/MysqlChannelTest.java @@ -209,7 +209,7 @@ int fakeRead(ByteBuffer buffer) { MysqlChannel channel1 = new MysqlChannel(channel); - ByteBuffer buf = channel1.fetchOnePacket(); + channel1.fetchOnePacket(); } @Test(expected = IOException.class) @@ -225,7 +225,7 @@ public void testException() throws IOException { MysqlChannel channel1 = new MysqlChannel(channel); - ByteBuffer buf = channel1.fetchOnePacket(); + channel1.fetchOnePacket(); Assert.fail("No Exception throws."); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/persist/CreateTableInfoTest.java b/fe/fe-core/src/test/java/org/apache/doris/persist/CreateTableInfoTest.java index a9c655cdddfd48..b1a5a70e52b824 100--- a/fe/fe-core/src/test/java/org/apache/doris/persist/CreateTableInfoTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/persist/CreateTableInfoTest.java @@ -93,7 +93,7 @@ public void testSerialization() throws Exception { OlapTable table = new OlapTable(1000L, "table", columns, KeysType.AGG_KEYS, new SinglePartitionInfo(), distributionInfo); short shortKeyColumnCount = 1; - table.setIndexMeta(1000, "group1", columns, 1,1,shortKeyColumnCount,TStorageType.COLUMN, KeysType.AGG_KEYS); + table.setIndexMeta(1000, "group1", columns, 1, 1, shortKeyColumnCount, TStorageType.COLUMN, KeysType.AGG_KEYS); List<Column> column = Lists.newArrayList(); column.add(column2); diff --git a/fe/fe-core/src/test/java/org/apache/doris/persist/FsBrokerTest.java b/fe/fe-core/src/test/java/org/apache/doris/persist/FsBrokerTest.java index 15b621bb02bbf0..9274fef82387ea ---
a/fe/fe-core/src/test/java/org/apache/doris/persist/FsBrokerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/persist/FsBrokerTest.java @@ -87,7 +87,6 @@ public void testHeartbeatFailed() throws Exception { DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); FsBroker fsBroker = new FsBroker("127.0.0.1", 8118); - long time = System.currentTimeMillis(); BrokerHbResponse hbResponse = new BrokerHbResponse("broker", "127.0.0.1", 8118, "got exception"); fsBroker.handleHbResponse(hbResponse); fsBroker.write(dos); diff --git a/fe/fe-core/src/test/java/org/apache/doris/persist/ReplaceTableOperationLogTest.java b/fe/fe-core/src/test/java/org/apache/doris/persist/ReplaceTableOperationLogTest.java index 5ee74bbbca8ee0..ec71c24a13f255 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/persist/ReplaceTableOperationLogTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/persist/ReplaceTableOperationLogTest.java @@ -34,7 +34,7 @@ public void testSerialization() throws Exception { file.createNewFile(); DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - ReplaceTableOperationLog log = new ReplaceTableOperationLog(1,2,3,true); + ReplaceTableOperationLog log = new ReplaceTableOperationLog(1, 2, 3, true); log.write(dos); dos.flush(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/persist/ReplicaPersistInfoTest.java b/fe/fe-core/src/test/java/org/apache/doris/persist/ReplicaPersistInfoTest.java index 6d8cd1115d862b..78509823fe880d 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/persist/ReplicaPersistInfoTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/persist/ReplicaPersistInfoTest.java @@ -50,7 +50,7 @@ public void testSerialization() throws Exception { // 2. Read objects from file DataInputStream dis = new DataInputStream(new FileInputStream(file)); - ReplicaPersistInfo rInfo2 = ReplicaPersistInfo.read(dis); + ReplicaPersistInfo rInfo2 = ReplicaPersistInfo.read(dis); // CHECKSTYLE IGNORE THIS LINE // 3. 
delete files dis.close(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/ConstantExpressTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/ConstantExpressTest.java index 5e30942724b21a..e651166e91e075 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/ConstantExpressTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/ConstantExpressTest.java @@ -228,7 +228,6 @@ public void testConstantInPredicate() throws Exception { @Test public void testTimestamp() throws Exception { - testConstantExpressResult("select timestamp('2021-07-24 00:00:00')", - "'2021-07-24 00:00:00'"); + testConstantExpressResult("select timestamp('2021-07-24 00:00:00')", "'2021-07-24 00:00:00'"); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/MaterializedViewFunctionTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/MaterializedViewFunctionTest.java index 9b73aa03e7f810..5f4bf368da3031 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/MaterializedViewFunctionTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/MaterializedViewFunctionTest.java @@ -137,8 +137,8 @@ public void testProjectionMV4() throws Exception { @Test public void testUnionQueryOnProjectionMV() throws Exception { - String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid from " + - EMPS_TABLE_NAME + " order by deptno;"; + String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid from " + + EMPS_TABLE_NAME + " order by deptno;"; String union = "select empid from " + EMPS_TABLE_NAME + " where deptno > 300" + " union all select empid from" + " " + EMPS_TABLE_NAME + " where deptno < 200"; dorisAssert.withMaterializedView(createMVSql).query(union).explainContains(QUERY_USE_EMPS_MV); @@ -234,8 +234,8 @@ public void testAggQuqeryOnAggMV6() throws Exception { */ @Test public void testGroupingSetQueryOnAggMV() throws Exception { - String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno, sum(salary) " + - "from " + EMPS_TABLE_NAME + " group by empid, deptno;"; + String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno, sum(salary) " + + "from " + EMPS_TABLE_NAME + " group by empid, deptno;"; String query = "select sum(salary), empid, deptno from " + EMPS_TABLE_NAME + " group by rollup(empid,deptno);"; dorisAssert.withMaterializedView(createMVSql); dorisAssert.query(query).explainContains(QUERY_USE_EMPS_MV); @@ -309,8 +309,8 @@ public void testAggQueryWithSetOperandOnAggMV() throws Exception { String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select deptno, count(salary) " + "from " + EMPS_TABLE_NAME + " group by deptno;"; String query = "select deptno, count(salary) + count(1) from " + EMPS_TABLE_NAME - + " group by deptno union " + - "select deptno, count(salary) + count(1) from " + EMPS_TABLE_NAME + + " group by deptno union " + + "select deptno, count(salary) + count(1) from " + EMPS_TABLE_NAME + " group by deptno;"; dorisAssert.withMaterializedView(createMVSql); dorisAssert.query(query).explainContains(QUERY_USE_EMPS); @@ -331,8 +331,8 @@ public void testJoinOnLeftProjectToJoin() throws Exception { @Test public void testJoinOnRightProjectToJoin() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, sum(salary), sum" + - "(commission) from " + EMPS_TABLE_NAME + " group by deptno;"; + String createEmpsMVsql = "create materialized view " + 
EMPS_MV_NAME + " as select deptno, sum(salary), sum" + + "(commission) from " + EMPS_TABLE_NAME + " group by deptno;"; String createDeptsMVSQL = "create materialized view " + DEPTS_MV_NAME + " as select deptno, max(cost) from " + DEPTS_TABLE_NAME + " group by deptno;"; String query = "select * from (select deptno , sum(salary), sum(commission) from " + EMPS_TABLE_NAME @@ -344,8 +344,8 @@ public void testJoinOnRightProjectToJoin() throws Exception { @Test public void testJoinOnProjectsToJoin() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, sum(salary), sum" + - "(commission) from " + EMPS_TABLE_NAME + " group by deptno;"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, sum(salary), sum" + + "(commission) from " + EMPS_TABLE_NAME + " group by deptno;"; String createDeptsMVSQL = "create materialized view " + DEPTS_MV_NAME + " as select deptno, max(cost) from " + DEPTS_TABLE_NAME + " group by deptno;"; String query = "select * from (select deptno , sum(salary) from " + EMPS_TABLE_NAME + " group by deptno) A " @@ -356,46 +356,46 @@ public void testJoinOnProjectsToJoin() throws Exception { @Test public void testJoinOnCalcToJoin0() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + - EMPS_TABLE_NAME + ";"; - String createDeptsMVSQL = "create materialized view " + DEPTS_MV_NAME + " as select deptno from " + - DEPTS_TABLE_NAME + ";"; - String query = "select * from (select empid, deptno from " + EMPS_TABLE_NAME + " where deptno > 10 ) A " + - "join (select deptno from " + DEPTS_TABLE_NAME + " ) B on A.deptno = B.deptno;"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + + EMPS_TABLE_NAME + ";"; + String createDeptsMVSQL = "create materialized view " + DEPTS_MV_NAME + " as select deptno from " + + DEPTS_TABLE_NAME + ";"; + String query = "select * from (select empid, deptno from " + EMPS_TABLE_NAME + " where deptno > 10 ) A " + + "join (select deptno from " + DEPTS_TABLE_NAME + " ) B on A.deptno = B.deptno;"; dorisAssert.withMaterializedView(createDeptsMVSQL).withMaterializedView(createEmpsMVsql).query(query) .explainContains(QUERY_USE_EMPS_MV, QUERY_USE_DEPTS_MV); } @Test public void testJoinOnCalcToJoin1() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + - EMPS_TABLE_NAME + ";"; - String createDeptsMVSQL = "create materialized view " + DEPTS_MV_NAME + " as select deptno from " + - DEPTS_TABLE_NAME + ";"; - String query = "select * from (select empid, deptno from " + EMPS_TABLE_NAME + " ) A join (select " + - "deptno from " + DEPTS_TABLE_NAME + " where deptno > 10 ) B on A.deptno = B.deptno;"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + + EMPS_TABLE_NAME + ";"; + String createDeptsMVSQL = "create materialized view " + DEPTS_MV_NAME + " as select deptno from " + + DEPTS_TABLE_NAME + ";"; + String query = "select * from (select empid, deptno from " + EMPS_TABLE_NAME + " ) A join (select " + + "deptno from " + DEPTS_TABLE_NAME + " where deptno > 10 ) B on A.deptno = B.deptno;"; dorisAssert.withMaterializedView(createDeptsMVSQL).withMaterializedView(createEmpsMVsql).query(query) .explainContains(QUERY_USE_EMPS_MV, QUERY_USE_DEPTS_MV); } @Test public void testJoinOnCalcToJoin2() throws Exception { - String createEmpsMVsql = 
"create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + - EMPS_TABLE_NAME + ";"; - String createDeptsMVSQL = "create materialized view " + DEPTS_MV_NAME + " as select deptno from " + - DEPTS_TABLE_NAME + ";"; - String query = "select * from (select empid, deptno from " + EMPS_TABLE_NAME + " where empid >10 ) A " + - "join (select deptno from " + DEPTS_TABLE_NAME + " where deptno > 10 ) B on A.deptno = B.deptno;"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + + EMPS_TABLE_NAME + ";"; + String createDeptsMVSQL = "create materialized view " + DEPTS_MV_NAME + " as select deptno from " + + DEPTS_TABLE_NAME + ";"; + String query = "select * from (select empid, deptno from " + EMPS_TABLE_NAME + " where empid >10 ) A " + + "join (select deptno from " + DEPTS_TABLE_NAME + " where deptno > 10 ) B on A.deptno = B.deptno;"; dorisAssert.withMaterializedView(createDeptsMVSQL).withMaterializedView(createEmpsMVsql).query(query) .explainContains(QUERY_USE_EMPS_MV, QUERY_USE_DEPTS_MV); } @Test public void testJoinOnCalcToJoin3() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + - EMPS_TABLE_NAME + ";"; - String createDeptsMVSQL = "create materialized view " + DEPTS_MV_NAME + " as select deptno from " + - DEPTS_TABLE_NAME + ";"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + + EMPS_TABLE_NAME + ";"; + String createDeptsMVSQL = "create materialized view " + DEPTS_MV_NAME + " as select deptno from " + + DEPTS_TABLE_NAME + ";"; String query = "select * from (select empid, deptno + 1 deptno from " + EMPS_TABLE_NAME + " where empid >10 )" + " A join (select deptno from " + DEPTS_TABLE_NAME + " where deptno > 10 ) B on A.deptno = B.deptno;"; @@ -405,10 +405,10 @@ public void testJoinOnCalcToJoin3() throws Exception { @Test public void testJoinOnCalcToJoin4() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + - EMPS_TABLE_NAME + ";"; - String createDeptsMVSQL = "create materialized view " + DEPTS_MV_NAME + " as select deptno from " + - DEPTS_TABLE_NAME + ";"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + + EMPS_TABLE_NAME + ";"; + String createDeptsMVSQL = "create materialized view " + DEPTS_MV_NAME + " as select deptno from " + + DEPTS_TABLE_NAME + ";"; String query = "select * from (select empid, deptno + 1 deptno from " + EMPS_TABLE_NAME + " where empid is not null ) A full join (select deptno from " + DEPTS_TABLE_NAME + " where deptno is not null ) B on A.deptno = B.deptno;"; @@ -418,24 +418,24 @@ public void testJoinOnCalcToJoin4() throws Exception { @Test public void testOrderByQueryOnProjectView() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid from " + - EMPS_TABLE_NAME + ";"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid from " + + EMPS_TABLE_NAME + ";"; String query = "select empid from " + EMPS_TABLE_NAME + " order by deptno"; dorisAssert.withMaterializedView(createEmpsMVsql).query(query).explainContains(QUERY_USE_EMPS_MV); } @Test public void testOrderByQueryOnOrderByView() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid from " + - EMPS_TABLE_NAME + " order by 
deptno;"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid from " + + EMPS_TABLE_NAME + " order by deptno;"; String query = "select empid from " + EMPS_TABLE_NAME + " order by deptno"; dorisAssert.withMaterializedView(createEmpsMVsql).query(query).explainContains(QUERY_USE_EMPS_MV); } @Test public void testQueryOnStar() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select time, deptno, empid, name, " + - "salary, commission from " + EMPS_TABLE_NAME + " order by time, deptno, empid;"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select time, deptno, empid, name, " + + "salary, commission from " + EMPS_TABLE_NAME + " order by time, deptno, empid;"; String query = "select * from " + EMPS_TABLE_NAME + " where deptno = 1"; dorisAssert.withMaterializedView(createEmpsMVsql).query(query).explainContains(QUERY_USE_EMPS_MV); } @@ -493,8 +493,8 @@ public void testAggregateMVCalcGroupByQuery1() throws Exception { public void testAggregateMVCalcGroupByQuery2() throws Exception { String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid, sum(salary) " + "from " + EMPS_TABLE_NAME + " group by empid, deptno;"; - String query = "select deptno * empid, sum(salary) + 1 from " + EMPS_TABLE_NAME + " where deptno > 10 " + - "group by deptno * empid;"; + String query = "select deptno * empid, sum(salary) + 1 from " + EMPS_TABLE_NAME + " where deptno > 10 " + + "group by deptno * empid;"; dorisAssert.withMaterializedView(createEmpsMVsql).query(query).explainContains(QUERY_USE_EMPS_MV); } @@ -528,8 +528,8 @@ public void testSubQuery() throws Exception { @Test public void testDistinctQuery() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, sum(salary) " + - "from " + EMPS_TABLE_NAME + " group by deptno;"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, sum(salary) " + + "from " + EMPS_TABLE_NAME + " group by deptno;"; String query1 = "select distinct deptno from " + EMPS_TABLE_NAME + ";"; dorisAssert.withMaterializedView(createEmpsMVsql); dorisAssert.query(query1).explainContains(QUERY_USE_EMPS_MV); @@ -539,10 +539,10 @@ public void testDistinctQuery() throws Exception { @Test public void testSingleMVMultiUsage() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid, salary " + - "from " + EMPS_TABLE_NAME + " order by deptno;"; - String query = "select * from (select deptno, empid from " + EMPS_TABLE_NAME + " where deptno>100) A join " + - "(select deptno, empid from " + EMPS_TABLE_NAME + " where deptno >200) B using (deptno);"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid, salary " + + "from " + EMPS_TABLE_NAME + " order by deptno;"; + String query = "select * from (select deptno, empid from " + EMPS_TABLE_NAME + " where deptno>100) A join " + + "(select deptno, empid from " + EMPS_TABLE_NAME + " where deptno >200) B using (deptno);"; dorisAssert.withMaterializedView(createEmpsMVsql).query(query).explainContains(QUERY_USE_EMPS_MV, 2); } @@ -552,8 +552,8 @@ public void testMultiMVMultiUsage() throws Exception { + "from " + EMPS_TABLE_NAME + " order by deptno;"; String createEmpsMVSql02 = "create materialized view emp_mv_02 as select deptno, sum(salary) " + "from " + EMPS_TABLE_NAME + " group by deptno;"; - String query = "select * 
from (select deptno, empid from " + EMPS_TABLE_NAME + " where deptno>100) A join " + - "(select deptno, sum(salary) from " + EMPS_TABLE_NAME + " where deptno >200 group by deptno) B " + String query = "select * from (select deptno, empid from " + EMPS_TABLE_NAME + " where deptno>100) A join " + + "(select deptno, sum(salary) from " + EMPS_TABLE_NAME + " where deptno >200 group by deptno) B " + "using (deptno);"; dorisAssert.withMaterializedView(createEmpsMVSql01).withMaterializedView(createEmpsMVSql02).query(query) .explainContains("rollup: emp_mv_01", "rollup: emp_mv_02"); @@ -561,8 +561,8 @@ public void testMultiMVMultiUsage() throws Exception { @Test public void testMVOnJoinQuery() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select salary, empid, deptno from " + - EMPS_TABLE_NAME + " order by salary;"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select salary, empid, deptno from " + + EMPS_TABLE_NAME + " order by salary;"; String query = "select empid, salary from " + EMPS_TABLE_NAME + " join " + DEPTS_TABLE_NAME + " using (deptno) where salary > 300;"; dorisAssert.withMaterializedView(createEmpsMVsql).query(query).explainContains(QUERY_USE_EMPS_MV, @@ -580,16 +580,16 @@ public void testAggregateMVOnCountDistinctQuery1() throws Exception { @Test public void testQueryAfterTrimingOfUnusedFields() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + - EMPS_TABLE_NAME + " order by empid, deptno;"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + + EMPS_TABLE_NAME + " order by empid, deptno;"; String query = "select empid, deptno from (select empid, deptno, salary from " + EMPS_TABLE_NAME + ") A;"; dorisAssert.withMaterializedView(createEmpsMVsql).query(query).explainContains(QUERY_USE_EMPS_MV); } @Test public void testUnionAll() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + - EMPS_TABLE_NAME + " order by empid, deptno;"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + + EMPS_TABLE_NAME + " order by empid, deptno;"; String query = "select empid, deptno from " + EMPS_TABLE_NAME + " where empid >1 union all select empid," + " deptno from " + EMPS_TABLE_NAME + " where empid <0;"; dorisAssert.withMaterializedView(createEmpsMVsql).query(query).explainContains(QUERY_USE_EMPS_MV, 2); @@ -597,10 +597,10 @@ public void testUnionAll() throws Exception { @Test public void testUnionDistinct() throws Exception { - String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + - EMPS_TABLE_NAME + " order by empid, deptno;"; - String query = "select empid, deptno from " + EMPS_TABLE_NAME + " where empid >1 union select empid," + - " deptno from " + EMPS_TABLE_NAME + " where empid <0;"; + String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from " + + EMPS_TABLE_NAME + " order by empid, deptno;"; + String query = "select empid, deptno from " + EMPS_TABLE_NAME + " where empid >1 union select empid," + + " deptno from " + EMPS_TABLE_NAME + " where empid <0;"; dorisAssert.withMaterializedView(createEmpsMVsql).query(query).explainContains(QUERY_USE_EMPS_MV, 2); } @@ -672,8 +672,8 @@ public void testUniqueTableInQuery() throws Exception { @Test public void testBitmapUnionInQuery() 
throws Exception { String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME - + " as select user_id, bitmap_union(to_bitmap(tag_id)) from " + - USER_TAG_TABLE_NAME + " group by user_id;"; + + " as select user_id, bitmap_union(to_bitmap(tag_id)) from " + + USER_TAG_TABLE_NAME + " group by user_id;"; dorisAssert.withMaterializedView(createUserTagMVSql); String query = "select user_id, bitmap_union_count(to_bitmap(tag_id)) a from " + USER_TAG_TABLE_NAME + " group by user_id having a>1 order by a;"; @@ -682,11 +682,11 @@ public void testBitmapUnionInQuery() throws Exception { @Test public void testBitmapUnionInSubquery() throws Exception { - String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + - "bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; + String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + + "bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; dorisAssert.withMaterializedView(createUserTagMVSql); - String query = "select user_id from " + USER_TAG_TABLE_NAME + " where user_id in (select user_id from " + - USER_TAG_TABLE_NAME + " group by user_id having bitmap_union_count(to_bitmap(tag_id)) >1 ) ;"; + String query = "select user_id from " + USER_TAG_TABLE_NAME + " where user_id in (select user_id from " + + USER_TAG_TABLE_NAME + " group by user_id having bitmap_union_count(to_bitmap(tag_id)) >1 ) ;"; dorisAssert.query(query).explainContains(USER_TAG_MV_NAME, USER_TAG_TABLE_NAME); } @@ -695,8 +695,8 @@ public void testIncorrectMVRewriteInQuery() throws Exception { String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + "bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; dorisAssert.withMaterializedView(createUserTagMVSql); - String createEmpMVSql = "create materialized view " + EMPS_MV_NAME + " as select name, deptno from " + - EMPS_TABLE_NAME + ";"; + String createEmpMVSql = "create materialized view " + EMPS_MV_NAME + " as select name, deptno from " + + EMPS_TABLE_NAME + ";"; dorisAssert.withMaterializedView(createEmpMVSql); String query = "select user_name, bitmap_union_count(to_bitmap(tag_id)) a from " + USER_TAG_TABLE_NAME + ", " + "(select name, deptno from " + EMPS_TABLE_NAME + ") a" + " where user_name=a.name group by " @@ -707,22 +707,22 @@ public void testIncorrectMVRewriteInQuery() throws Exception { @Test public void testIncorrectMVRewriteInSubquery() throws Exception { - String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + - "bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; + String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + + "bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; dorisAssert.withMaterializedView(createUserTagMVSql); - String query = "select user_id, bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " where " + - "user_name in (select user_name from " + USER_TAG_TABLE_NAME + " group by user_name having " + - "bitmap_union_count(to_bitmap(tag_id)) >1 )" + " group by user_id;"; + String query = "select user_id, bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " where " + + "user_name in (select user_name from " + USER_TAG_TABLE_NAME + " group by user_name having " + + "bitmap_union_count(to_bitmap(tag_id)) >1 
)" + " group by user_id;"; dorisAssert.query(query).explainContains(QUERY_USE_USER_TAG); } @Test public void testTwoTupleInQuery() throws Exception { - String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + - "bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; + String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + + "bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; dorisAssert.withMaterializedView(createUserTagMVSql); - String query = "select * from (select user_id, bitmap_union_count(to_bitmap(tag_id)) x from " + - USER_TAG_TABLE_NAME + " group by user_id) a, (select user_name, bitmap_union_count(to_bitmap(tag_id))" + String query = "select * from (select user_id, bitmap_union_count(to_bitmap(tag_id)) x from " + + USER_TAG_TABLE_NAME + " group by user_id) a, (select user_name, bitmap_union_count(to_bitmap(tag_id))" + "" + " y from " + USER_TAG_TABLE_NAME + " group by user_name) b where a.x=b.y;"; dorisAssert.query(query).explainContains(QUERY_USE_USER_TAG, QUERY_USE_USER_TAG_MV); } @@ -739,8 +739,8 @@ public void testAggTableCountDistinctInBitmapType() throws Exception { @Test public void testAggTableCountDistinctInHllType() throws Exception { - String aggTable = "CREATE TABLE " + TEST_TABLE_NAME + " (k1 int, v1 hll " + FunctionSet.HLL_UNION + ") Aggregate KEY (k1) " + - "DISTRIBUTED BY HASH(k1) BUCKETS 3 PROPERTIES ('replication_num' = '1');"; + String aggTable = "CREATE TABLE " + TEST_TABLE_NAME + " (k1 int, v1 hll " + FunctionSet.HLL_UNION + ") Aggregate KEY (k1) " + + "DISTRIBUTED BY HASH(k1) BUCKETS 3 PROPERTIES ('replication_num' = '1');"; dorisAssert.withTable(aggTable); String query = "select k1, count(distinct v1) from " + TEST_TABLE_NAME + " group by k1;"; dorisAssert.query(query).explainContains(TEST_TABLE_NAME, "hll_union_agg"); @@ -749,8 +749,8 @@ public void testAggTableCountDistinctInHllType() throws Exception { @Test public void testCountDistinctToBitmap() throws Exception { - String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + - "bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; + String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + + "bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; dorisAssert.withMaterializedView(createUserTagMVSql); String query = "select count(distinct tag_id) from " + USER_TAG_TABLE_NAME + ";"; dorisAssert.query(query).explainContains(USER_TAG_MV_NAME, "bitmap_union_count"); @@ -758,8 +758,8 @@ public void testCountDistinctToBitmap() throws Exception { @Test public void testIncorrectRewriteCountDistinct() throws Exception { - String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + - "bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; + String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + + "bitmap_union(to_bitmap(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; dorisAssert.withMaterializedView(createUserTagMVSql); String query = "select user_name, count(distinct tag_id) from " + USER_TAG_TABLE_NAME + " group by user_name;"; dorisAssert.query(query).explainContains(USER_TAG_TABLE_NAME, FunctionSet.COUNT); @@ -767,8 +767,8 @@ public void testIncorrectRewriteCountDistinct() throws 
Exception { @Test public void testNDVToHll() throws Exception { - String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + - "`" + FunctionSet.HLL_UNION + "`(" + FunctionSet.HLL_HASH + "(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; + String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + + "`" + FunctionSet.HLL_UNION + "`(" + FunctionSet.HLL_HASH + "(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; dorisAssert.withMaterializedView(createUserTagMVSql); String query = "select ndv(tag_id) from " + USER_TAG_TABLE_NAME + ";"; dorisAssert.query(query).explainContains(USER_TAG_MV_NAME, "hll_union_agg"); @@ -776,8 +776,8 @@ public void testNDVToHll() throws Exception { @Test public void testApproxCountDistinctToHll() throws Exception { - String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + - "`" + FunctionSet.HLL_UNION + "`(" + FunctionSet.HLL_HASH + "(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; + String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + + "`" + FunctionSet.HLL_UNION + "`(" + FunctionSet.HLL_HASH + "(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; dorisAssert.withMaterializedView(createUserTagMVSql); String query = "select approx_count_distinct(tag_id) from " + USER_TAG_TABLE_NAME + ";"; dorisAssert.query(query).explainContains(USER_TAG_MV_NAME, "hll_union_agg"); @@ -785,8 +785,8 @@ public void testApproxCountDistinctToHll() throws Exception { @Test public void testHLLUnionFamilyRewrite() throws Exception { - String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + - "`" + FunctionSet.HLL_UNION + "`(" + FunctionSet.HLL_HASH + "(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; + String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + + "`" + FunctionSet.HLL_UNION + "`(" + FunctionSet.HLL_HASH + "(tag_id)) from " + USER_TAG_TABLE_NAME + " group by user_id;"; dorisAssert.withMaterializedView(createUserTagMVSql); String query = "select `" + FunctionSet.HLL_UNION + "`(" + FunctionSet.HLL_HASH + "(tag_id)) from " + USER_TAG_TABLE_NAME + ";"; String mvColumnName = CreateMaterializedViewStmt.mvColumnBuilder("" + FunctionSet.HLL_UNION + "", "tag_id"); @@ -811,8 +811,8 @@ public void testAggInHaving() throws Exception { @Test public void testCountFieldInQuery() throws Exception { - String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + - "count(tag_id) from " + USER_TAG_TABLE_NAME + " group by user_id;"; + String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, " + + "count(tag_id) from " + USER_TAG_TABLE_NAME + " group by user_id;"; dorisAssert.withMaterializedView(createUserTagMVSql); String query = "select count(tag_id) from " + USER_TAG_TABLE_NAME + ";"; String mvColumnName = CreateMaterializedViewStmt.mvColumnBuilder(FunctionSet.COUNT, "tag_id"); diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/OlapScanNodeTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/OlapScanNodeTest.java index 68517a29c74638..0fa0d0942d143d 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/OlapScanNodeTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/OlapScanNodeTest.java @@ -116,62 +116,47 @@ public void 
testHashPartitionManyUser() throws AnalysisException { @Test public void testHashForIntLiteral() { - { + { // CHECKSTYLE IGNORE THIS LINE PartitionKey hashKey = new PartitionKey(); hashKey.pushColumn(new IntLiteral(1), PrimitiveType.BIGINT); long hashValue = hashKey.getHashValue(); long mod = (int) ((hashValue & 0xffffffff) % 3); Assert.assertEquals(mod, 1); - } - { + } // CHECKSTYLE IGNORE THIS LINE + { // CHECKSTYLE IGNORE THIS LINE PartitionKey hashKey = new PartitionKey(); hashKey.pushColumn(new IntLiteral(2), PrimitiveType.BIGINT); long hashValue = hashKey.getHashValue(); long mod = (int) ((hashValue & 0xffffffff) % 3); Assert.assertEquals(mod, 0); - } - { + } // CHECKSTYLE IGNORE THIS LINE + { // CHECKSTYLE IGNORE THIS LINE PartitionKey hashKey = new PartitionKey(); hashKey.pushColumn(new IntLiteral(3), PrimitiveType.BIGINT); long hashValue = hashKey.getHashValue(); long mod = (int) ((hashValue & 0xffffffff) % 3); Assert.assertEquals(mod, 0); - } - { + } // CHECKSTYLE IGNORE THIS LINE + { // CHECKSTYLE IGNORE THIS LINE PartitionKey hashKey = new PartitionKey(); hashKey.pushColumn(new IntLiteral(4), PrimitiveType.BIGINT); long hashValue = hashKey.getHashValue(); long mod = (int) ((hashValue & 0xffffffff) % 3); Assert.assertEquals(mod, 1); - } - { + } // CHECKSTYLE IGNORE THIS LINE + { // CHECKSTYLE IGNORE THIS LINE PartitionKey hashKey = new PartitionKey(); hashKey.pushColumn(new IntLiteral(5), PrimitiveType.BIGINT); long hashValue = hashKey.getHashValue(); long mod = (int) ((hashValue & 0xffffffff) % 3); Assert.assertEquals(mod, 2); - } - { + } // CHECKSTYLE IGNORE THIS LINE + { // CHECKSTYLE IGNORE THIS LINE PartitionKey hashKey = new PartitionKey(); hashKey.pushColumn(new IntLiteral(6), PrimitiveType.BIGINT); long hashValue = hashKey.getHashValue(); long mod = (int) ((hashValue & 0xffffffff) % 3); Assert.assertEquals(mod, 2); - } + } // CHECKSTYLE IGNORE THIS LINE } - -// @Test -// public void testConstructInputPartitionByDistributionInfo(@Injectable OlapTable olapTable, -// @Injectable TupleDescriptor tupleDescriptor) { -// PlanNodeId planNodeId = new PlanNodeId(1); -// OlapScanNode olapScanNode = new OlapScanNode(planNodeId, tupleDescriptor, "scan node"); -// Deencapsulation.setField(olapScanNode, "olapTable", olapTable); -// new Expectations() { -// { -// olapTable.getDefaultDistributionInfo(); -// result = -// } -// }; -// olapScanNode.constructInputPartitionByDistributionInfo(); -// } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/OlapTableSinkTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/OlapTableSinkTest.java index 1ca91b77ebf151..181bd08bdaa0c9 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/OlapTableSinkTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/OlapTableSinkTest.java @@ -18,7 +18,6 @@ package org.apache.doris.planner; import org.apache.doris.analysis.DescriptorTable; -import org.apache.doris.analysis.PartitionValue; import org.apache.doris.analysis.SlotDescriptor; import org.apache.doris.analysis.TupleDescriptor; import org.apache.doris.catalog.Column; @@ -27,7 +26,6 @@ import org.apache.doris.catalog.MaterializedIndex; import org.apache.doris.catalog.OlapTable; import org.apache.doris.catalog.Partition; -import org.apache.doris.catalog.PartitionKey; import org.apache.doris.catalog.PartitionType; import org.apache.doris.catalog.PrimitiveType; import org.apache.doris.catalog.RangePartitionInfo; @@ -91,13 +89,18 @@ public void testSinglePartition() throws UserException { 2, Lists.newArrayList(new 
Column("k1", PrimitiveType.BIGINT))); Partition partition = new Partition(2, "p1", index, distInfo); - new Expectations() {{ - dstTable.getId(); result = 1; - dstTable.getPartitionInfo(); result = partInfo; - dstTable.getPartitions(); result = Lists.newArrayList(partition); + new Expectations() { + { + dstTable.getId(); + result = 1; + dstTable.getPartitionInfo(); + result = partInfo; + dstTable.getPartitions(); + result = Lists.newArrayList(partition); dstTable.getPartition(2L); result = partition; - }}; + } + }; OlapTableSink sink = new OlapTableSink(dstTable, tuple, Lists.newArrayList(2L)); sink.init(new TUniqueId(1, 2), 3, 4, 1000, 1, false); @@ -116,25 +119,32 @@ public void testRangePartition( 2, Lists.newArrayList(new Column("k1", PrimitiveType.BIGINT))); Column partKey = new Column("k2", PrimitiveType.VARCHAR); - PartitionKey key = PartitionKey.createPartitionKey(Lists.newArrayList(new PartitionValue("123")), Lists.newArrayList(partKey)); Partition p1 = new Partition(1, "p1", index, distInfo); Partition p2 = new Partition(2, "p2", index, distInfo); - new Expectations() {{ - dstTable.getId(); result = 1; - dstTable.getPartitionInfo(); result = partInfo; - partInfo.getType(); result = PartitionType.RANGE; - partInfo.getPartitionColumns(); result = Lists.newArrayList(partKey); - dstTable.getPartitions(); result = Lists.newArrayList(p1, p2); - dstTable.getPartition(p1.getId()); result = p1; - }}; + new Expectations() { + { + dstTable.getId(); + result = 1; + dstTable.getPartitionInfo(); + result = partInfo; + partInfo.getType(); + result = PartitionType.RANGE; + partInfo.getPartitionColumns(); + result = Lists.newArrayList(partKey); + dstTable.getPartitions(); + result = Lists.newArrayList(p1, p2); + dstTable.getPartition(p1.getId()); + result = p1; + } + }; OlapTableSink sink = new OlapTableSink(dstTable, tuple, Lists.newArrayList(p1.getId())); sink.init(new TUniqueId(1, 2), 3, 4, 1000, 1, false); try { sink.complete(); } catch (UserException e) { - + // CHECKSTYLE IGNORE THIS LINE } LOG.info("sink is {}", sink.toThrift()); LOG.info("{}", sink.getExplainString("", TExplainLevel.NORMAL)); @@ -147,9 +157,12 @@ public void testRangeUnknownPartition( TupleDescriptor tuple = getTuple(); long unknownPartId = 12345L; - new Expectations() {{ - dstTable.getPartition(unknownPartId); result = null; - }}; + new Expectations() { + { + dstTable.getPartition(unknownPartId); + result = null; + } + }; OlapTableSink sink = new OlapTableSink(dstTable, tuple, Lists.newArrayList(unknownPartId)); sink.init(new TUniqueId(1, 2), 3, 4, 1000, 1, false); @@ -168,25 +181,32 @@ public void testListPartition( 2, Lists.newArrayList(new Column("k1", PrimitiveType.BIGINT))); Column partKey = new Column("k2", PrimitiveType.VARCHAR); - PartitionKey key = PartitionKey.createPartitionKey(Lists.newArrayList(new PartitionValue("123")), Lists.newArrayList(partKey)); Partition p1 = new Partition(1, "p1", index, distInfo); Partition p2 = new Partition(2, "p2", index, distInfo); - new Expectations() {{ - dstTable.getId(); result = 1; - dstTable.getPartitionInfo(); result = partInfo; - partInfo.getType(); result = PartitionType.LIST; - partInfo.getPartitionColumns(); result = Lists.newArrayList(partKey); - dstTable.getPartitions(); result = Lists.newArrayList(p1, p2); - dstTable.getPartition(p1.getId()); result = p1; - }}; + new Expectations() { + { + dstTable.getId(); + result = 1; + dstTable.getPartitionInfo(); + result = partInfo; + partInfo.getType(); + result = PartitionType.LIST; + partInfo.getPartitionColumns(); + result 
= Lists.newArrayList(partKey); + dstTable.getPartitions(); + result = Lists.newArrayList(p1, p2); + dstTable.getPartition(p1.getId()); + result = p1; + } + }; OlapTableSink sink = new OlapTableSink(dstTable, tuple, Lists.newArrayList(p1.getId())); sink.init(new TUniqueId(1, 2), 3, 4, 1000, 1, false); try { sink.complete(); } catch (UserException e) { - + // CHECKSTYLE IGNORE THIS LINE } LOG.info("sink is {}", sink.toThrift()); LOG.info("{}", sink.getExplainString("", TExplainLevel.NORMAL)); diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/PlannerTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/PlannerTest.java index 39aee63d0b7c1f..1a4832b4208aac 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/PlannerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/PlannerTest.java @@ -38,30 +38,30 @@ protected void runBeforeAll() throws Exception { createDatabase("db1"); // Create tables. - String tbl1 = "create table db1.tbl1(" + - "k1 varchar(32), " + - "k2 varchar(32), " + - "k3 varchar(32), " + - "k4 int) " + - "AGGREGATE KEY(k1, k2,k3,k4) " + - "distributed by hash(k1) buckets 3 " + - "properties('replication_num' = '1');"; - - String tbl2 = "create table db1.tbl2(" + - "k1 int, " + - "k2 int sum) " + - "AGGREGATE KEY(k1) " + - "partition by range(k1) () " + - "distributed by hash(k1) buckets 3 " + - "properties('replication_num' = '1');"; - - String tbl3 = "create table db1.tbl3 (" + - "k1 date, " + - "k2 varchar(128) NULL, " + - "k3 varchar(5000) NULL) " + - "DUPLICATE KEY(k1, k2, k3) " + - "distributed by hash(k1) buckets 1 " + - "properties ('replication_num' = '1');"; + String tbl1 = "create table db1.tbl1(" + + "k1 varchar(32), " + + "k2 varchar(32), " + + "k3 varchar(32), " + + "k4 int) " + + "AGGREGATE KEY(k1, k2,k3,k4) " + + "distributed by hash(k1) buckets 3 " + + "properties('replication_num' = '1');"; + + String tbl2 = "create table db1.tbl2(" + + "k1 int, " + + "k2 int sum) " + + "AGGREGATE KEY(k1) " + + "partition by range(k1) () " + + "distributed by hash(k1) buckets 3 " + + "properties('replication_num' = '1');"; + + String tbl3 = "create table db1.tbl3 (" + + "k1 date, " + + "k2 varchar(128) NULL, " + + "k3 varchar(5000) NULL) " + + "DUPLICATE KEY(k1, k2, k3) " + + "distributed by hash(k1) buckets 1 " + + "properties ('replication_num' = '1');"; String tbl4 = "create table db1.tbl4(" + "k1 int," @@ -261,56 +261,56 @@ public void testSetOperation() throws Exception { Assert.assertTrue(fragments10.get(0).getPlanRoot() .getFragment().getPlanRoot().getChild(1) instanceof UnionNode); - String sql11 = "SELECT a.x FROM\n" + - "(SELECT '01' x) a \n" + - "INNER JOIN\n" + - "(SELECT '01' x UNION all SELECT '02') b"; + String sql11 = "SELECT a.x FROM\n" + + "(SELECT '01' x) a \n" + + "INNER JOIN\n" + + "(SELECT '01' x UNION all SELECT '02') b"; StmtExecutor stmtExecutor11 = new StmtExecutor(connectContext, sql11); stmtExecutor11.execute(); Planner planner11 = stmtExecutor11.planner(); - SetOperationNode setNode11 = (SetOperationNode)(planner11.getFragments().get(1).getPlanRoot()); + SetOperationNode setNode11 = (SetOperationNode) (planner11.getFragments().get(1).getPlanRoot()); Assert.assertEquals(2, setNode11.getMaterializedConstExprLists().size()); - String sql12 = "SELECT a.x \n" + - "FROM (SELECT '01' x) a \n" + - "INNER JOIN \n" + - "(SELECT k1 from db1.tbl1 \n" + - "UNION all \n" + - "SELECT k1 from db1.tbl1) b;"; + String sql12 = "SELECT a.x \n" + + "FROM (SELECT '01' x) a \n" + + "INNER JOIN \n" + + "(SELECT k1 from db1.tbl1 \n" 
+ + "UNION all \n" + + "SELECT k1 from db1.tbl1) b;"; StmtExecutor stmtExecutor12 = new StmtExecutor(connectContext, sql12); stmtExecutor12.execute(); Planner planner12 = stmtExecutor12.planner(); - SetOperationNode setNode12 = (SetOperationNode)(planner12.getFragments().get(1).getPlanRoot()); - Assert.assertEquals(2, setNode12.getMaterializedResultExprLists().size()); + SetOperationNode setNode12 = (SetOperationNode) (planner12.getFragments().get(1).getPlanRoot()); + Assertions.assertEquals(2, setNode12.getMaterializedResultExprLists().size()); } @Test - public void testPushDown() throws Exception{ + public void testPushDown() throws Exception { String sql1 = - "SELECT\n" + - " IF(k2 IS NULL, 'ALL', k2) AS k2,\n" + - " IF(k3 IS NULL, 'ALL', k3) AS k3,\n" + - " k4\n" + - "FROM\n" + - "(\n" + - " SELECT\n" + - " k1,\n" + - " k2,\n" + - " k3,\n" + - " SUM(k4) AS k4\n" + - " FROM db1.tbl1\n" + - " WHERE k1 = 0\n" + - " AND k4 = 1\n" + - " AND k3 = 'foo'\n" + - " GROUP BY \n" + - " GROUPING SETS (\n" + - " (k1),\n" + - " (k1, k2),\n" + - " (k1, k3),\n" + - " (k1, k2, k3)\n" + - " )\n" + - ") t\n" + - "WHERE IF(k2 IS NULL, 'ALL', k2) = 'ALL'"; + "SELECT\n" + + " IF(k2 IS NULL, 'ALL', k2) AS k2,\n" + + " IF(k3 IS NULL, 'ALL', k3) AS k3,\n" + + " k4\n" + + "FROM\n" + + "(\n" + + " SELECT\n" + + " k1,\n" + + " k2,\n" + + " k3,\n" + + " SUM(k4) AS k4\n" + + " FROM db1.tbl1\n" + + " WHERE k1 = 0\n" + + " AND k4 = 1\n" + + " AND k3 = 'foo'\n" + + " GROUP BY \n" + + " GROUPING SETS (\n" + + " (k1),\n" + + " (k1, k2),\n" + + " (k1, k3),\n" + + " (k1, k2, k3)\n" + + " )\n" + + ") t\n" + + "WHERE IF(k2 IS NULL, 'ALL', k2) = 'ALL'"; StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); stmtExecutor1.execute(); Planner planner1 = stmtExecutor1.planner(); @@ -320,24 +320,24 @@ public void testPushDown() throws Exception{ Assert.assertEquals(3, fragments1.get(0).getPlanRoot().getChild(0).getChild(0).conjuncts.size()); String sql2 = - "SELECT\n" + - " IF(k2 IS NULL, 'ALL', k2) AS k2,\n" + - " IF(k3 IS NULL, 'ALL', k3) AS k3,\n" + - " k4\n" + - "FROM\n" + - "(\n" + - " SELECT\n" + - " k1,\n" + - " k2,\n" + - " k3,\n" + - " SUM(k4) AS k4\n" + - " FROM db1.tbl1\n" + - " WHERE k1 = 0\n" + - " AND k4 = 1\n" + - " AND k3 = 'foo'\n" + - " GROUP BY k1, k2, k3\n" + - ") t\n" + - "WHERE IF(k2 IS NULL, 'ALL', k2) = 'ALL'"; + "SELECT\n" + + " IF(k2 IS NULL, 'ALL', k2) AS k2,\n" + + " IF(k3 IS NULL, 'ALL', k3) AS k3,\n" + + " k4\n" + + "FROM\n" + + "(\n" + + " SELECT\n" + + " k1,\n" + + " k2,\n" + + " k3,\n" + + " SUM(k4) AS k4\n" + + " FROM db1.tbl1\n" + + " WHERE k1 = 0\n" + + " AND k4 = 1\n" + + " AND k3 = 'foo'\n" + + " GROUP BY k1, k2, k3\n" + + ") t\n" + + "WHERE IF(k2 IS NULL, 'ALL', k2) = 'ALL'"; StmtExecutor stmtExecutor2 = new StmtExecutor(connectContext, sql2); stmtExecutor2.execute(); Planner planner2 = stmtExecutor2.planner(); @@ -349,9 +349,9 @@ public void testPushDown() throws Exception{ @Test public void testWithStmtSlotIsAllowNull() throws Exception { // union - String sql1 = "with a as (select NULL as user_id ), " + - "b as ( select '543' as user_id) " + - "select user_id from a union all select user_id from b"; + String sql1 = "with a as (select NULL as user_id ), " + + "b as ( select '543' as user_id) " + + "select user_id from a union all select user_id from b"; StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); stmtExecutor1.execute(); @@ -371,11 +371,11 @@ public void testAccessingVisibleColumnWithoutPartition() throws Exception { @Test public void 
testAnalyticSortNodeLeftJoin() throws Exception { - String sql = "SELECT a.k1, a.k3, SUM(COUNT(t.k2)) OVER (PARTITION BY a.k3 ORDER BY a.k1) AS c\n" + - "FROM ( SELECT k1, k3 FROM db1.tbl3) a\n" + - "LEFT JOIN (SELECT 1 AS line, k1, k2, k3 FROM db1.tbl3) t\n" + - "ON t.k1 = a.k1 AND t.k3 = a.k3\n" + - "GROUP BY a.k1, a.k3"; + String sql = "SELECT a.k1, a.k3, SUM(COUNT(t.k2)) OVER (PARTITION BY a.k3 ORDER BY a.k1) AS c\n" + + "FROM ( SELECT k1, k3 FROM db1.tbl3) a\n" + + "LEFT JOIN (SELECT 1 AS line, k1, k2, k3 FROM db1.tbl3) t\n" + + "ON t.k1 = a.k1 AND t.k3 = a.k3\n" + + "GROUP BY a.k1, a.k3"; StmtExecutor stmtExecutor = new StmtExecutor(connectContext, sql); stmtExecutor.execute(); Assert.assertNotNull(stmtExecutor.planner()); @@ -441,8 +441,8 @@ public void testBigintSlotRefCompareDecimalLiteral() { public void testStringType() { String createTbl1 = "create table db1.tbl1(k1 string, k2 varchar(32), k3 varchar(32), k4 int) " + "AGGREGATE KEY(k1, k2,k3,k4) distributed by hash(k1) buckets 3 properties('replication_num' = '1')"; - AnalysisException exception = - Assertions.assertThrows(AnalysisException.class, () -> parseAndAnalyzeStmt(createTbl1)); + AnalysisException exception = Assertions.assertThrows( + AnalysisException.class, () -> parseAndAnalyzeStmt(createTbl1)); Assertions.assertTrue(exception.getMessage().contains("String Type should not be used in key column[k1].")); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java index 81e751a0a4b1da..8c92582c9028f4 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java @@ -65,354 +65,354 @@ protected void runBeforeAll() throws Exception { // create database createDatabase("test"); - createTable("create table test.test1\n" + - "(\n" + - " query_id varchar(48) comment \"Unique query id\",\n" + - " time datetime not null comment \"Query start time\",\n" + - " client_ip varchar(32) comment \"Client IP\",\n" + - " user varchar(64) comment \"User name\",\n" + - " db varchar(96) comment \"Database of this query\",\n" + - " state varchar(8) comment \"Query result state. EOF, ERR, OK\",\n" + - " query_time bigint comment \"Query execution time in millisecond\",\n" + - " scan_bytes bigint comment \"Total scan bytes of this query\",\n" + - " scan_rows bigint comment \"Total scan rows of this query\",\n" + - " return_rows bigint comment \"Returned rows of this query\",\n" + - " stmt_id int comment \"An incremental id of statement\",\n" + - " is_query tinyint comment \"Is this statemt a query. 
1 or 0\",\n" + - " frontend_ip varchar(32) comment \"Frontend ip of executing this statement\",\n" + - " stmt varchar(2048) comment \"The original statement, trimed if longer than 2048 bytes\"\n" + - ")\n" + - "partition by range(time) ()\n" + - "distributed by hash(query_id) buckets 1\n" + - "properties(\n" + - " \"dynamic_partition.time_unit\" = \"DAY\",\n" + - " \"dynamic_partition.start\" = \"-30\",\n" + - " \"dynamic_partition.end\" = \"3\",\n" + - " \"dynamic_partition.prefix\" = \"p\",\n" + - " \"dynamic_partition.buckets\" = \"1\",\n" + - " \"dynamic_partition.enable\" = \"true\",\n" + - " \"replication_num\" = \"1\"\n" + - ");"); - - createTable("CREATE TABLE test.bitmap_table (\n" + - " `id` int(11) NULL COMMENT \"\",\n" + - " `id2` bitmap bitmap_union NULL\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(`id`)\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ");"); - - createTable("CREATE TABLE test.join1 (\n" + - " `dt` int(11) COMMENT \"\",\n" + - " `id` int(11) COMMENT \"\",\n" + - " `value` varchar(8) COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`dt`, `id`)\n" + - "PARTITION BY RANGE(`dt`)\n" + - "(PARTITION p1 VALUES LESS THAN (\"10\"))\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ");"); - - createTable("CREATE TABLE test.join2 (\n" + - " `dt` int(11) COMMENT \"\",\n" + - " `id` int(11) COMMENT \"\",\n" + - " `value` varchar(8) COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`dt`, `id`)\n" + - "PARTITION BY RANGE(`dt`)\n" + - "(PARTITION p1 VALUES LESS THAN (\"10\"))\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ");"); - - createTable("CREATE TABLE test.bitmap_table_2 (\n" + - " `id` int(11) NULL COMMENT \"\",\n" + - " `id2` bitmap bitmap_union NULL,\n" + - " `id3` bitmap bitmap_union NULL\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(`id`)\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ");"); - - createTable("CREATE TABLE test.hll_table (\n" + - " `id` int(11) NULL COMMENT \"\",\n" + - " `id2` hll hll_union NULL\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(`id`)\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ");"); - - createTable("CREATE TABLE test.`bigtable` (\n" + - " `k1` tinyint(4) NULL COMMENT \"\",\n" + - " `k2` smallint(6) NULL COMMENT \"\",\n" + - " `k3` int(11) NULL COMMENT \"\",\n" + - " `k4` bigint(20) NULL COMMENT \"\",\n" + - " `k5` decimal(9, 3) NULL COMMENT \"\",\n" + - " `k6` char(5) NULL COMMENT \"\",\n" + - " `k10` date NULL COMMENT \"\",\n" + - " `k11` datetime NULL COMMENT \"\",\n" + - " `k7` varchar(20) NULL COMMENT \"\",\n" + - " `k8` double MAX NULL COMMENT \"\",\n" + - " `k9` float SUM NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`, `k10`, `k11`, `k7`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ");"); - - createTable("CREATE TABLE test.`baseall` (\n" + - " `k1` tinyint(4) NULL COMMENT \"\",\n" + - " `k2` smallint(6) NULL COMMENT \"\",\n" + - " `k3` int(11) NULL COMMENT \"\",\n" + - " `k4` bigint(20) NULL COMMENT \"\",\n" + - " `k5` decimal(9, 3) NULL COMMENT \"\",\n" + - " `k6` char(5) NULL COMMENT \"\",\n" + - " `k10` date NULL COMMENT \"\",\n" + - " `k11` datetime NULL COMMENT \"\",\n" + - 
" `k7` varchar(20) NULL COMMENT \"\",\n" + - " `k8` double MAX NULL COMMENT \"\",\n" + - " `k9` float SUM NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`, `k10`, `k11`, `k7`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ");"); - - createTable("CREATE TABLE test.`dynamic_partition` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` smallint(6) NULL COMMENT \"\",\n" + - " `k3` int(11) NULL COMMENT \"\",\n" + - " `k4` bigint(20) NULL COMMENT \"\",\n" + - " `k5` decimal(9, 3) NULL COMMENT \"\",\n" + - " `k6` char(5) NULL COMMENT \"\",\n" + - " `k10` date NULL COMMENT \"\",\n" + - " `k11` datetime NULL COMMENT \"\",\n" + - " `k7` varchar(20) NULL COMMENT \"\",\n" + - " `k8` double MAX NULL COMMENT \"\",\n" + - " `k9` float SUM NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`, `k10`, `k11`, `k7`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"); - - createTable("CREATE TABLE test.`app_profile` (\n" + - " `event_date` date NOT NULL COMMENT \"\",\n" + - " `app_name` varchar(64) NOT NULL COMMENT \"\",\n" + - " `package_name` varchar(64) NOT NULL COMMENT \"\",\n" + - " `age` varchar(32) NOT NULL COMMENT \"\",\n" + - " `gender` varchar(32) NOT NULL COMMENT \"\",\n" + - " `level` varchar(64) NOT NULL COMMENT \"\",\n" + - " `city` varchar(64) NOT NULL COMMENT \"\",\n" + - " `model` varchar(64) NOT NULL COMMENT \"\",\n" + - " `brand` varchar(64) NOT NULL COMMENT \"\",\n" + - " `hours` varchar(16) NOT NULL COMMENT \"\",\n" + - " `use_num` int(11) SUM NOT NULL COMMENT \"\",\n" + - " `use_time` double SUM NOT NULL COMMENT \"\",\n" + - " `start_times` bigint(20) SUM NOT NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(`event_date`, `app_name`, `package_name`, `age`, `gender`, `level`, `city`, `model`, `brand`, `hours`)\n" + createTable("create table test.test1\n" + + "(\n" + + " query_id varchar(48) comment \"Unique query id\",\n" + + " time datetime not null comment \"Query start time\",\n" + + " client_ip varchar(32) comment \"Client IP\",\n" + + " user varchar(64) comment \"User name\",\n" + + " db varchar(96) comment \"Database of this query\",\n" + + " state varchar(8) comment \"Query result state. EOF, ERR, OK\",\n" + + " query_time bigint comment \"Query execution time in millisecond\",\n" + + " scan_bytes bigint comment \"Total scan bytes of this query\",\n" + + " scan_rows bigint comment \"Total scan rows of this query\",\n" + + " return_rows bigint comment \"Returned rows of this query\",\n" + + " stmt_id int comment \"An incremental id of statement\",\n" + + " is_query tinyint comment \"Is this statemt a query. 
1 or 0\",\n" + + " frontend_ip varchar(32) comment \"Frontend ip of executing this statement\",\n" + + " stmt varchar(2048) comment \"The original statement, trimed if longer than 2048 bytes\"\n" + + ")\n" + + "partition by range(time) ()\n" + + "distributed by hash(query_id) buckets 1\n" + + "properties(\n" + + " \"dynamic_partition.time_unit\" = \"DAY\",\n" + + " \"dynamic_partition.start\" = \"-30\",\n" + + " \"dynamic_partition.end\" = \"3\",\n" + + " \"dynamic_partition.prefix\" = \"p\",\n" + + " \"dynamic_partition.buckets\" = \"1\",\n" + + " \"dynamic_partition.enable\" = \"true\",\n" + + " \"replication_num\" = \"1\"\n" + + ");"); + + createTable("CREATE TABLE test.bitmap_table (\n" + + " `id` int(11) NULL COMMENT \"\",\n" + + " `id2` bitmap bitmap_union NULL\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(`id`)\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ");"); + + createTable("CREATE TABLE test.join1 (\n" + + " `dt` int(11) COMMENT \"\",\n" + + " `id` int(11) COMMENT \"\",\n" + + " `value` varchar(8) COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`dt`, `id`)\n" + + "PARTITION BY RANGE(`dt`)\n" + + "(PARTITION p1 VALUES LESS THAN (\"10\"))\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ");"); + + createTable("CREATE TABLE test.join2 (\n" + + " `dt` int(11) COMMENT \"\",\n" + + " `id` int(11) COMMENT \"\",\n" + + " `value` varchar(8) COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`dt`, `id`)\n" + + "PARTITION BY RANGE(`dt`)\n" + + "(PARTITION p1 VALUES LESS THAN (\"10\"))\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ");"); + + createTable("CREATE TABLE test.bitmap_table_2 (\n" + + " `id` int(11) NULL COMMENT \"\",\n" + + " `id2` bitmap bitmap_union NULL,\n" + + " `id3` bitmap bitmap_union NULL\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(`id`)\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ");"); + + createTable("CREATE TABLE test.hll_table (\n" + + " `id` int(11) NULL COMMENT \"\",\n" + + " `id2` hll hll_union NULL\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(`id`)\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 1\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ");"); + + createTable("CREATE TABLE test.`bigtable` (\n" + + " `k1` tinyint(4) NULL COMMENT \"\",\n" + + " `k2` smallint(6) NULL COMMENT \"\",\n" + + " `k3` int(11) NULL COMMENT \"\",\n" + + " `k4` bigint(20) NULL COMMENT \"\",\n" + + " `k5` decimal(9, 3) NULL COMMENT \"\",\n" + + " `k6` char(5) NULL COMMENT \"\",\n" + + " `k10` date NULL COMMENT \"\",\n" + + " `k11` datetime NULL COMMENT \"\",\n" + + " `k7` varchar(20) NULL COMMENT \"\",\n" + + " `k8` double MAX NULL COMMENT \"\",\n" + + " `k9` float SUM NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`, `k10`, `k11`, `k7`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ");"); + + createTable("CREATE TABLE test.`baseall` (\n" + + " `k1` tinyint(4) NULL COMMENT \"\",\n" + + " `k2` smallint(6) NULL COMMENT \"\",\n" + + " `k3` int(11) NULL COMMENT \"\",\n" + + " `k4` bigint(20) NULL COMMENT \"\",\n" + + " `k5` decimal(9, 3) NULL COMMENT \"\",\n" + + " `k6` char(5) NULL COMMENT \"\",\n" + + " `k10` date NULL COMMENT \"\",\n" + + " `k11` datetime NULL COMMENT \"\",\n" + + 
" `k7` varchar(20) NULL COMMENT \"\",\n" + + " `k8` double MAX NULL COMMENT \"\",\n" + + " `k9` float SUM NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`, `k10`, `k11`, `k7`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ");"); + + createTable("CREATE TABLE test.`dynamic_partition` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` smallint(6) NULL COMMENT \"\",\n" + + " `k3` int(11) NULL COMMENT \"\",\n" + + " `k4` bigint(20) NULL COMMENT \"\",\n" + + " `k5` decimal(9, 3) NULL COMMENT \"\",\n" + + " `k6` char(5) NULL COMMENT \"\",\n" + + " `k10` date NULL COMMENT \"\",\n" + + " `k11` datetime NULL COMMENT \"\",\n" + + " `k7` varchar(20) NULL COMMENT \"\",\n" + + " `k8` double MAX NULL COMMENT \"\",\n" + + " `k9` float SUM NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`, `k10`, `k11`, `k7`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"); + + createTable("CREATE TABLE test.`app_profile` (\n" + + " `event_date` date NOT NULL COMMENT \"\",\n" + + " `app_name` varchar(64) NOT NULL COMMENT \"\",\n" + + " `package_name` varchar(64) NOT NULL COMMENT \"\",\n" + + " `age` varchar(32) NOT NULL COMMENT \"\",\n" + + " `gender` varchar(32) NOT NULL COMMENT \"\",\n" + + " `level` varchar(64) NOT NULL COMMENT \"\",\n" + + " `city` varchar(64) NOT NULL COMMENT \"\",\n" + + " `model` varchar(64) NOT NULL COMMENT \"\",\n" + + " `brand` varchar(64) NOT NULL COMMENT \"\",\n" + + " `hours` varchar(16) NOT NULL COMMENT \"\",\n" + + " `use_num` int(11) SUM NOT NULL COMMENT \"\",\n" + + " `use_time` double SUM NOT NULL COMMENT \"\",\n" + + " `start_times` bigint(20) SUM NOT NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(`event_date`, `app_name`, `package_name`, `age`, `gender`, `level`, `city`, `model`, `brand`, `hours`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE(`event_date`)\n" + - "(PARTITION p_20200301 VALUES [('2020-02-27'), ('2020-03-02')),\n" + - "PARTITION p_20200306 VALUES [('2020-03-02'), ('2020-03-07')))\n" + - "DISTRIBUTED BY HASH(`event_date`, `app_name`, `package_name`, `age`, `gender`, `level`, `city`, `model`, `brand`, `hours`) BUCKETS 1\n" + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE(`event_date`)\n" + + "(PARTITION p_20200301 VALUES [('2020-02-27'), ('2020-03-02')),\n" + + "PARTITION p_20200306 VALUES [('2020-03-02'), ('2020-03-07')))\n" + + "DISTRIBUTED BY HASH(`event_date`, `app_name`, `package_name`, `age`, `gender`, `level`, `city`, `model`, `brand`, `hours`) BUCKETS 1\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ");"); - - createTable("CREATE TABLE test.`pushdown_test` (\n" + - " `k1` tinyint(4) NULL COMMENT \"\",\n" + - " `k2` smallint(6) NULL COMMENT \"\",\n" + - " `k3` int(11) NULL COMMENT \"\",\n" + - " `k4` bigint(20) NULL COMMENT \"\",\n" + - " `k5` decimal(9, 3) NULL 
COMMENT \"\",\n" + - " `k6` char(5) NULL COMMENT \"\",\n" + - " `k10` date NULL COMMENT \"\",\n" + - " `k11` datetime NULL COMMENT \"\",\n" + - " `k7` varchar(20) NULL COMMENT \"\",\n" + - " `k8` double MAX NULL COMMENT \"\",\n" + - " `k9` float SUM NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`, `k10`, `k11`, `k7`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE(`k1`)\n" + - "(PARTITION p1 VALUES [(\"-128\"), (\"-64\")),\n" + - "PARTITION p2 VALUES [(\"-64\"), (\"0\")),\n" + - "PARTITION p3 VALUES [(\"0\"), (\"64\")))\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"DEFAULT\"\n" + - ");"); - - createTable("create table test.jointest\n" + - "(k1 int, k2 int) distributed by hash(k1) buckets 1\n" + - "properties(\"replication_num\" = \"1\");"); - - createTable("create table test.bucket_shuffle1\n" + - "(k1 int, k2 int, k3 int) distributed by hash(k1, k2) buckets 5\n" + - "properties(\"replication_num\" = \"1\"" + - ");"); - - createTable("CREATE TABLE test.`bucket_shuffle2` (\n" + - " `k1` int NULL COMMENT \"\",\n" + - " `k2` int(6) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE(`k1`)\n" + - "(PARTITION p1 VALUES [(\"-128\"), (\"-64\")),\n" + - "PARTITION p2 VALUES [(\"-64\"), (\"0\")),\n" + - "PARTITION p3 VALUES [(\"0\"), (\"64\")))\n" + - "DISTRIBUTED BY HASH(k1, k2) BUCKETS 5\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"DEFAULT\"\n" + - ");"); - - createTable("create table test.colocate1\n" + - "(k1 int, k2 int, k3 int) distributed by hash(k1, k2) buckets 1\n" + - "properties(\"replication_num\" = \"1\"," + - "\"colocate_with\" = \"group1\");"); - - createTable("create table test.colocate2\n" + - "(k1 int, k2 int, k3 int) distributed by hash(k1, k2) buckets 1\n" + - "properties(\"replication_num\" = \"1\"," + - "\"colocate_with\" = \"group1\");"); - - createTable("create external table test.mysql_table\n" + - "(k1 int, k2 int)\n" + - "ENGINE=MYSQL\n" + - "PROPERTIES (\n" + - "\"host\" = \"127.0.0.1\",\n" + - "\"port\" = \"3306\",\n" + - "\"user\" = \"root\",\n" + - "\"password\" = \"123\",\n" + - "\"database\" = \"db1\",\n" + - "\"table\" = \"tbl1\"\n" + - ");"); - - createTable("CREATE TABLE test.`table_partitioned` (\n" + - " `dt` int(11) NOT NULL COMMENT \"\",\n" + - " `dis_key` varchar(20) NOT NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`dt`, `dis_key`)\n" + - "PARTITION BY RANGE(`dt`)\n" + - "(PARTITION p20200101 VALUES [(\"-1\"), (\"20200101\")),\n" + - "PARTITION p20200201 VALUES [(\"20200101\"), (\"20200201\")))\n" + - "DISTRIBUTED BY HASH(`dt`, `dis_key`) BUCKETS 2\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ");"); - - createTable("CREATE TABLE test.`table_unpartitioned` (\n" + - " `dt` int(11) NOT NULL COMMENT \"\",\n" + - " `dis_key` varchar(20) NOT NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`dt`, `dis_key`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`dt`, `dis_key`) BUCKETS 2\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ");"); - - createTable("create external table test.odbc_oracle\n" + - "(k1 float, k2 int)\n" + - "ENGINE=ODBC\n" + - "PROPERTIES (\n" + - "\"host\" = \"127.0.0.1\",\n" + - "\"port\" = \"3306\",\n" + - "\"user\" = \"root\",\n" + - "\"password\" = \"123\",\n" + - "\"database\" = 
\"db1\",\n" + - "\"table\" = \"tbl1\",\n" + - "\"driver\" = \"Oracle Driver\",\n" + - "\"odbc_type\" = \"oracle\"\n" + - ");"); - - createTable("create external table test.odbc_mysql\n" + - "(k1 int, k2 int)\n" + - "ENGINE=ODBC\n" + - "PROPERTIES (\n" + - "\"host\" = \"127.0.0.1\",\n" + - "\"port\" = \"3306\",\n" + - "\"user\" = \"root\",\n" + - "\"password\" = \"123\",\n" + - "\"database\" = \"db1\",\n" + - "\"table\" = \"tbl1\",\n" + - "\"driver\" = \"Oracle Driver\",\n" + - "\"odbc_type\" = \"mysql\"\n" + - ");"); - - createTable("create table test.tbl_int_date (" + - "`date` datetime NULL," + - "`day` date NULL," + - "`site_id` int(11) NULL )" + - " ENGINE=OLAP " + - "DUPLICATE KEY(`date`, `day`, `site_id`)" + - "DISTRIBUTED BY HASH(`site_id`) BUCKETS 10 " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ");"); + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ");"); + + createTable("CREATE TABLE test.`pushdown_test` (\n" + + " `k1` tinyint(4) NULL COMMENT \"\",\n" + + " `k2` smallint(6) NULL COMMENT \"\",\n" + + " `k3` int(11) NULL COMMENT \"\",\n" + + " `k4` bigint(20) NULL COMMENT \"\",\n" + + " `k5` decimal(9, 3) NULL COMMENT \"\",\n" + + " `k6` char(5) NULL COMMENT \"\",\n" + + " `k10` date NULL COMMENT \"\",\n" + + " `k11` datetime NULL COMMENT \"\",\n" + + " `k7` varchar(20) NULL COMMENT \"\",\n" + + " `k8` double MAX NULL COMMENT \"\",\n" + + " `k9` float SUM NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`, `k10`, `k11`, `k7`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE(`k1`)\n" + + "(PARTITION p1 VALUES [(\"-128\"), (\"-64\")),\n" + + "PARTITION p2 VALUES [(\"-64\"), (\"0\")),\n" + + "PARTITION p3 VALUES [(\"0\"), (\"64\")))\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"DEFAULT\"\n" + + ");"); + + createTable("create table test.jointest\n" + + "(k1 int, k2 int) distributed by hash(k1) buckets 1\n" + + "properties(\"replication_num\" = \"1\");"); + + createTable("create table test.bucket_shuffle1\n" + + "(k1 int, k2 int, k3 int) distributed by hash(k1, k2) buckets 5\n" + + "properties(\"replication_num\" = \"1\"" + + ");"); + + createTable("CREATE TABLE test.`bucket_shuffle2` (\n" + + " `k1` int NULL COMMENT \"\",\n" + + " `k2` int(6) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE(`k1`)\n" + + "(PARTITION p1 VALUES [(\"-128\"), (\"-64\")),\n" + + "PARTITION p2 VALUES [(\"-64\"), (\"0\")),\n" + + "PARTITION p3 VALUES [(\"0\"), (\"64\")))\n" + + "DISTRIBUTED BY HASH(k1, k2) BUCKETS 5\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"DEFAULT\"\n" + + ");"); + + createTable("create table test.colocate1\n" + + "(k1 int, k2 int, k3 int) distributed by hash(k1, k2) buckets 1\n" + + "properties(\"replication_num\" = \"1\"," + + "\"colocate_with\" = \"group1\");"); + + createTable("create table test.colocate2\n" + + "(k1 int, k2 int, k3 int) distributed by hash(k1, k2) buckets 1\n" + + "properties(\"replication_num\" = \"1\"," + + "\"colocate_with\" = \"group1\");"); + + createTable("create external table test.mysql_table\n" + + "(k1 int, k2 int)\n" + + "ENGINE=MYSQL\n" + + "PROPERTIES (\n" + + "\"host\" = \"127.0.0.1\",\n" + + "\"port\" = \"3306\",\n" + + "\"user\" = \"root\",\n" + + "\"password\" = 
\"123\",\n" + + "\"database\" = \"db1\",\n" + + "\"table\" = \"tbl1\"\n" + + ");"); + + createTable("CREATE TABLE test.`table_partitioned` (\n" + + " `dt` int(11) NOT NULL COMMENT \"\",\n" + + " `dis_key` varchar(20) NOT NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`dt`, `dis_key`)\n" + + "PARTITION BY RANGE(`dt`)\n" + + "(PARTITION p20200101 VALUES [(\"-1\"), (\"20200101\")),\n" + + "PARTITION p20200201 VALUES [(\"20200101\"), (\"20200201\")))\n" + + "DISTRIBUTED BY HASH(`dt`, `dis_key`) BUCKETS 2\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ");"); + + createTable("CREATE TABLE test.`table_unpartitioned` (\n" + + " `dt` int(11) NOT NULL COMMENT \"\",\n" + + " `dis_key` varchar(20) NOT NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`dt`, `dis_key`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`dt`, `dis_key`) BUCKETS 2\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ");"); + + createTable("create external table test.odbc_oracle\n" + + "(k1 float, k2 int)\n" + + "ENGINE=ODBC\n" + + "PROPERTIES (\n" + + "\"host\" = \"127.0.0.1\",\n" + + "\"port\" = \"3306\",\n" + + "\"user\" = \"root\",\n" + + "\"password\" = \"123\",\n" + + "\"database\" = \"db1\",\n" + + "\"table\" = \"tbl1\",\n" + + "\"driver\" = \"Oracle Driver\",\n" + + "\"odbc_type\" = \"oracle\"\n" + + ");"); + + createTable("create external table test.odbc_mysql\n" + + "(k1 int, k2 int)\n" + + "ENGINE=ODBC\n" + + "PROPERTIES (\n" + + "\"host\" = \"127.0.0.1\",\n" + + "\"port\" = \"3306\",\n" + + "\"user\" = \"root\",\n" + + "\"password\" = \"123\",\n" + + "\"database\" = \"db1\",\n" + + "\"table\" = \"tbl1\",\n" + + "\"driver\" = \"Oracle Driver\",\n" + + "\"odbc_type\" = \"mysql\"\n" + + ");"); + + createTable("create table test.tbl_int_date (" + + "`date` datetime NULL," + + "`day` date NULL," + + "`site_id` int(11) NULL )" + + " ENGINE=OLAP " + + "DUPLICATE KEY(`date`, `day`, `site_id`)" + + "DISTRIBUTED BY HASH(`site_id`) BUCKETS 10 " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ");"); createView("create view test.tbl_null_column_view AS SELECT *,NULL as add_column FROM test.test1;"); createView("create view test.function_view AS SELECT query_id, client_ip, concat(user, db) as concat FROM test.test1;"); - createTable("create table test.tbl_using_a\n" + - "(\n" + - " k1 int,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 3 " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"" + - ");"); - - createTable("create table test.tbl_using_b\n" + - "(\n" + - " k1 int,\n" + - " k2 int,\n" + - " k3 int \n" + - ")\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 3 " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"" + - ");"); + createTable("create table test.tbl_using_a\n" + + "(\n" + + " k1 int,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 3 " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"" + + ");"); + + createTable("create table test.tbl_using_b\n" + + "(\n" + + " k1 int,\n" + + " k2 int,\n" + + " k3 int \n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 3 " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"" + + ");"); } @Test @@ -424,24 +424,24 @@ public void testFunctionViewGroupingSet() throws Exception { @Test public void testBitmapInsertInto() throws Exception { assertSQLPlanOrErrorMsgContains( - "INSERT INTO test.bitmap_table (id, id2) VALUES (1001, to_bitmap(1000)), (1001, 
to_bitmap(2000));", - "OLAP TABLE SINK"); + "INSERT INTO test.bitmap_table (id, id2) VALUES (1001, to_bitmap(1000)), (1001, to_bitmap(2000));", + "OLAP TABLE SINK"); assertSQLPlanOrErrorMsgContains( - "insert into test.bitmap_table select id, bitmap_union(id2) from test.bitmap_table_2 group by id;", - "OLAP TABLE SINK", - "bitmap_union", - "1:AGGREGATE", - "0:OlapScanNode"); + "insert into test.bitmap_table select id, bitmap_union(id2) from test.bitmap_table_2 group by id;", + "OLAP TABLE SINK", + "bitmap_union", + "1:AGGREGATE", + "0:OlapScanNode"); assertSQLPlanOrErrorMsgContains( - "insert into test.bitmap_table select id, id2 from test.bitmap_table_2;", - "OLAP TABLE SINK", - "OUTPUT EXPRS:`id` | `id2`", - "0:OlapScanNode"); + "insert into test.bitmap_table select id, id2 from test.bitmap_table_2;", + "OLAP TABLE SINK", + "OUTPUT EXPRS:`id` | `id2`", + "0:OlapScanNode"); assertSQLPlanOrErrorMsgContains("insert into test.bitmap_table select id, id from test.bitmap_table_2;", - "bitmap column require the function return type is BITMAP"); + "bitmap column require the function return type is BITMAP"); } @Test @@ -691,14 +691,14 @@ public void testDateTypeCastSyntax() throws Exception { @Test public void testDateTypeEquality() throws Exception { // related to Github issue #3309 - String loadStr = "load label test.app_profile_20200306\n" + - "(DATA INFILE('filexxx')INTO TABLE app_profile partition (p_20200306)\n" + - "COLUMNS TERMINATED BY '\\t'\n" + - "(app_name,package_name,age,gender,level,city,model,brand,hours,use_num,use_time,start_times)\n" + - "SET\n" + - "(event_date = default_value('2020-03-06'))) \n" + - "PROPERTIES ( 'max_filter_ratio'='0.0001' );\n" + - ""; + String loadStr = "load label test.app_profile_20200306\n" + + "(DATA INFILE('filexxx')INTO TABLE app_profile partition (p_20200306)\n" + + "COLUMNS TERMINATED BY '\\t'\n" + + "(app_name,package_name,age,gender,level,city,model,brand,hours,use_num,use_time,start_times)\n" + + "SET\n" + + "(event_date = default_value('2020-03-06'))) \n" + + "PROPERTIES ( 'max_filter_ratio'='0.0001' );\n" + + ""; LoadStmt loadStmt = (LoadStmt) parseAndAnalyzeStmt(loadStr); Catalog.getCurrentCatalog().getLoadManager().createLoadJobV1FromStmt(loadStmt, EtlJobType.HADOOP, System.currentTimeMillis()); @@ -711,37 +711,37 @@ public void testJoinPredicateTransitivity() throws Exception { ConnectContext.get().getSessionVariable().setEnableInferPredicate(true); /* TODO: commit on_clause and where_clause Cross-identification // test left join : left table where binary predicate - String sql = "select join1.id\n" + - "from join1\n" + - "left join join2 on join1.id = join2.id\n" + - "where join1.id > 1;"; + String sql = "select join1.id\n" + + "from join1\n" + + "left join join2 on join1.id = join2.id\n" + + "where join1.id > 1;"; String explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` > 1")); Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); // test left join: left table where in predicate - sql = "select join1.id\n" + - "from join1\n" + - "left join join2 on join1.id = join2.id\n" + - "where join1.id in (2);"; + sql = "select join1.id\n" + + "from join1\n" + + "left join join2 on join1.id = join2.id\n" + + "where join1.id in (2);"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` IN (2)")); Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` IN (2)")); // test left 
join: left table where between predicate - sql = "select join1.id\n" + - "from join1\n" + - "left join join2 on join1.id = join2.id\n" + - "where join1.id BETWEEN 1 AND 2;"; + sql = "select join1.id\n" + + "from join1\n" + + "left join join2 on join1.id = join2.id\n" + + "where join1.id BETWEEN 1 AND 2;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` >= 1, `join1`.`id` <= 2")); Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` >= 1, `join2`.`id` <= 2")); */ // test left join: left table join predicate, left table couldn't push down - String sql = "select *\n from join1\n" + - "left join join2 on join1.id = join2.id\n" + - "and join1.id > 1;"; + String sql = "select *\n from join1\n" + + "left join join2 on join1.id = join2.id\n" + + "and join1.id > 1;"; String explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("other join predicates: `join1`.`id` > 1")); Assert.assertFalse(explainString.contains("PREDICATES: `join1`.`id` > 1")); @@ -750,111 +750,111 @@ public void testJoinPredicateTransitivity() throws Exception { // test left join: right table where predicate. // If we eliminate outer join, we could push predicate down to join1 and join2. // Currently, we push predicate to join1 and keep join predicate for join2 - sql = "select *\n from join1\n" + - "left join join2 on join1.id = join2.id\n" + - "where join2.id > 1;"; + sql = "select *\n from join1\n" + + "left join join2 on join1.id = join2.id\n" + + "where join2.id > 1;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); Assert.assertFalse(explainString.contains("other join predicates: `join2`.`id` > 1")); */ // test left join: right table join predicate, only push down right table - sql = "select *\n from join1\n" + - "left join join2 on join1.id = join2.id\n" + - "and join2.id > 1;"; + sql = "select *\n from join1\n" + + "left join join2 on join1.id = join2.id\n" + + "and join2.id > 1;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` > 1")); Assert.assertFalse(explainString.contains("PREDICATES: `join1`.`id` > 1")); /* // test inner join: left table where predicate, both push down left table and right table - sql = "select *\n from join1\n" + - "join join2 on join1.id = join2.id\n" + - "where join1.id > 1;"; + sql = "select *\n from join1\n" + + "join join2 on join1.id = join2.id\n" + + "where join1.id > 1;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` > 1")); */ // test inner join: left table join predicate, both push down left table and right table - sql = "select *\n from join1\n" + - "join join2 on join1.id = join2.id\n" + - "and join1.id > 1;"; + sql = "select *\n from join1\n" + + "join join2 on join1.id = join2.id\n" + + "and join1.id > 1;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` > 1")); /* // test inner join: right table where predicate, both push down left table and right table - sql = "select *\n from join1\n" + - "join join2 on join1.id = join2.id\n" + - "where join2.id > 1;"; + sql = "select *\n from join1\n" + + "join join2 
on join1.id = join2.id\n" + + "where join2.id > 1;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` > 1")); */ // test inner join: right table join predicate, both push down left table and right table - sql = "select *\n from join1\n" + - "join join2 on join1.id = join2.id\n" + - "and 1 < join2.id;"; + sql = "select *\n from join1\n" + + + "join join2 on join1.id = join2.id\n" + "and 1 < join2.id;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` > 1")); - sql = "select *\n from join1\n" + - "join join2 on join1.id = join2.value\n" + - "and join2.value in ('abc');"; + sql = "select *\n from join1\n" + + "join join2 on join1.id = join2.value\n" + + "and join2.value in ('abc');"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertFalse(explainString.contains("'abc' is not a number")); Assert.assertFalse(explainString.contains("`join1`.`value` IN ('abc')")); // test anti join, right table join predicate, only push to right table - sql = "select *\n from join1\n" + - "left anti join join2 on join1.id = join2.id\n" + - "and join2.id > 1;"; + sql = "select *\n from join1\n" + + "left anti join join2 on join1.id = join2.id\n" + + "and join2.id > 1;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` > 1")); Assert.assertFalse(explainString.contains("PREDICATES: `join1`.`id` > 1")); // test semi join, right table join predicate, only push to right table - sql = "select *\n from join1\n" + - "left semi join join2 on join1.id = join2.id\n" + - "and join2.id > 1;"; + sql = "select *\n from join1\n" + + "left semi join join2 on join1.id = join2.id\n" + + "and join2.id > 1;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` > 1")); Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); // test anti join, left table join predicate, left table couldn't push down - sql = "select *\n from join1\n" + - "left anti join join2 on join1.id = join2.id\n" + - "and join1.id > 1;"; + sql = "select *\n from join1\n" + + "left anti join join2 on join1.id = join2.id\n" + + "and join1.id > 1;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("other join predicates: `join1`.`id` > 1")); Assert.assertFalse(explainString.contains("PREDICATES: `join1`.`id` > 1")); // test semi join, left table join predicate, only push to left table - sql = "select *\n from join1\n" + - "left semi join join2 on join1.id = join2.id\n" + - "and join1.id > 1;"; + sql = "select *\n from join1\n" + + "left semi join join2 on join1.id = join2.id\n" + + "and join1.id > 1;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); /* // test anti join, left table where predicate, only push to left table - sql = "select join1.id\n" + - "from join1\n" + - "left anti join join2 on join1.id = join2.id\n" + - "where join1.id > 1;"; + sql = "select join1.id\n" + + "from join1\n" + + "left anti join join2 on join1.id = join2.id\n" + + "where join1.id > 1;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); 
Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); Assert.assertFalse(explainString.contains("PREDICATES: `join2`.`id` > 1")); // test semi join, left table where predicate, only push to left table - sql = "select join1.id\n" + - "from join1\n" + - "left semi join join2 on join1.id = join2.id\n" + - "where join1.id > 1;"; + sql = "select join1.id\n" + + "from join1\n" + + "left semi join join2 on join1.id = join2.id\n" + + "where join1.id > 1;"; explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); Assert.assertFalse(explainString.contains("PREDICATES: `join2`.`id` > 1")); @@ -989,11 +989,11 @@ public void testConvertCaseWhenToConstant() throws Exception { @Test public void testJoinPredicateTransitivityWithSubqueryInWhereClause() throws Exception { connectContext.setDatabase("default_cluster:test"); - String sql = "SELECT *\n" + - "FROM test.pushdown_test\n" + - "WHERE 0 < (\n" + - " SELECT MAX(k9)\n" + - " FROM test.pushdown_test);"; + String sql = "SELECT *\n" + + + "FROM test.pushdown_test\n" + + "WHERE 0 < (\n" + + " SELECT MAX(k9)\n" + " FROM test.pushdown_test);"; String explainString = getSQLPlanOrErrorMsg("explain " + sql); Assert.assertTrue(explainString.contains("PLAN FRAGMENT")); Assert.assertTrue(explainString.contains("CROSS JOIN")); @@ -1105,13 +1105,15 @@ public void testBucketShuffleJoin() throws Exception { } // single partition - String queryStr = "explain select * from test.jointest t1, test.bucket_shuffle1 t2 where t1.k1 = t2.k1 and t1.k1 = t2.k2"; + String queryStr = "explain select * from test.jointest t1, test.bucket_shuffle1 t2" + + " where t1.k1 = t2.k1 and t1.k1 = t2.k2"; String explainString = getSQLPlanOrErrorMsg(queryStr); Assert.assertTrue(explainString.contains("BUCKET_SHFFULE")); Assert.assertTrue(explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t1`.`k1`, `t1`.`k1`")); // not bucket shuffle join do not support different type - queryStr = "explain select * from test.jointest t1, test.bucket_shuffle1 t2 where cast (t1.k1 as tinyint) = t2.k1 and t1.k1 = t2.k2"; + queryStr = "explain select * from test.jointest t1, test.bucket_shuffle1 t2" + + " where cast (t1.k1 as tinyint) = t2.k1 and t1.k1 = t2.k2"; explainString = getSQLPlanOrErrorMsg(queryStr); Assert.assertTrue(!explainString.contains("BUCKET_SHFFULE")); @@ -1121,32 +1123,37 @@ public void testBucketShuffleJoin() throws Exception { Assert.assertTrue(!explainString.contains("BUCKET_SHFFULE")); // multi partition, should not be bucket shuffle join - queryStr = "explain select * from test.jointest t1, test.bucket_shuffle2 t2 where t1.k1 = t2.k1 and t1.k1 = t2.k2"; + queryStr = "explain select * from test.jointest t1, test.bucket_shuffle2 t2" + + " where t1.k1 = t2.k1 and t1.k1 = t2.k2"; explainString = getSQLPlanOrErrorMsg(queryStr); Assert.assertTrue(!explainString.contains("BUCKET_SHFFULE")); // left table is colocate table, should be bucket shuffle - queryStr = "explain select * from test.colocate1 t1, test.bucket_shuffle2 t2 where t1.k1 = t2.k1 and t1.k1 = t2.k2"; + queryStr = "explain select * from test.colocate1 t1, test.bucket_shuffle2 t2" + + " where t1.k1 = t2.k1 and t1.k1 = t2.k2"; explainString = getSQLPlanOrErrorMsg(queryStr); Assert.assertTrue(!explainString.contains("BUCKET_SHFFULE")); // support recurse of bucket shuffle join - queryStr = "explain select * from test.jointest t1 join test.bucket_shuffle1 t2 on t1.k1 = t2.k1 and t1.k1 = t2.k2 join test.colocate1 t3 " + - "on t2.k1 = t3.k1 
and t2.k2 = t3.k2"; + queryStr = "explain select * from test.jointest t1 join test.bucket_shuffle1 t2" + + " on t1.k1 = t2.k1 and t1.k1 = t2.k2 join test.colocate1 t3" + + " on t2.k1 = t3.k1 and t2.k2 = t3.k2"; explainString = getSQLPlanOrErrorMsg(queryStr); Assert.assertTrue(explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t1`.`k1`, `t1`.`k1`")); Assert.assertTrue(explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t3`.`k1`, `t3`.`k2`")); // support recurse of bucket shuffle because t4 join t2 and join column name is same as t2 distribute column name - queryStr = "explain select * from test.jointest t1 join test.bucket_shuffle1 t2 on t1.k1 = t2.k1 and t1.k1 = t2.k2 join test.colocate1 t3 " + - "on t2.k1 = t3.k1 join test.jointest t4 on t4.k1 = t2.k1 and t4.k1 = t2.k2"; + queryStr = "explain select * from test.jointest t1 join test.bucket_shuffle1 t2" + + " on t1.k1 = t2.k1 and t1.k1 = t2.k2 join test.colocate1 t3" + + " on t2.k1 = t3.k1 join test.jointest t4 on t4.k1 = t2.k1 and t4.k1 = t2.k2"; explainString = getSQLPlanOrErrorMsg(queryStr); Assert.assertTrue(explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t1`.`k1`, `t1`.`k1`")); Assert.assertTrue(explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t4`.`k1`, `t4`.`k1`")); // some column name in join expr t3 join t4 and t1 distribute column name, so should not be bucket shuffle join - queryStr = "explain select * from test.jointest t1 join test.bucket_shuffle1 t2 on t1.k1 = t2.k1 and t1.k1 = t2.k2 join test.colocate1 t3 " + - "on t2.k1 = t3.k1 join test.jointest t4 on t4.k1 = t3.k1 and t4.k2 = t3.k2"; + queryStr = "explain select * from test.jointest t1 join test.bucket_shuffle1 t2" + + " on t1.k1 = t2.k1 and t1.k1 = t2.k2 join test.colocate1 t3" + + " on t2.k1 = t3.k1 join test.jointest t4 on t4.k1 = t3.k1 and t4.k2 = t3.k2"; explainString = getSQLPlanOrErrorMsg(queryStr); Assert.assertTrue(explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t1`.`k1`, `t1`.`k1`")); Assert.assertTrue(!explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t4`.`k1`, `t4`.`k1`")); @@ -1853,7 +1860,7 @@ public void testCompoundPredicateWriteRule() throws Exception { // false or (expr and true) ==> expr String sql10 = "select * from test.test1 where (2=-2) OR (query_time=0 AND 1=1);"; - String explainString10 = getSQLPlanOrErrorMsg( "EXPLAIN " + sql10); + String explainString10 = getSQLPlanOrErrorMsg("EXPLAIN " + sql10); Assert.assertTrue(explainString10.contains("PREDICATES: `query_time` = 0")); } @@ -1861,24 +1868,28 @@ public void testCompoundPredicateWriteRule() throws Exception { public void testOutfile() throws Exception { connectContext.setDatabase("default_cluster:test"); Config.enable_outfile_to_local = true; - createTable("CREATE TABLE test.`outfile1` (\n" + - " `date` date NOT NULL,\n" + - " `road_code` int(11) NOT NULL DEFAULT \"-1\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`date`, `road_code`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE(`date`)\n" + - "(PARTITION v2x_ads_lamp_source_percent_statistic_20210929 VALUES [('2021-09-29'), ('2021-09-30')))\n" + - "DISTRIBUTED BY HASH(`road_code`) BUCKETS 1\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ");"); + createTable("CREATE TABLE test.`outfile1` (\n" + + " `date` date NOT NULL,\n" + + " `road_code` int(11) NOT NULL DEFAULT \"-1\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`date`, `road_code`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE(`date`)\n" + + "(PARTITION v2x_ads_lamp_source_percent_statistic_20210929 VALUES 
[('2021-09-29'), ('2021-09-30')))\n" + + "DISTRIBUTED BY HASH(`road_code`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ");"); // test after query rewrite, outfile still work - String sql = "select * from test.outfile1 where `date` between '2021-10-07' and '2021-10-11'" + - "INTO OUTFILE \"file:///tmp/1_\" FORMAT AS CSV PROPERTIES ( \"column_separator\" = \",\", \"line_delimiter\" = \"\\n\", \"max_file_size\" = \"500MB\" );"; + String sql = "select * from test.outfile1 where `date` between '2021-10-07' and '2021-10-11'" + + "INTO OUTFILE \"file:///tmp/1_\" FORMAT AS CSV PROPERTIES (" + + " \"column_separator\" = \",\"," + + " \"line_delimiter\" = \"\\n\"," + + " \"max_file_size\" = \"500MB\" );"; String explainStr = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainStr.contains("PREDICATES: `date` >= '2021-10-07 00:00:00', `date` <= '2021-10-11 00:00:00'")); + Assert.assertTrue(explainStr.contains("PREDICATES: `date` >= '2021-10-07 00:00:00'," + + " `date` <= '2021-10-11 00:00:00'")); } // Fix: issue-#7929 @@ -1888,31 +1899,32 @@ public void testEmptyNodeWithOuterJoinAndAnalyticFunction() throws Exception { String createDbStmtStr = "create database issue7929;"; CreateDbStmt createDbStmt = (CreateDbStmt) parseAndAnalyzeStmt(createDbStmtStr); Catalog.getCurrentCatalog().createDb(createDbStmt); - createTable(" CREATE TABLE issue7929.`t1` (\n" + - " `k1` int(11) NULL COMMENT \"\",\n" + - " `k2` int(11) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + - "PROPERTIES (\n" + - "\"replication_allocation\" = \"tag.location.default: 1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ")"); - createTable("CREATE TABLE issue7929.`t2` (\n" + - " `j1` int(11) NULL COMMENT \"\",\n" + - " `j2` int(11) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`j1`, `j2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`j1`) BUCKETS 1\n" + - "PROPERTIES (\n" + - "\"replication_allocation\" = \"tag.location.default: 1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ")"); - String sql = "select * from issue7929.t1 left join (select max(j1) over() as x from issue7929.t2)a on t1.k1=a.x where 1=0;"; + createTable(" CREATE TABLE issue7929.`t1` (\n" + + " `k1` int(11) NULL COMMENT \"\",\n" + + " `k2` int(11) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_allocation\" = \"tag.location.default: 1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ")"); + createTable("CREATE TABLE issue7929.`t2` (\n" + + " `j1` int(11) NULL COMMENT \"\",\n" + + " `j2` int(11) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`j1`, `j2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`j1`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_allocation\" = \"tag.location.default: 1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ")"); + String sql = "select * from issue7929.t1 left join (select max(j1) over() as x from issue7929.t2) a" + + " on t1.k1 = a.x where 1 = 0;"; String explainStr = getSQLPlanOrErrorMsg(sql, true); Assert.assertTrue(explainStr.contains("4:EMPTYSET")); Assert.assertTrue(explainStr.contains("tuple ids: 0 1 5")); @@ -1924,15 +1936,15 @@ public void testGroupingSetOutOfBoundError() 
throws Exception { String createDbStmtStr = "create database issue1111;"; CreateDbStmt createDbStmt = (CreateDbStmt) parseAndAnalyzeStmt(createDbStmtStr); Catalog.getCurrentCatalog().createDb(createDbStmt); - createTable("CREATE TABLE issue1111.`test1` (\n" + - " `k1` tinyint(4) NULL COMMENT \"\",\n" + - " `k2` smallint(6) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + - "PROPERTIES (\n" + - "\"replication_allocation\" = \"tag.location.default: 1\"\n" + - ");"); + createTable("CREATE TABLE issue1111.`test1` (\n" + + " `k1` tinyint(4) NULL COMMENT \"\",\n" + + " `k2` smallint(6) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_allocation\" = \"tag.location.default: 1\"\n" + + ");"); String sql = "SELECT k1 ,GROUPING(k2) FROM issue1111.test1 GROUP BY CUBE (k1) ORDER BY k1"; String explainStr = getSQLPlanOrErrorMsg(sql, true); System.out.println(explainStr); @@ -1971,32 +1983,32 @@ public void testGroupingSets() throws Exception { String createDbStmtStr = "create database issue7971;"; CreateDbStmt createDbStmt = (CreateDbStmt) parseAndAnalyzeStmt(createDbStmtStr); Catalog.getCurrentCatalog().createDb(createDbStmt); - createTable("CREATE TABLE issue7971.`t` (\n" + - " `k1` tinyint(4) NULL COMMENT \"\",\n" + - " `k2` smallint(6) NULL COMMENT \"\",\n" + - " `k3` smallint(6) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + - "PROPERTIES (\n" + - "\"replication_allocation\" = \"tag.location.default: 1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ")"); - createTable("CREATE TABLE issue7971.`t1` (\n" + - " `k1` tinyint(4) NULL COMMENT \"\",\n" + - " `k21` smallint(6) NULL COMMENT \"\",\n" + - " `k31` smallint(6) NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + - "PROPERTIES (\n" + - "\"replication_allocation\" = \"tag.location.default: 1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ")"); + createTable("CREATE TABLE issue7971.`t` (\n" + + " `k1` tinyint(4) NULL COMMENT \"\",\n" + + " `k2` smallint(6) NULL COMMENT \"\",\n" + + " `k3` smallint(6) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_allocation\" = \"tag.location.default: 1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ")"); + createTable("CREATE TABLE issue7971.`t1` (\n" + + " `k1` tinyint(4) NULL COMMENT \"\",\n" + + " `k21` smallint(6) NULL COMMENT \"\",\n" + + " `k31` smallint(6) NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_allocation\" = \"tag.location.default: 1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ")"); String sql = "SELECT k1, k2, GROUPING(k1), GROUPING(k2), SUM(k3) FROM issue7971.t GROUP BY GROUPING SETS ( (k1, k2), (k2), (k1), ( ) );"; String explainStr = getSQLPlanOrErrorMsg(sql); Assert.assertTrue(explainStr.contains("REPEAT_NODE")); @@ -2015,8 +2027,8 @@ public void testQueryWithUsingClause() throws Exception { String iSql2 = "explain insert into 
test.tbl_using_b values(1,3,1),(3,1,1),(4,1,1),(5,2,1)"; getSQLPlanOrErrorMsg(iSql1); getSQLPlanOrErrorMsg(iSql2); - String qSQL = "explain select t1.* from test.tbl_using_a t1 join test.tbl_using_b t2 using(k1,k2) where t1.k1 " + - "between 1 and 3 and t2.k3 between 1+0 and 3+0"; + String qSQL = "explain select t1.* from test.tbl_using_a t1 join test.tbl_using_b t2 using(k1,k2) where t1.k1 " + + "between 1 and 3 and t2.k3 between 1+0 and 3+0"; try { getSQLPlanOrErrorMsg(qSQL); } catch (AnalysisException e) { @@ -2027,27 +2039,27 @@ public void testQueryWithUsingClause() throws Exception { @Test public void testResultExprs() throws Exception { connectContext.setDatabase("default_cluster:test"); - createTable("CREATE TABLE test.result_exprs (\n" + - " `aid` int(11) NULL,\n" + - " `bid` int(11) NULL\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`aid`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`aid`) BUCKETS 7\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"business_key_column_name\" = \"\",\n" + - "\"storage_medium\" = \"HDD\",\n" + - "\"storage_format\" = \"V2\"\n" + - ");\n"); - String queryStr = "EXPLAIN VERBOSE INSERT INTO result_exprs\n" + - "SELECT a.aid,\n" + - " b.bid\n" + - "FROM\n" + - " (SELECT 3 AS aid)a\n" + - "RIGHT JOIN\n" + - " (SELECT 4 AS bid)b ON (a.aid=b.bid)\n"; + createTable("CREATE TABLE test.result_exprs (\n" + + " `aid` int(11) NULL,\n" + + " `bid` int(11) NULL\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`aid`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`aid`) BUCKETS 7\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"business_key_column_name\" = \"\",\n" + + "\"storage_medium\" = \"HDD\",\n" + + "\"storage_format\" = \"V2\"\n" + + ");\n"); + String queryStr = "EXPLAIN VERBOSE INSERT INTO result_exprs\n" + + "SELECT a.aid,\n" + + " b.bid\n" + + "FROM\n" + + " (SELECT 3 AS aid)a\n" + + "RIGHT JOIN\n" + + " (SELECT 4 AS bid)b ON (a.aid=b.bid)\n"; String explainString = getSQLPlanOrErrorMsg(queryStr); Assert.assertFalse(explainString.contains("OUTPUT EXPRS:3 | 4")); System.out.println(explainString); @@ -2057,18 +2069,18 @@ public void testResultExprs() throws Exception { @Test public void testInsertIntoSelect() throws Exception { connectContext.setDatabase("default_cluster:test"); - createTable("CREATE TABLE test.`decimal_tb` (\n" + - " `k1` decimal(1, 0) NULL COMMENT \"\",\n" + - " `v1` decimal(1, 0) SUM NULL COMMENT \"\",\n" + - " `v2` decimal(1, 0) MAX NULL COMMENT \"\",\n" + - " `v3` decimal(1, 0) MIN NULL COMMENT \"\",\n" + - " `v4` decimal(1, 0) REPLACE NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "AGGREGATE KEY(`k1`)\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + - "PROPERTIES (\n" + - "\"replication_allocation\" = \"tag.location.default: 1\"\n" + - ")"); + createTable("CREATE TABLE test.`decimal_tb` (\n" + + " `k1` decimal(1, 0) NULL COMMENT \"\",\n" + + " `v1` decimal(1, 0) SUM NULL COMMENT \"\",\n" + + " `v2` decimal(1, 0) MAX NULL COMMENT \"\",\n" + + " `v3` decimal(1, 0) MIN NULL COMMENT \"\",\n" + + " `v4` decimal(1, 0) REPLACE NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "AGGREGATE KEY(`k1`)\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_allocation\" = \"tag.location.default: 1\"\n" + + ")"); String sql = "explain insert into test.decimal_tb select 1, 10, 1, 1, 1;"; String explainString = getSQLPlanOrErrorMsg(sql); Assert.assertTrue(explainString.contains("1 | 10 | 1 | 1 | 1")); @@ 
-2077,21 +2089,21 @@ public void testInsertIntoSelect() throws Exception { @Test public void testOutJoinWithOnFalse() throws Exception { connectContext.setDatabase("default_cluster:test"); - createTable("create table out_join_1\n" + - "(\n" + - " k1 int,\n" + - " v int\n" + - ")\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" + - "PROPERTIES(\"replication_num\" = \"1\");"); - - createTable("create table out_join_2\n" + - "(\n" + - " k1 int,\n" + - " v int\n" + - ")\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" + - "PROPERTIES(\"replication_num\" = \"1\");"); + createTable("create table out_join_1\n" + + "(\n" + + " k1 int,\n" + + " v int\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" + + "PROPERTIES(\"replication_num\" = \"1\");"); + + createTable("create table out_join_2\n" + + "(\n" + + " k1 int,\n" + + " v int\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" + + "PROPERTIES(\"replication_num\" = \"1\");"); String sql = "explain select * from out_join_1 left join out_join_2 on out_join_1.k1 = out_join_2.k1 and 1=2;"; String explainString = getSQLPlanOrErrorMsg(sql); diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/ResourceTagQueryTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/ResourceTagQueryTest.java index 767c7605683fef..0a87605347eda0 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/ResourceTagQueryTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/ResourceTagQueryTest.java @@ -181,15 +181,15 @@ private static void setProperty(String sql) throws Exception { public void test() throws Exception { // create table with default tag - String createStr = "create table test.tbl1\n" + - "(k1 date, k2 int)\n" + - "partition by range(k1)\n" + - "(\n" + - " partition p1 values less than(\"2021-06-01\"),\n" + - " partition p2 values less than(\"2021-07-01\"),\n" + - " partition p3 values less than(\"2021-08-01\")\n" + - ")\n" + - "distributed by hash(k2) buckets 10;"; + String createStr = "create table test.tbl1\n" + + "(k1 date, k2 int)\n" + + "partition by range(k1)\n" + + "(\n" + + " partition p1 values less than(\"2021-06-01\"),\n" + + " partition p2 values less than(\"2021-07-01\"),\n" + + " partition p3 values less than(\"2021-08-01\")\n" + + ")\n" + + "distributed by hash(k2) buckets 10;"; ExceptionChecker.expectThrowsNoException(() -> createTable(createStr)); Database db = Catalog.getCurrentCatalog().getDbNullable("default_cluster:test"); OlapTable tbl = (OlapTable) db.getTableNullable("tbl1"); diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/RuntimeFilterGeneratorTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/RuntimeFilterGeneratorTest.java index 284ef2cc16c393..295037dfb66236 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/RuntimeFilterGeneratorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/RuntimeFilterGeneratorTest.java @@ -153,16 +153,16 @@ public void testGenerateRuntimeFiltersMode() { Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 4); Assert.assertEquals(hashJoinNode.getRuntimeFilters().size(), 4); Assert.assertEquals(lhsScanNode.getRuntimeFilters().size(), 4); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF002[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF003[in_or_bloom] <- 
`default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF002[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF003[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF002[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF003[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF002[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF003[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); clearRuntimeFilterState(); new Expectations() { @@ -177,16 +177,16 @@ public void testGenerateRuntimeFiltersMode() { Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 4); Assert.assertEquals(hashJoinNode.getRuntimeFilters().size(), 4); Assert.assertEquals(lhsScanNode.getRuntimeFilters().size(), 4); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF002[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF003[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF002[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF003[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF002[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF003[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF002[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF003[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); clearRuntimeFilterState(); new Expectations() { @@ -281,12 +281,12 @@ public void testGenerateRuntimeFiltersType() { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[bloom] <- 
`default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 2); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 2); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 2); @@ -319,12 +319,12 @@ public void testGenerateRuntimeFiltersType() { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 2); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 2); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 2); @@ -339,12 +339,12 @@ public void testGenerateRuntimeFiltersType() { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 2); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 2); 
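Two checkstyle rules drive most of the reformatting in these hunks: OperatorWrap, which moves the string-concatenation '+' to the head of the continuation line, and SeparatorWrap, which keeps the argument-separating comma at the end of the preceding line. A minimal sketch of both conventions together, using placeholder names rather than anything from this patch:

    // Hypothetical illustration of the wrap rules enforced by this change.
    // OperatorWrap: '+' begins each continuation line of a split literal.
    String sql = "select k1, k2 from tbl "
            + "where k1 between 1 and 3";
    // SeparatorWrap (comma at end of line): the next argument starts the new line.
    Assert.assertEquals(node.getExplainString(),
            "expected explain output\n");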
Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 2); @@ -359,14 +359,14 @@ public void testGenerateRuntimeFiltersType() { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF002[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF002[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF002[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF002[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 3); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 3); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 3); @@ -381,10 +381,10 @@ public void testGenerateRuntimeFiltersType() { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 1); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 1); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 1); @@ -399,12 +399,12 @@ public void testGenerateRuntimeFiltersType() { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + 
Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 2); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 2); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 2); @@ -419,12 +419,12 @@ public void testGenerateRuntimeFiltersType() { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 2); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 2); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 2); @@ -439,14 +439,14 @@ public void testGenerateRuntimeFiltersType() { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF002[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF002[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF002[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF002[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 3); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 3); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 3); @@ -461,12 +461,12 @@ public void testGenerateRuntimeFiltersType() { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - 
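For context on why this test walks through every in/bloom/min_max/in_or_bloom subset: the runtime filter types to generate are selected by a combinable setting, and in the explain strings '<-' marks the build (producer) side of the join while '->' marks the probe (consumer) side. A rough decoding sketch; the bit values below are an assumption for illustration, not taken from this patch:

    import java.util.ArrayList;
    import java.util.List;

    // Assumed bit values, for illustration only.
    static final int IN = 1;
    static final int BLOOM = 2;
    static final int MIN_MAX = 4;
    static final int IN_OR_BLOOM = 8;

    static List<String> enabledFilterTypes(int mask) {
        List<String> types = new ArrayList<>();
        if ((mask & IN) != 0) types.add("in");
        if ((mask & BLOOM) != 0) types.add("bloom");
        if ((mask & MIN_MAX) != 0) types.add("min_max");
        if ((mask & IN_OR_BLOOM) != 0) types.add("in_or_bloom");
        return types;
    }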
Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 2); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 2); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 2); @@ -481,14 +481,14 @@ public void testGenerateRuntimeFiltersType() { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF002[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF002[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF002[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF002[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 3); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 3); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 3); @@ -504,14 +504,14 @@ public void testGenerateRuntimeFiltersType() { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF002[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF002[in_or_bloom] -> 
`default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF002[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF002[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 3); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 3); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 3); @@ -526,16 +526,16 @@ public void testGenerateRuntimeFiltersType() { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF001[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF002[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + - ", RF003[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF001[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF002[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + - ", RF003[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF001[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF002[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`" + + ", RF003[in_or_bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF001[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF002[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`" + + ", RF003[in_or_bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 4); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 4); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 4); @@ -621,15 +621,15 @@ public void testGenerateRuntimeFiltersSize() { Assert.assertEquals(analyzer.getAssignedRuntimeFilter().get(0).toThrift().getBloomFilterSizeBytes(), 16777216); // Use ndv and fpp to calculate the minimum space required for bloom filter - Assert.assertEquals(1L << - RuntimeFilter.getMinLogSpaceForBloomFilter(1000000, 0.05), 1048576); - Assert.assertEquals(1L << - RuntimeFilter.getMinLogSpaceForBloomFilter(1000000, 0.1), 1048576); - Assert.assertEquals(1L << - RuntimeFilter.getMinLogSpaceForBloomFilter(1000000, 0.3), 524288); - Assert.assertEquals(1L << - RuntimeFilter.getMinLogSpaceForBloomFilter(10000000, 0.1), 8388608); - 
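These size assertions are consistent with the textbook bloom filter bound: the minimum bit count for ndv distinct values at false-positive rate fpp is m = -ndv * ln(fpp) / (ln 2)^2, rounded up here to a power-of-two byte count. A hedged sketch of that arithmetic (hypothetical helper, not the actual RuntimeFilter implementation):

    // Returns log2 of the smallest power-of-two byte size satisfying the
    // optimal-bits bound m = -ndv * ln(fpp) / (ln 2)^2, converted to bytes.
    static int minLogSpaceForBloomFilter(long ndv, double fpp) {
        double bits = -ndv * Math.log(fpp) / (Math.log(2) * Math.log(2));
        double bytes = bits / 8;
        int log = 0;
        while ((1L << log) < bytes) {
            log++;
        }
        return log;
    }
    // e.g. 1L << minLogSpaceForBloomFilter(1_000_000, 0.05) == 1048576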
Assert.assertEquals(1L << - RuntimeFilter.getMinLogSpaceForBloomFilter(1000, 0.1), 1024); + Assert.assertEquals(1048576, 1L + << RuntimeFilter.getMinLogSpaceForBloomFilter(1000000, 0.05)); + Assert.assertEquals(1048576, 1L + << RuntimeFilter.getMinLogSpaceForBloomFilter(1000000, 0.1)); + Assert.assertEquals(524288, 1L + << RuntimeFilter.getMinLogSpaceForBloomFilter(1000000, 0.3)); + Assert.assertEquals(8388608, 1L + << RuntimeFilter.getMinLogSpaceForBloomFilter(10000000, 0.1)); + Assert.assertEquals(1024, 1L + << RuntimeFilter.getMinLogSpaceForBloomFilter(1000, 0.1)); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/StreamLoadScanNodeTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/StreamLoadScanNodeTest.java index 545f4c7c9dcdfc..44e6e2ec62f19e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/StreamLoadScanNodeTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/StreamLoadScanNodeTest.java @@ -191,15 +191,24 @@ public void testNormal() throws UserException { TStreamLoadPutRequest request = getBaseRequest(); StreamLoadScanNode scanNode = getStreamLoadScanNode(dstDesc, request); - new Expectations() {{ - dstTable.getBaseSchema(); result = columns; - dstTable.getBaseSchema(anyBoolean); result = columns; - dstTable.getFullSchema(); result = columns; - dstTable.getColumn("k1"); result = columns.get(0); - dstTable.getColumn("k2"); result = columns.get(1); - dstTable.getColumn("v1"); result = columns.get(2); - dstTable.getColumn("v2"); result = columns.get(3); - }}; + new Expectations() { + { + dstTable.getBaseSchema(); + result = columns; + dstTable.getBaseSchema(anyBoolean); + result = columns; + dstTable.getFullSchema(); + result = columns; + dstTable.getColumn("k1"); + result = columns.get(0); + dstTable.getColumn("k2"); + result = columns.get(1); + dstTable.getColumn("v1"); + result = columns.get(2); + dstTable.getColumn("v2"); + result = columns.get(3); + } + }; scanNode.init(analyzer); scanNode.finalize(analyzer); scanNode.getNodeExplainString("", TExplainLevel.NORMAL); @@ -404,7 +413,7 @@ public void testHllColumnsNoHllHash() throws UserException { TStreamLoadPutRequest request = getBaseRequest(); request.setFileType(TFileType.FILE_LOCAL); request.setColumns("k1,k2, v1=hll_hash1(k2)"); - StreamLoadTask streamLoadTask = StreamLoadTask.fromTStreamLoadPutRequest(request); + StreamLoadTask.fromTStreamLoadPutRequest(request); StreamLoadScanNode scanNode = getStreamLoadScanNode(dstDesc, request); scanNode.init(analyzer); @@ -492,29 +501,31 @@ public void testColumnsUnknownRef() throws UserException, UserException { } } - new Expectations() {{ - dstTable.getBaseSchema(); - minTimes = 0; - result = columns; - dstTable.getBaseSchema(anyBoolean); - minTimes = 0; - result = columns; - dstTable.getFullSchema(); - minTimes = 0; - result = columns; - dstTable.getColumn("k1"); - minTimes = 0; - result = columns.get(0); - dstTable.getColumn("k2"); - minTimes = 0; - result = columns.get(1); - dstTable.getColumn("v1"); - minTimes = 0; - result = columns.get(2); - dstTable.getColumn("v2"); - minTimes = 0; - result = columns.get(3); - }}; + new Expectations() { + { + dstTable.getBaseSchema(); + minTimes = 0; + result = columns; + dstTable.getBaseSchema(anyBoolean); + minTimes = 0; + result = columns; + dstTable.getFullSchema(); + minTimes = 0; + result = columns; + dstTable.getColumn("k1"); + minTimes = 0; + result = columns.get(0); + dstTable.getColumn("k2"); + minTimes = 0; + result = columns.get(1); + dstTable.getColumn("v1"); + minTimes = 0; + 
result = columns.get(2); + dstTable.getColumn("v2"); + minTimes = 0; + result = columns.get(3); + } + }; TStreamLoadPutRequest request = getBaseRequest(); request.setColumns("k1,k2,v1, v2=k3"); @@ -642,29 +653,31 @@ public void testWhereUnknownRef() throws UserException, UserException { } } - new Expectations() {{ - dstTable.getBaseSchema(); - minTimes = 0; - result = columns; - dstTable.getBaseSchema(anyBoolean); - minTimes = 0; - result = columns; - dstTable.getFullSchema(); - minTimes = 0; - result = columns; - dstTable.getColumn("k1"); - minTimes = 0; - result = columns.get(0); - dstTable.getColumn("k2"); - minTimes = 0; - result = columns.get(1); - dstTable.getColumn("v1"); - minTimes = 0; - result = columns.get(2); - dstTable.getColumn("v2"); - minTimes = 0; - result = columns.get(3); - }}; + new Expectations() { + { + dstTable.getBaseSchema(); + minTimes = 0; + result = columns; + dstTable.getBaseSchema(anyBoolean); + minTimes = 0; + result = columns; + dstTable.getFullSchema(); + minTimes = 0; + result = columns; + dstTable.getColumn("k1"); + minTimes = 0; + result = columns.get(0); + dstTable.getColumn("k2"); + minTimes = 0; + result = columns.get(1); + dstTable.getColumn("v1"); + minTimes = 0; + result = columns.get(2); + dstTable.getColumn("v2"); + minTimes = 0; + result = columns.get(3); + } + }; TStreamLoadPutRequest request = getBaseRequest(); request.setColumns("k1,k2,v1, v2=k1"); @@ -695,29 +708,31 @@ public void testWhereNotBool() throws UserException { } } - new Expectations() {{ - dstTable.getBaseSchema(); - minTimes = 0; - result = columns; - dstTable.getBaseSchema(anyBoolean); - minTimes = 0; - result = columns; - dstTable.getFullSchema(); - minTimes = 0; - result = columns; - dstTable.getColumn("k1"); - minTimes = 0; - result = columns.get(0); - dstTable.getColumn("k2"); - minTimes = 0; - result = columns.get(1); - dstTable.getColumn("v1"); - minTimes = 0; - result = columns.get(2); - dstTable.getColumn("v2"); - minTimes = 0; - result = columns.get(3); - }}; + new Expectations() { + { + dstTable.getBaseSchema(); + minTimes = 0; + result = columns; + dstTable.getBaseSchema(anyBoolean); + minTimes = 0; + result = columns; + dstTable.getFullSchema(); + minTimes = 0; + result = columns; + dstTable.getColumn("k1"); + minTimes = 0; + result = columns.get(0); + dstTable.getColumn("k2"); + minTimes = 0; + result = columns.get(1); + dstTable.getColumn("v1"); + minTimes = 0; + result = columns.get(2); + dstTable.getColumn("v2"); + minTimes = 0; + result = columns.get(3); + } + }; TStreamLoadPutRequest request = getBaseRequest(); request.setColumns("k1,k2,v1, v2=k1"); @@ -823,8 +838,10 @@ public void testSequenceColumnWithoutSetColumns() throws UserException { dstTable.hasSequenceCol(); result = true; - dstTable.getBaseSchema(anyBoolean); result = columns; - dstTable.getFullSchema(); result = columns; + dstTable.getBaseSchema(anyBoolean); + result = columns; + dstTable.getFullSchema(); + result = columns; dstTable.getColumn("k1"); result = columns.stream().filter(c -> c.getName().equals("k1")).findFirst().get(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java index 8c3eb90a9256c4..ae8e17a645791a 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java @@ -60,8 +60,8 @@ public static void setUp() throws Exception { createTableStmt = 
(CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr, ctx); Catalog.getCurrentCatalog().createTable(createTableStmt); - createTblStmtStr = "create table db1.table_for_view (k1 int, k2 int, k3 varchar(100)) distributed by hash(k1)" + - "properties('replication_num' = '1');"; + createTblStmtStr = "create table db1.table_for_view (k1 int, k2 int, k3 varchar(100)) distributed by hash(k1)" + + "properties('replication_num' = '1');"; createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr, ctx); Catalog.getCurrentCatalog().createTable(createTableStmt); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/UpdatePlannerTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/UpdatePlannerTest.java index 8af6745f907da8..35ab8d3eacb526 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/UpdatePlannerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/UpdatePlannerTest.java @@ -84,7 +84,7 @@ public void testComputeOutputExprsWithShadowColumnAndSetExpr(@Injectable OlapTab fullSchema.add(v1); fullSchema.add(shadowV1); - new Expectations(){ + new Expectations() { { targetTable.getFullSchema(); result = fullSchema; @@ -146,7 +146,7 @@ public void testNewColumnBySchemaChange(@Injectable OlapTable targetTable, fullSchema.add(v1); fullSchema.add(newV2); - new Expectations(){ + new Expectations() { { targetTable.getFullSchema(); result = fullSchema; diff --git a/fe/fe-core/src/test/java/org/apache/doris/plugin/PluginZipTest.java b/fe/fe-core/src/test/java/org/apache/doris/plugin/PluginZipTest.java index c90eac79eebb2b..228fe3e1f0446e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/plugin/PluginZipTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/plugin/PluginZipTest.java @@ -119,7 +119,7 @@ public void testDownloadAndValidateZipMd5Error() { public void testDownloadAndValidateZipIOException() { PluginZip util = new PluginZip("http://io-exception", null); try { - Path zipPath = util.downloadRemoteZip(PluginTestUtil.getTestPath("target")); + util.downloadRemoteZip(PluginTestUtil.getTestPath("target")); } catch (Exception e) { Assert.assertTrue(e instanceof IOException); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/ConnectProcessorTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/ConnectProcessorTest.java index 48d506cce82c5c..55cbbb0cb9b182 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/ConnectProcessorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/ConnectProcessorTest.java @@ -59,43 +59,43 @@ public class ConnectProcessorTest { @BeforeClass public static void setUpClass() { // Init Database packet - { + { // CHECKSTYLE IGNORE THIS LINE MysqlSerializer serializer = MysqlSerializer.newInstance(); serializer.writeInt1(2); serializer.writeEofString("testCluster:testDb"); initDbPacket = serializer.toByteBuffer(); - } + } // CHECKSTYLE IGNORE THIS LINE // Ping packet - { + { // CHECKSTYLE IGNORE THIS LINE MysqlSerializer serializer = MysqlSerializer.newInstance(); serializer.writeInt1(14); pingPacket = serializer.toByteBuffer(); - } + } // CHECKSTYLE IGNORE THIS LINE // Quit packet - { + { // CHECKSTYLE IGNORE THIS LINE MysqlSerializer serializer = MysqlSerializer.newInstance(); serializer.writeInt1(1); quitPacket = serializer.toByteBuffer(); - } + } // CHECKSTYLE IGNORE THIS LINE // Query packet - { + { // CHECKSTYLE IGNORE THIS LINE MysqlSerializer serializer = MysqlSerializer.newInstance(); serializer.writeInt1(3); serializer.writeEofString("select * from a"); 
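The packet fixtures built above follow the MySQL command-phase wire format: a single command byte followed by the payload, hence writeInt1(2) for COM_INIT_DB, 14 (0x0e) for COM_PING, 1 for COM_QUIT, 3 for COM_QUERY, and 4 for COM_FIELD_LIST. A minimal stand-in for the query fixture using plain JDK types (hypothetical; not the MysqlSerializer API):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    // COM_QUERY: command byte 0x03, then the SQL text runs to the end of the
    // packet (an "EOF string" -- no terminator and no length prefix).
    static ByteBuffer comQueryPacket(String sql) {
        byte[] body = sql.getBytes(StandardCharsets.UTF_8);
        ByteBuffer buf = ByteBuffer.allocate(1 + body.length);
        buf.put((byte) 0x03);
        buf.put(body);
        buf.flip();
        return buf;
    }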
queryPacket = serializer.toByteBuffer(); - } + } // CHECKSTYLE IGNORE THIS LINE // Field list packet - { + { // CHECKSTYLE IGNORE THIS LINE MysqlSerializer serializer = MysqlSerializer.newInstance(); serializer.writeInt1(4); serializer.writeNulTerminateString("testTbl"); serializer.writeEofString(""); fieldListPacket = serializer.toByteBuffer(); - } + } // CHECKSTYLE IGNORE THIS LINE statistics = statistics.toBuilder().setCpuMs(0L).setScanRows(0).setScanBytes(0).build(); MetricRepo.init(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/CoordinatorTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/CoordinatorTest.java index 37dd235740317b..67def92ade92d5 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/CoordinatorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/CoordinatorTest.java @@ -124,7 +124,7 @@ public void testComputeColocateJoinInstanceParam() { // check whether one instance have 3 tablet to scan for (FInstanceExecParam instanceExecParam : params.instanceExecParams) { - for (List tempScanRangeParamsList :instanceExecParam.perNodeScanRanges.values()) { + for (List tempScanRangeParamsList : instanceExecParam.perNodeScanRanges.values()) { Assert.assertEquals(3, tempScanRangeParamsList.size()); } } @@ -576,9 +576,9 @@ public void testComputeScanRangeAssignmentByScheduler() { locations.add(tScanRangeLocations1); Deencapsulation.invoke(coordinator, "computeScanRangeAssignmentByScheduler", olapScanNode, locations, assignment, assignedBytesPerHost); - for (Map.Entry entry:assignment.entrySet()) { + for (Map.Entry entry : assignment.entrySet()) { Map> addr = (HashMap>) entry.getValue(); - for (Map.Entry item:addr.entrySet()) { + for (Map.Entry item : addr.entrySet()) { List params = (List) item.getValue(); Assert.assertTrue(params.size() == 2); } @@ -629,16 +629,16 @@ public void testGetExecHostPortForFragmentIDAndBucketSeq() { locations.add(tScanRangeLocations); HashMap assignedBytesPerHost = Maps.newHashMap(); - Deencapsulation.invoke(coordinator, "getExecHostPortForFragmentIDAndBucketSeq",tScanRangeLocations, + Deencapsulation.invoke(coordinator, "getExecHostPortForFragmentIDAndBucketSeq", tScanRangeLocations, planFragmentId, 1, assignedBytesPerHost); - Deencapsulation.invoke(coordinator, "getExecHostPortForFragmentIDAndBucketSeq",tScanRangeLocations, + Deencapsulation.invoke(coordinator, "getExecHostPortForFragmentIDAndBucketSeq", tScanRangeLocations, planFragmentId, 2, assignedBytesPerHost); - Deencapsulation.invoke(coordinator, "getExecHostPortForFragmentIDAndBucketSeq",tScanRangeLocations, + Deencapsulation.invoke(coordinator, "getExecHostPortForFragmentIDAndBucketSeq", tScanRangeLocations, planFragmentId, 3, assignedBytesPerHost); List hosts = new ArrayList<>(); - for (Map.Entry item:assignedBytesPerHost.entrySet()) { - Assert.assertTrue((Long)item.getValue() == 1); - TNetworkAddress addr = (TNetworkAddress)item.getKey(); + for (Map.Entry item : assignedBytesPerHost.entrySet()) { + Assert.assertTrue((Long) item.getValue() == 1); + TNetworkAddress addr = (TNetworkAddress) item.getKey(); hosts.add(addr.hostname); } Assert.assertTrue(hosts.size() == 3); @@ -646,7 +646,6 @@ public void testGetExecHostPortForFragmentIDAndBucketSeq() { @Test public void testBucketShuffleWithUnaliveBackend() { - Coordinator coordinator = new Coordinator(context, analyzer, planner); PlanFragmentId planFragmentId = new PlanFragmentId(1); // each olaptable bucket have the same TScanRangeLocations, be id is {0, 1, 2} TScanRangeLocations tScanRangeLocations = new 
TScanRangeLocations(); @@ -699,7 +698,7 @@ public void testBucketShuffleWithUnaliveBackend() { Assert.assertTrue(backendIdBucketCountMap.size() == 2); List backendIds = new ArrayList(); List counts = new ArrayList(); - for (Map.Entry item:backendIdBucketCountMap.entrySet()) { + for (Map.Entry item : backendIdBucketCountMap.entrySet()) { backendIds.add(item.getKey()); counts.add(item.getValue()); } @@ -792,17 +791,15 @@ public void testComputeScanRangeAssignment() { Set ids3 = new HashSet<>(); ids1.add(3); fragmentIdToScanNodeIds.put(planFragmentId, ids3); - Deencapsulation.setField(coordinator,"fragmentIdToScanNodeIds", fragmentIdToScanNodeIds); + Deencapsulation.setField(coordinator, "fragmentIdToScanNodeIds", fragmentIdToScanNodeIds); //fragmentExecParamsMap Map fragmentExecParamsMap = Maps.newHashMap(); fragmentExecParamsMap.put(planFragmentId, new FragmentExecParams(fragment)); fragmentExecParamsMap.put(planFragmentId2, new FragmentExecParams(fragment2)); fragmentExecParamsMap.put(planFragmentId3, new FragmentExecParams(fragment3)); - Deencapsulation.setField(coordinator,"fragmentExecParamsMap", fragmentExecParamsMap); + Deencapsulation.setField(coordinator, "fragmentExecParamsMap", fragmentExecParamsMap); - //bucketShuffleJoinController - BucketShuffleJoinController bucketShuffleJoinController = new BucketShuffleJoinController(fragmentIdToScanNodeIds); // init all backend Backend backend0 = new Backend(0, "0.0.0.0", 9060); backend0.setAlive(true); diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/JournalObservableTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/JournalObservableTest.java index a76b5a1f91c3bd..743cb0c8117388 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/JournalObservableTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/JournalObservableTest.java @@ -35,21 +35,21 @@ public void testUpperBound() { JournalObserver observer6 = new JournalObserver(6L); // empty - { + { // CHECKSTYLE IGNORE THIS LINE Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), 0, 1L)); - } + } // CHECKSTYLE IGNORE THIS LINE // one element - { + { // CHECKSTYLE IGNORE THIS LINE elements.add(observer2); int size = elements.size(); Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L)); Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 2L)); Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 3L)); - } + } // CHECKSTYLE IGNORE THIS LINE // same element - { + { // CHECKSTYLE IGNORE THIS LINE elements.clear(); elements.add(observer2); elements.add(observer6); @@ -72,10 +72,10 @@ public void testUpperBound() { Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); elements.remove(observer42); Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); - } + } // CHECKSTYLE IGNORE THIS LINE // same element 2 - { + { // CHECKSTYLE IGNORE THIS LINE elements.clear(); elements.add(observer4); elements.add(observer41); @@ -86,10 +86,10 @@ public void testUpperBound() { Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); elements.remove(observer4); Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); - } + } // CHECKSTYLE IGNORE THIS LINE // odd elements - { + { // CHECKSTYLE IGNORE THIS LINE elements.clear(); elements.add(observer2); elements.add(observer2); @@ -98,10 +98,6 @@ public void testUpperBound() { 
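(Note that the bare { ... } grouping blocks here and in ConnectProcessorTest are kept for scoping but annotated with // CHECKSTYLE IGNORE THIS LINE so the brace-placement checks skip them.) The assertions that follow pin down upperBound as a strict upper-bound binary search: it returns the index of the first element whose journal id is greater than the key, so runs of equal ids are skipped, matching C++ std::upper_bound. A standalone sketch over a primitive array (hypothetical; the real method operates on JournalObserver objects):

    // First index i in sorted[0..size) with sorted[i] > key; returns size if none.
    static int upperBound(long[] sorted, int size, long key) {
        int lo = 0;
        int hi = size;
        while (lo < hi) {
            int mid = (lo + hi) >>> 1;
            if (sorted[mid] <= key) {
                lo = mid + 1;
            } else {
                hi = mid;
            }
        }
        return lo;
    }
    // e.g. upperBound(new long[] {2, 2, 4, 4, 6, 6}, 6, 4L) == 4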
elements.add(observer6); elements.add(observer6); int size = elements.size(); -// System.out.println("size=" + size); -// for(int i = 0; i < size; i ++) { -// System.out.println("array " + i + " = " + ((MasterOpExecutor)elements.get(i)).getTargetJournalId()); -// } Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L)); Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 2L)); Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 3L)); @@ -109,9 +105,9 @@ public void testUpperBound() { Assert.assertEquals(4, JournalObservable.upperBound(elements.toArray(), size, 5L)); Assert.assertEquals(6, JournalObservable.upperBound(elements.toArray(), size, 6L)); Assert.assertEquals(6, JournalObservable.upperBound(elements.toArray(), size, 7L)); - } + } // CHECKSTYLE IGNORE THIS LINE // even elements - { + { // CHECKSTYLE IGNORE THIS LINE elements.clear(); elements.add(observer2); elements.add(observer2); @@ -128,8 +124,8 @@ public void testUpperBound() { Assert.assertEquals(5, JournalObservable.upperBound(elements.toArray(), size, 5L)); Assert.assertEquals(7, JournalObservable.upperBound(elements.toArray(), size, 6L)); Assert.assertEquals(7, JournalObservable.upperBound(elements.toArray(), size, 7L)); - } - { + } // CHECKSTYLE IGNORE THIS LINE + { // CHECKSTYLE IGNORE THIS LINE CountDownLatch latch = new CountDownLatch(1); System.out.println(latch.getCount()); @@ -141,7 +137,7 @@ public void testUpperBound() { latch.countDown(); System.out.println(latch.getCount()); - } + } // CHECKSTYLE IGNORE THIS LINE System.out.println("success"); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/PartitionCacheTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/PartitionCacheTest.java index a26ceb9643ba36..d5eddb3e5ff325 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/PartitionCacheTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/PartitionCacheTest.java @@ -19,7 +19,6 @@ import org.apache.doris.analysis.Analyzer; import org.apache.doris.analysis.PartitionValue; -import org.apache.doris.analysis.SelectStmt; import org.apache.doris.analysis.SqlParser; import org.apache.doris.analysis.SqlScanner; import org.apache.doris.analysis.StatementBase; @@ -439,8 +438,8 @@ private OlapTable createEventTable() { } private View createEventView1() { - String originStmt = "select eventdate, COUNT(userid) FROM appevent WHERE " + - "eventdate>=\"2020-01-12\" and eventdate<=\"2020-01-14\" GROUP BY eventdate"; + String originStmt = "select eventdate, COUNT(userid) FROM appevent WHERE " + + "eventdate>=\"2020-01-12\" and eventdate<=\"2020-01-14\" GROUP BY eventdate"; View view = new View(30000L, "view1", null); view.setInlineViewDefWithSqlMode(originStmt, 0L); return view; @@ -454,16 +453,16 @@ private View createEventView2() { } private View createEventView3() { - String originStmt = "select eventdate, COUNT(userid) FROM appevent WHERE " + - "eventdate>=\"2020-01-12\" and eventdate<=\"2020-01-15\" GROUP BY eventdate"; + String originStmt = "select eventdate, COUNT(userid) FROM appevent WHERE " + + "eventdate>=\"2020-01-12\" and eventdate<=\"2020-01-15\" GROUP BY eventdate"; View view = new View(30002L, "view3", null); view.setInlineViewDefWithSqlMode(originStmt, 0L); return view; } private View createEventNestedView() { - String originStmt = "select eventdate, COUNT(userid) FROM view2 WHERE " + - "eventdate>=\"2020-01-12\" and eventdate<=\"2020-01-14\" GROUP BY eventdate"; + String originStmt = "select eventdate, 
COUNT(userid) FROM view2 WHERE " + + "eventdate>=\"2020-01-12\" and eventdate<=\"2020-01-14\" GROUP BY eventdate"; View view = new View(30003L, "view4", null); view.setInlineViewDefWithSqlMode(originStmt, 0L); return view; @@ -565,8 +564,8 @@ public void testWithinMinTime() throws Exception { public void testPartitionModel() throws Exception { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT eventdate, COUNT(DISTINCT userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + - "eventdate<=\"2020-01-15\" GROUP BY eventdate" + "SELECT eventdate, COUNT(DISTINCT userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + + "eventdate<=\"2020-01-15\" GROUP BY eventdate" ); ArrayList selectedPartitionIds @@ -638,8 +637,8 @@ public void testPartitionIntTypeSql() throws Exception { public void testSimpleCacheSql() throws Exception { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + - "eventdate<=\"2020-01-15\" GROUP BY eventdate" + "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + + "eventdate<=\"2020-01-15\" GROUP BY eventdate" ); ArrayList selectedPartitionIds @@ -649,7 +648,6 @@ public void testSimpleCacheSql() throws Exception { CacheAnalyzer ca = new CacheAnalyzer(context, parseStmt, scanNodes); ca.checkCacheMode(1579053661000L); //2020-1-15 10:01:01 Assert.assertEquals(ca.getCacheMode(), CacheMode.Partition); //assert cache model first - SelectStmt selectStmt = (SelectStmt) parseStmt; try { PartitionCache cache = (PartitionCache) ca.getCache(); @@ -682,8 +680,8 @@ public void testSimpleCacheSql() throws Exception { public void testHitSqlCache() throws Exception { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + - "eventdate<=\"2020-01-14\" GROUP BY eventdate" + "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + + "eventdate<=\"2020-01-14\" GROUP BY eventdate" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L); @@ -697,8 +695,8 @@ public void testHitSqlCache() throws Exception { public void testHitPartPartition() throws Exception { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + - "eventdate<=\"2020-01-14\" GROUP BY eventdate" + "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + + "eventdate<=\"2020-01-14\" GROUP BY eventdate" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L); @@ -743,8 +741,8 @@ public void testHitPartPartition() throws Exception { public void testNoUpdatePartition() throws Exception { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + - "eventdate<=\"2020-01-14\" GROUP BY eventdate" + "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + + "eventdate<=\"2020-01-14\" GROUP BY eventdate" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L); @@ -785,8 +783,8 @@ public void testNoUpdatePartition() throws Exception { public void testUpdatePartition() throws Exception { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT eventdate, COUNT(userid) FROM 
appevent WHERE eventdate>=\"2020-01-12\" and " + - "eventdate<=\"2020-01-15\" GROUP BY eventdate" + "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + + "eventdate<=\"2020-01-15\" GROUP BY eventdate" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L, 20200115L); @@ -834,9 +832,9 @@ public void testUpdatePartition() throws Exception { public void testRewriteMultiPredicate1() throws Exception { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>\"2020-01-11\" and " + - "eventdate<\"2020-01-16\"" + - " and eventid=1 GROUP BY eventdate" + "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>\"2020-01-11\" and " + + "eventdate<\"2020-01-16\"" + + " and eventid=1 GROUP BY eventdate" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L, 20200115L); @@ -878,10 +876,10 @@ public void testRewriteMultiPredicate1() throws Exception { public void testRewriteJoin() throws Exception { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT appevent.eventdate, country, COUNT(appevent.userid) FROM appevent" + - " INNER JOIN userprofile ON appevent.userid = userprofile.userid" + - " WHERE appevent.eventdate>=\"2020-01-12\" and appevent.eventdate<=\"2020-01-15\"" + - " and eventid=1 GROUP BY appevent.eventdate, country" + "SELECT appevent.eventdate, country, COUNT(appevent.userid) FROM appevent" + + " INNER JOIN userprofile ON appevent.userid = userprofile.userid" + + " WHERE appevent.eventdate>=\"2020-01-12\" and appevent.eventdate<=\"2020-01-15\"" + + " and eventid=1 GROUP BY appevent.eventdate, country" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L, 20200115L); @@ -911,8 +909,8 @@ public void testRewriteJoin() throws Exception { cache.rewriteSelectStmt(newRangeList); sql = ca.getRewriteStmt().getWhereClause().toSql(); LOG.warn("Join rewrite={}", sql); - Assert.assertEquals(sql, "`appevent`.`eventdate` >= '2020-01-14'" + - " AND `appevent`.`eventdate` <= '2020-01-15' AND `eventid` = 1"); + Assert.assertEquals(sql, "`appevent`.`eventdate` >= '2020-01-14'" + + " AND `appevent`.`eventdate` <= '2020-01-15' AND `eventid` = 1"); } catch (Exception e) { LOG.warn("Join ex={}", e); Assert.fail(e.getMessage()); @@ -923,9 +921,9 @@ public void testRewriteJoin() throws Exception { public void testSubSelect() throws Exception { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT eventdate, sum(pv) FROM (SELECT eventdate, COUNT(userid) AS pv FROM appevent WHERE " + - "eventdate>\"2020-01-11\" AND eventdate<\"2020-01-16\"" + - " AND eventid=1 GROUP BY eventdate) tbl GROUP BY eventdate" + "SELECT eventdate, sum(pv) FROM (SELECT eventdate, COUNT(userid) AS pv FROM appevent WHERE " + + "eventdate>\"2020-01-11\" AND eventdate<\"2020-01-16\"" + + " AND eventid=1 GROUP BY eventdate) tbl GROUP BY eventdate" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L, 20200115L); @@ -939,10 +937,10 @@ public void testSubSelect() throws Exception { cache.rewriteSelectStmt(null); LOG.warn("Sub nokey={}", cache.getNokeyStmt().toSql()); Assert.assertEquals(cache.getNokeyStmt().toSql(), - "SELECT `eventdate` AS `eventdate`, sum(`pv`) AS `sum(``pv``)` FROM (" + - "SELECT `eventdate` AS `eventdate`, count(`userid`) AS `pv` FROM " + - "`testCluster:testDb`.`appevent` WHERE `eventid` = 1" + - " GROUP BY `eventdate`) tbl GROUP BY 
`eventdate`"); + "SELECT `eventdate` AS `eventdate`, sum(`pv`) AS `sum(``pv``)` FROM (" + + "SELECT `eventdate` AS `eventdate`, count(`userid`) AS `pv` FROM " + + "`testCluster:testDb`.`appevent` WHERE `eventid` = 1" + + " GROUP BY `eventdate`) tbl GROUP BY `eventdate`"); PartitionRange range = cache.getPartitionRange(); boolean flag = range.analytics(); @@ -961,11 +959,11 @@ public void testSubSelect() throws Exception { sql = ca.getRewriteStmt().toSql(); LOG.warn("Sub rewrite={}", sql); Assert.assertEquals(sql, - "SELECT `eventdate` AS `eventdate`, sum(`pv`) AS `sum(``pv``)` FROM (" + - "SELECT `eventdate` AS `eventdate`, count(`userid`) AS `pv` FROM " + - "`testCluster:testDb`.`appevent` WHERE " + - "`eventdate` > '2020-01-13' AND `eventdate` < '2020-01-16' AND `eventid` = 1 GROUP BY " + - "`eventdate`) tbl GROUP BY `eventdate`"); + "SELECT `eventdate` AS `eventdate`, sum(`pv`) AS `sum(``pv``)` FROM (" + + "SELECT `eventdate` AS `eventdate`, count(`userid`) AS `pv` FROM " + + "`testCluster:testDb`.`appevent` WHERE " + + "`eventdate` > '2020-01-13' AND `eventdate` < '2020-01-16' AND `eventid` = 1 GROUP BY " + + "`eventdate`) tbl GROUP BY `eventdate`"); } catch (Exception e) { LOG.warn("sub ex={}", e); Assert.fail(e.getMessage()); @@ -976,8 +974,8 @@ public void testSubSelect() throws Exception { public void testNotHitPartition() throws Exception { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + - "eventdate<=\"2020-01-14\" GROUP BY eventdate" + "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + + "eventdate<=\"2020-01-14\" GROUP BY eventdate" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L); @@ -1005,8 +1003,8 @@ public void testNotHitPartition() throws Exception { public void testSqlCacheKey() { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + - "eventdate<=\"2020-01-14\" GROUP BY eventdate" + "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + + "eventdate<=\"2020-01-14\" GROUP BY eventdate" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L); @@ -1017,17 +1015,17 @@ public void testSqlCacheKey() { SqlCache sqlCache = (SqlCache) ca.getCache(); String cacheKey = sqlCache.getSqlWithViewStmt(); - Assert.assertEquals(cacheKey, "SELECT `eventdate` AS `eventdate`, count(`userid`) " + - "AS `count(``userid``)` FROM `testCluster:testDb`.`appevent` WHERE `eventdate` " + - ">= '2020-01-12 00:00:00' AND `eventdate` <= '2020-01-14 00:00:00' GROUP BY `eventdate`|"); + Assert.assertEquals(cacheKey, "SELECT `eventdate` AS `eventdate`, count(`userid`) " + + "AS `count(``userid``)` FROM `testCluster:testDb`.`appevent` WHERE `eventdate` " + + ">= '2020-01-12 00:00:00' AND `eventdate` <= '2020-01-14 00:00:00' GROUP BY `eventdate`|"); } @Test public void testSqlCacheKeyWithChineseChar() { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + - "eventdate<=\"2020-01-14\" and city=\"北京\" GROUP BY eventdate" + "SELECT eventdate, COUNT(userid) FROM appevent WHERE eventdate>=\"2020-01-12\" and " + + "eventdate<=\"2020-01-14\" and city=\"北京\" GROUP BY eventdate" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L); @@ 
-1054,21 +1052,21 @@ public void testSqlCacheKeyWithView() { SqlCache sqlCache = (SqlCache) ca.getCache(); String cacheKey = sqlCache.getSqlWithViewStmt(); - Assert.assertEquals(cacheKey, "SELECT `testDb`.`view1`.`eventdate` AS `eventdate`, `testDb`.`view1`." + - "`count(`userid`)` AS `count(``userid``)` FROM `testDb`.`view1`|select eventdate, COUNT(userid) " + - "FROM appevent WHERE eventdate>=\"2020-01-12\" and eventdate<=\"2020-01-14\" GROUP BY eventdate"); + Assert.assertEquals(cacheKey, "SELECT `testDb`.`view1`.`eventdate` AS `eventdate`, `testDb`.`view1`." + + "`count(`userid`)` AS `count(``userid``)` FROM `testDb`.`view1`|select eventdate, COUNT(userid) " + + "FROM appevent WHERE eventdate>=\"2020-01-12\" and eventdate<=\"2020-01-14\" GROUP BY eventdate"); } @Test public void testSqlCacheKeyWithSubSelectView() { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "select origin.eventdate as eventdate, origin.userid as userid\n" + - "from (\n" + - " select view2.eventdate as eventdate, view2.userid as userid \n" + - " from testDb.view2 view2 \n" + - " where view2.eventdate >=\"2020-01-12\" and view2.eventdate <= \"2020-01-14\"\n" + - ") origin" + "select origin.eventdate as eventdate, origin.userid as userid\n" + + "from (\n" + + " select view2.eventdate as eventdate, view2.userid as userid \n" + + " from testDb.view2 view2 \n" + + " where view2.eventdate >=\"2020-01-12\" and view2.eventdate <= \"2020-01-14\"\n" + + ") origin" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L); @@ -1079,10 +1077,10 @@ public void testSqlCacheKeyWithSubSelectView() { SqlCache sqlCache = (SqlCache) ca.getCache(); String cacheKey = sqlCache.getSqlWithViewStmt(); - Assert.assertEquals(cacheKey, "SELECT `origin`.`eventdate` AS `eventdate`, `origin`.`userid` AS " + - "`userid` FROM (SELECT `view2`.`eventdate` AS `eventdate`, `view2`.`userid` AS `userid` FROM " + - "`testDb`.`view2` view2 WHERE `view2`.`eventdate` >= '2020-01-12 00:00:00' AND `view2`.`eventdate`" + - " <= '2020-01-14 00:00:00') origin|select eventdate, userid FROM appevent"); + Assert.assertEquals(cacheKey, "SELECT `origin`.`eventdate` AS `eventdate`, `origin`.`userid` AS " + + "`userid` FROM (SELECT `view2`.`eventdate` AS `eventdate`, `view2`.`userid` AS `userid` FROM " + + "`testDb`.`view2` view2 WHERE `view2`.`eventdate` >= '2020-01-12 00:00:00' AND `view2`.`eventdate`" + + " <= '2020-01-14 00:00:00') origin|select eventdate, userid FROM appevent"); } @Test @@ -1101,10 +1099,10 @@ public void testPartitionCacheKeyWithView() { cache.rewriteSelectStmt(null); Assert.assertEquals(cache.getNokeyStmt().getWhereClause(), null); - Assert.assertEquals(cache.getSqlWithViewStmt(), "SELECT `testDb`.`view3`.`eventdate` AS " + - "`eventdate`, `testDb`.`view3`.`count(`userid`)` AS `count(``userid``)` FROM " + - "`testDb`.`view3`|select eventdate, COUNT(userid) FROM appevent WHERE eventdate>=" + - "\"2020-01-12\" and eventdate<=\"2020-01-15\" GROUP BY eventdate"); + Assert.assertEquals(cache.getSqlWithViewStmt(), "SELECT `testDb`.`view3`.`eventdate` AS " + + "`eventdate`, `testDb`.`view3`.`count(`userid`)` AS `count(``userid``)` FROM " + + "`testDb`.`view3`|select eventdate, COUNT(userid) FROM appevent WHERE eventdate>=" + + "\"2020-01-12\" and eventdate<=\"2020-01-15\" GROUP BY eventdate"); } catch (Exception e) { LOG.warn("ex={}", e); Assert.fail(e.getMessage()); @@ -1115,12 +1113,12 @@ public void testPartitionCacheKeyWithView() { public void testPartitionCacheKeyWithSubSelectView() { 
Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "select origin.eventdate as eventdate, origin.cnt as cnt\n" + - "from (\n" + - " SELECT eventdate, COUNT(userid) as cnt \n" + - " FROM view2 \n" + - " WHERE eventdate>=\"2020-01-12\" and eventdate<=\"2020-01-15\" GROUP BY eventdate\n" + - ") origin" + "select origin.eventdate as eventdate, origin.cnt as cnt\n" + + "from (\n" + + " SELECT eventdate, COUNT(userid) as cnt \n" + + " FROM view2 \n" + + " WHERE eventdate>=\"2020-01-12\" and eventdate<=\"2020-01-15\" GROUP BY eventdate\n" + + ") origin" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L, 20200115L); @@ -1135,9 +1133,9 @@ public void testPartitionCacheKeyWithSubSelectView() { cache.rewriteSelectStmt(null); Assert.assertEquals(cache.getNokeyStmt().getWhereClause(), null); Assert.assertEquals(cache.getSqlWithViewStmt(), - "SELECT `origin`.`eventdate` AS `eventdate`, `origin`.`cnt` AS `cnt` FROM (SELECT " + - " `eventdate` AS `eventdate`, count(`userid`) AS `cnt` FROM " + - "`testDb`.`view2` GROUP BY `eventdate`) origin|select eventdate, userid FROM appevent"); + "SELECT `origin`.`eventdate` AS `eventdate`, `origin`.`cnt` AS `cnt` FROM (SELECT " + + " `eventdate` AS `eventdate`, count(`userid`) AS `cnt` FROM " + + "`testDb`.`view2` GROUP BY `eventdate`) origin|select eventdate, userid FROM appevent"); } catch (Exception e) { LOG.warn("ex={}", e); Assert.fail(e.getMessage()); @@ -1157,24 +1155,24 @@ public void testSqlCacheKeyWithNestedView() { SqlCache sqlCache = (SqlCache) ca.getCache(); String cacheKey = sqlCache.getSqlWithViewStmt(); - Assert.assertEquals(cacheKey, "SELECT `testDb`.`view4`.`eventdate` AS `eventdate`, " + - "`testDb`.`view4`.`count(`userid`)` AS `count(``userid``)` FROM `testDb`.`view4`|select " + - "eventdate, COUNT(userid) FROM view2 WHERE eventdate>=\"2020-01-12\" and " + - "eventdate<=\"2020-01-14\" GROUP BY eventdate|select eventdate, userid FROM appevent"); + Assert.assertEquals(cacheKey, "SELECT `testDb`.`view4`.`eventdate` AS `eventdate`, " + + "`testDb`.`view4`.`count(`userid`)` AS `count(``userid``)` FROM `testDb`.`view4`|select " + + "eventdate, COUNT(userid) FROM view2 WHERE eventdate>=\"2020-01-12\" and " + + "eventdate<=\"2020-01-14\" GROUP BY eventdate|select eventdate, userid FROM appevent"); } @Test public void testCacheLocalViewMultiOperand() { Catalog.getCurrentSystemInfo(); StatementBase parseStmt = parseSql( - "SELECT COUNT(userid)\n" + - "FROM (\n" + - " (SELECT userid FROM userprofile\n" + - " INTERSECT\n" + - " SELECT userid FROM userprofile)\n" + - " UNION\n" + - " SELECT userid FROM userprofile\n" + - ") as tmp" + "SELECT COUNT(userid)\n" + + "FROM (\n" + + " (SELECT userid FROM userprofile\n" + + " INTERSECT\n" + + " SELECT userid FROM userprofile)\n" + + " UNION\n" + + " SELECT userid FROM userprofile\n" + + ") as tmp" ); ArrayList selectedPartitionIds = Lists.newArrayList(20200112L, 20200113L, 20200114L, 20200115L); diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/ShowExecutorTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/ShowExecutorTest.java index 8facfc27ed482b..6f1737dc6eae06 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/ShowExecutorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/ShowExecutorTest.java @@ -90,9 +90,6 @@ public void setUp() throws Exception { // mock index 1 MaterializedIndex index1 = new MaterializedIndex(); - // mock index 2 - MaterializedIndex index2 = new MaterializedIndex(); - // mock partition Partition partition = 
Deencapsulation.newInstance(Partition.class); new Expectations(partition) { @@ -256,7 +253,7 @@ public void testShowDbPriv() throws AnalysisException { ShowDbStmt stmt = new ShowDbStmt(null); ShowExecutor executor = new ShowExecutor(ctx, stmt); ctx.setCatalog(AccessTestUtil.fetchBlockCatalog()); - ShowResultSet resultSet = executor.execute(); + executor.execute(); } @Test @@ -399,7 +396,7 @@ public void testShowCreateNoDb() throws AnalysisException { ShowCreateDbStmt stmt = new ShowCreateDbStmt("testCluster:emptyDb"); ShowExecutor executor = new ShowExecutor(ctx, stmt); - ShowResultSet resultSet = executor.execute(); + executor.execute(); Assert.fail("No exception throws."); } @@ -408,7 +405,7 @@ public void testShowCreateNoDb() throws AnalysisException { public void testShowCreateTableEmptyDb() throws AnalysisException { ShowCreateTableStmt stmt = new ShowCreateTableStmt(new TableName("testCluster:emptyDb", "testTable")); ShowExecutor executor = new ShowExecutor(ctx, stmt); - ShowResultSet resultSet = executor.execute(); + executor.execute(); Assert.fail("No Exception throws."); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/StmtExecutorTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/StmtExecutorTest.java index 5ba2e7237b4ecd..945c0fec5a19f5 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/StmtExecutorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/StmtExecutorTest.java @@ -533,8 +533,8 @@ public void testStmtWithUserInfo(@Mocked StatementBase stmt, @Mocked ConnectCont Deencapsulation.setField(stmtExecutor, "parsedStmt", null); Deencapsulation.setField(stmtExecutor, "originStmt", new OriginStatement("show databases;", 1)); stmtExecutor.execute(); - StatementBase newstmt = (StatementBase)Deencapsulation.getField(stmtExecutor, "parsedStmt"); - Assert.assertTrue(newstmt.getUserInfo() != null); + StatementBase newstmt = Deencapsulation.getField(stmtExecutor, "parsedStmt"); + Assert.assertNotNull(newstmt.getUserInfo()); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/VariableMgrTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/VariableMgrTest.java index b248bcc0cb7c63..5d9156c52061b8 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/VariableMgrTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/VariableMgrTest.java @@ -24,7 +24,6 @@ import org.apache.doris.analysis.StringLiteral; import org.apache.doris.analysis.SysVariableDesc; import org.apache.doris.catalog.Catalog; -import org.apache.doris.common.AnalysisException; import org.apache.doris.common.Config; import org.apache.doris.common.DdlException; import org.apache.doris.common.UserException; @@ -225,9 +224,7 @@ public void testInvalidTimeZoneOffset() throws UserException { } @Test(expected = DdlException.class) - public void testReadOnly() throws AnalysisException, DdlException { - SysVariableDesc desc = new SysVariableDesc("version_comment"); - + public void testReadOnly() throws DdlException { // Set global variable SetVar setVar = new SetVar(SetType.SESSION, "version_comment", null); VariableMgr.setVar(null, setVar); diff --git a/fe/fe-core/src/test/java/org/apache/doris/resource/TagSerializationTest.java b/fe/fe-core/src/test/java/org/apache/doris/resource/TagSerializationTest.java index c76dc37dc4f1ed..c4443ba7470546 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/resource/TagSerializationTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/resource/TagSerializationTest.java @@ -95,7 +95,7 @@ public void testSerializeTagManager() throws 
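The test hunks above share two mechanical cleanups: results that were assigned only to be ignored are no longer bound to a local (executor.execute() is invoked purely for its side effect or expected exception), and hand-rolled null checks move to the dedicated JUnit matcher. A minimal sketch of the resulting idiom, assuming JUnit 4's Assert and JMockit's Deencapsulation as used in these tests:

    // The explicit (StatementBase) cast is redundant: Deencapsulation.getField
    // is generic and infers the target type from the assignment context.
    StatementBase newstmt = Deencapsulation.getField(stmtExecutor, "parsedStmt");
    // assertNotNull reports a clearer failure than assertTrue(x != null).
    Assert.assertNotNull(newstmt.getUserInfo());
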
IOException, AnalysisException { TagManager tagManager = new TagManager(); tagManager.addResourceTag(1L, Tag.create(Tag.TYPE_LOCATION, "rack1")); - tagManager.addResourceTags(2L, TagSet.create( Tag.create(Tag.TYPE_LOCATION, "rack1"), Tag.create(Tag.TYPE_LOCATION, "rack2"))); + tagManager.addResourceTags(2L, TagSet.create(Tag.create(Tag.TYPE_LOCATION, "rack1"), Tag.create(Tag.TYPE_LOCATION, "rack2"))); tagManager.write(out); out.flush(); out.close(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/resource/TagTest.java b/fe/fe-core/src/test/java/org/apache/doris/resource/TagTest.java index 45130269c07e64..81300c4677a442 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/resource/TagTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/resource/TagTest.java @@ -53,7 +53,7 @@ public void testTagSet1() throws AnalysisException { Map map = Maps.newHashMap(); map.put("location", "zone1, zone2"); map.put("unknown", "tag1, tag2"); - TagSet tagSet = TagSet.create(map); + TagSet.create(map); } @Test(expected = AnalysisException.class) @@ -61,7 +61,7 @@ public void testTagSet2() throws AnalysisException { Map map = Maps.newHashMap(); map.put("location", "zone1, zone2"); map.put("type", "tag1, _tag2"); - TagSet tagSet = TagSet.create(map); + TagSet.create(map); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/rewrite/ExtractCommonFactorsRuleFunctionTest.java b/fe/fe-core/src/test/java/org/apache/doris/rewrite/ExtractCommonFactorsRuleFunctionTest.java index 7ed2284c2a6814..ac089c7423616f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/rewrite/ExtractCommonFactorsRuleFunctionTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/rewrite/ExtractCommonFactorsRuleFunctionTest.java @@ -171,52 +171,52 @@ public void testWideCommonFactorsAndCommonFactors() throws Exception { // TPC-H Q19 @Test public void testComplexQuery() throws Exception { - String createTableSQL = "CREATE TABLE `lineitem` (\n" + - " `l_orderkey` int(11) NOT NULL COMMENT \"\",\n" + - " `l_partkey` int(11) NOT NULL COMMENT \"\",\n" + - " `l_suppkey` int(11) NOT NULL COMMENT \"\",\n" + - " `l_linenumber` int(11) NOT NULL COMMENT \"\",\n" + - " `l_quantity` decimal(15, 2) NOT NULL COMMENT \"\",\n" + - " `l_extendedprice` decimal(15, 2) NOT NULL COMMENT \"\",\n" + - " `l_discount` decimal(15, 2) NOT NULL COMMENT \"\",\n" + - " `l_tax` decimal(15, 2) NOT NULL COMMENT \"\",\n" + - " `l_returnflag` char(1) NOT NULL COMMENT \"\",\n" + - " `l_linestatus` char(1) NOT NULL COMMENT \"\",\n" + - " `l_shipdate` date NOT NULL COMMENT \"\",\n" + - " `l_commitdate` date NOT NULL COMMENT \"\",\n" + - " `l_receiptdate` date NOT NULL COMMENT \"\",\n" + - " `l_shipinstruct` char(25) NOT NULL COMMENT \"\",\n" + - " `l_shipmode` char(10) NOT NULL COMMENT \"\",\n" + - " `l_comment` varchar(44) NOT NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`l_orderkey`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 2\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ");"; + String createTableSQL = "CREATE TABLE `lineitem` (\n" + + " `l_orderkey` int(11) NOT NULL COMMENT \"\",\n" + + " `l_partkey` int(11) NOT NULL COMMENT \"\",\n" + + " `l_suppkey` int(11) NOT NULL COMMENT \"\",\n" + + " `l_linenumber` int(11) NOT NULL COMMENT \"\",\n" + + " `l_quantity` decimal(15, 2) NOT NULL COMMENT \"\",\n" + + " `l_extendedprice` decimal(15, 2) NOT NULL COMMENT \"\",\n" + + " `l_discount` decimal(15, 2) NOT NULL COMMENT 
\"\",\n" + + " `l_tax` decimal(15, 2) NOT NULL COMMENT \"\",\n" + + " `l_returnflag` char(1) NOT NULL COMMENT \"\",\n" + + " `l_linestatus` char(1) NOT NULL COMMENT \"\",\n" + + " `l_shipdate` date NOT NULL COMMENT \"\",\n" + + " `l_commitdate` date NOT NULL COMMENT \"\",\n" + + " `l_receiptdate` date NOT NULL COMMENT \"\",\n" + + " `l_shipinstruct` char(25) NOT NULL COMMENT \"\",\n" + + " `l_shipmode` char(10) NOT NULL COMMENT \"\",\n" + + " `l_comment` varchar(44) NOT NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`l_orderkey`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 2\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ");"; dorisAssert.withTable(createTableSQL); - createTableSQL = "CREATE TABLE `part` (\n" + - " `p_partkey` int(11) NOT NULL COMMENT \"\",\n" + - " `p_name` varchar(55) NOT NULL COMMENT \"\",\n" + - " `p_mfgr` char(25) NOT NULL COMMENT \"\",\n" + - " `p_brand` char(10) NOT NULL COMMENT \"\",\n" + - " `p_type` varchar(25) NOT NULL COMMENT \"\",\n" + - " `p_size` int(11) NOT NULL COMMENT \"\",\n" + - " `p_container` char(10) NOT NULL COMMENT \"\",\n" + - " `p_retailprice` decimal(15, 2) NOT NULL COMMENT \"\",\n" + - " `p_comment` varchar(23) NOT NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`p_partkey`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 2\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\",\n" + - "\"storage_format\" = \"V2\"\n" + - ");"; + createTableSQL = "CREATE TABLE `part` (\n" + + " `p_partkey` int(11) NOT NULL COMMENT \"\",\n" + + " `p_name` varchar(55) NOT NULL COMMENT \"\",\n" + + " `p_mfgr` char(25) NOT NULL COMMENT \"\",\n" + + " `p_brand` char(10) NOT NULL COMMENT \"\",\n" + + " `p_type` varchar(25) NOT NULL COMMENT \"\",\n" + + " `p_size` int(11) NOT NULL COMMENT \"\",\n" + + " `p_container` char(10) NOT NULL COMMENT \"\",\n" + + " `p_retailprice` decimal(15, 2) NOT NULL COMMENT \"\",\n" + + " `p_comment` varchar(23) NOT NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`p_partkey`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 2\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\"\n" + + ");"; dorisAssert.withTable(createTableSQL); String query = "select sum(l_extendedprice* (1 - l_discount)) as revenue " + "from lineitem, part " diff --git a/fe/fe-core/src/test/java/org/apache/doris/rewrite/FEFunctionsTest.java b/fe/fe-core/src/test/java/org/apache/doris/rewrite/FEFunctionsTest.java index f9f726dfca1944..20baf3d39e64d3 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/rewrite/FEFunctionsTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/rewrite/FEFunctionsTest.java @@ -166,7 +166,7 @@ public void dateFormatUtilTest() { Assert.assertEquals("13", FEFunctions.dateFormat(testDate, new StringLiteral("%k")).getStringValue()); Assert.assertEquals("1", FEFunctions.dateFormat(testDate, new StringLiteral("%l")).getStringValue()); Assert.assertEquals("January", FEFunctions.dateFormat(testDate, new StringLiteral("%M")).getStringValue()); - Assert.assertEquals( "01", FEFunctions.dateFormat(testDate, new StringLiteral("%m")).getStringValue()); + Assert.assertEquals("01", FEFunctions.dateFormat(testDate, new StringLiteral("%m")).getStringValue()); Assert.assertEquals("PM", FEFunctions.dateFormat(testDate, new 
StringLiteral("%p")).getStringValue()); Assert.assertEquals("01:04:05 PM", FEFunctions.dateFormat(testDate, new StringLiteral("%r")).getStringValue()); Assert.assertEquals("05", FEFunctions.dateFormat(testDate, new StringLiteral("%S")).getStringValue()); @@ -180,7 +180,7 @@ public void dateFormatUtilTest() { Assert.assertEquals("foo", FEFunctions.dateFormat(testDate, new StringLiteral("foo")).getStringValue()); Assert.assertEquals("g", FEFunctions.dateFormat(testDate, new StringLiteral("%g")).getStringValue()); Assert.assertEquals("4", FEFunctions.dateFormat(testDate, new StringLiteral("%4")).getStringValue()); - Assert.assertEquals("2001 02" ,FEFunctions.dateFormat(testDate, new StringLiteral("%x %v")).getStringValue()); + Assert.assertEquals("2001 02", FEFunctions.dateFormat(testDate, new StringLiteral("%x %v")).getStringValue()); } catch (AnalysisException e) { e.printStackTrace(); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/rewrite/RewriteDateLiteralRuleTest.java b/fe/fe-core/src/test/java/org/apache/doris/rewrite/RewriteDateLiteralRuleTest.java index 1964a3b1af0e0d..68610859f06ae2 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/rewrite/RewriteDateLiteralRuleTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/rewrite/RewriteDateLiteralRuleTest.java @@ -41,7 +41,6 @@ public void before(ConnectContext ctx) throws Exception { } public void after() throws Exception { - String dropDbSql = "drop database if exists " + DB_NAME; dorisAssert.dropDB(DB_NAME); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/rewrite/mvrewrite/CountFieldToSumTest.java b/fe/fe-core/src/test/java/org/apache/doris/rewrite/mvrewrite/CountFieldToSumTest.java index b9a89fb6b6dd6f..5eee61c2903a4f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/rewrite/mvrewrite/CountFieldToSumTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/rewrite/mvrewrite/CountFieldToSumTest.java @@ -40,7 +40,7 @@ public class CountFieldToSumTest { public void testCountDistinct(@Injectable Analyzer analyzer, @Injectable FunctionCallExpr functionCallExpr) { TableName tableName = new TableName("db1", "table1"); - SlotRef slotRef = new SlotRef(tableName,"c1"); + SlotRef slotRef = new SlotRef(tableName, "c1"); List params = Lists.newArrayList(); params.add(slotRef); diff --git a/fe/fe-core/src/test/java/org/apache/doris/service/ExecuteEnvTest.java b/fe/fe-core/src/test/java/org/apache/doris/service/ExecuteEnvTest.java index 768a3c556a94ff..ebd38858fbd68c 100755 --- a/fe/fe-core/src/test/java/org/apache/doris/service/ExecuteEnvTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/service/ExecuteEnvTest.java @@ -30,7 +30,7 @@ public class ExecuteEnvTest { @Test public void testGetInstance() { Set tds = new HashSet(); - for (int i = 0 ;i < threadMaxNum; i++) { + for (int i = 0; i < threadMaxNum; i++) { Thread td = new Thread(new MyTest(i, oids)); tds.add(td); td.start(); @@ -44,7 +44,7 @@ public void testGetInstance() { } } for (int i = 1; i < threadMaxNum; i++) { - Assert.assertEquals(oids[i-1], oids[i]); + Assert.assertEquals(oids[i - 1], oids[i]); } } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/task/AgentTaskTest.java b/fe/fe-core/src/test/java/org/apache/doris/task/AgentTaskTest.java index 71691b67de20b2..db47288484a973 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/task/AgentTaskTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/task/AgentTaskTest.java @@ -187,7 +187,7 @@ public void toThriftTest() throws Exception { // storageMediaMigrationTask TAgentTaskRequest request7 
= - (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, storageMediaMigrationTask); + (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, storageMediaMigrationTask); Assert.assertEquals(TTaskType.STORAGE_MEDIUM_MIGRATE, request7.getTaskType()); Assert.assertEquals(storageMediaMigrationTask.getSignature(), request7.getSignature()); Assert.assertNotNull(request7.getStorageMediumMigrateReq()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/task/LoadEtlTaskTest.java b/fe/fe-core/src/test/java/org/apache/doris/task/LoadEtlTaskTest.java index eb8a1b8f730d49..4380eaae9a4f62 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/task/LoadEtlTaskTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/task/LoadEtlTaskTest.java @@ -188,7 +188,6 @@ public void testRunEtlTask(@Mocked DppScheduler dppScheduler) throws Exception { // verify finished Assert.assertEquals(100, job.getProgress()); - long expectVersion = partition.getVisibleVersion() + 1; Assert.assertEquals(-1, job.getIdToTableLoadInfo().get(tableId) .getIdToPartitionLoadInfo().get(partitionId).getVersion()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/task/SerialExecutorServiceTest.java b/fe/fe-core/src/test/java/org/apache/doris/task/SerialExecutorServiceTest.java index cc9f500cec76f6..bb90912c58491c 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/task/SerialExecutorServiceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/task/SerialExecutorServiceTest.java @@ -69,6 +69,7 @@ public void testSubmit() { try { Thread.sleep(1000); } catch (InterruptedException e) { + // CHECKSTYLE IGNORE THIS LINE } // The submission order of the same signature should be equal to the execution order diff --git a/fe/fe-core/src/test/java/org/apache/doris/transaction/GlobalTransactionMgrTest.java b/fe/fe-core/src/test/java/org/apache/doris/transaction/GlobalTransactionMgrTest.java index 06acd52dd6e5b0..eab20c6f935505 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/transaction/GlobalTransactionMgrTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/transaction/GlobalTransactionMgrTest.java @@ -56,7 +56,8 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; - +import mockit.Injectable; +import mockit.Mocked; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.junit.Assert; import org.junit.Before; @@ -67,8 +68,6 @@ import java.util.Map; import java.util.Set; import java.util.UUID; -import mockit.Injectable; -import mockit.Mocked; public class GlobalTransactionMgrTest { @@ -359,7 +358,7 @@ LoadJobSourceType.ROUTINE_LOAD_TASK, new TxnCoordinator(TxnSourceType.BE, "be1") Deencapsulation.setField(masterTransMgr.getDatabaseTransactionMgr(CatalogTestUtil.testDbId1), "idToRunningTransactionState", idToTransactionState); Table testTable1 = masterCatalog.getDbOrMetaException(CatalogTestUtil.testDbId1).getTableOrMetaException(CatalogTestUtil.testTableId1); masterTransMgr.commitTransaction(1L, Lists.newArrayList(testTable1), 1L, transTablets, txnCommitAttachment); - RoutineLoadStatistic jobStatistic = Deencapsulation.getField(routineLoadJob,"jobStatistic"); + RoutineLoadStatistic jobStatistic = Deencapsulation.getField(routineLoadJob, "jobStatistic"); Assert.assertEquals(Long.valueOf(101), Deencapsulation.getField(jobStatistic, "currentTotalRows")); Assert.assertEquals(Long.valueOf(1), Deencapsulation.getField(jobStatistic, "currentErrorRows")); @@ -431,7 +430,7 @@ LoadJobSourceType.ROUTINE_LOAD_TASK, new 
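The import hunk above normalizes grouping: the mockit imports leave the trailing java.util block and join the other third-party imports in alphabetical order, so java.* remains the final group. A sketch of the resulting layout in GlobalTransactionMgrTest (list abridged):

    import com.google.common.collect.Sets;
    import mockit.Injectable;
    import mockit.Mocked;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.junit.Assert;

    import java.util.Map;
    import java.util.UUID;
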
TxnCoordinator(TxnSourceType.BE, "be1") masterTransMgr.commitTransaction(1L, Lists.newArrayList(testTable1), 1L, transTablets, txnCommitAttachment); // current total rows and error rows will be reset after job pause, so here they should be 0. - RoutineLoadStatistic jobStatistic = Deencapsulation.getField(routineLoadJob,"jobStatistic"); + RoutineLoadStatistic jobStatistic = Deencapsulation.getField(routineLoadJob, "jobStatistic"); Assert.assertEquals(Long.valueOf(0), Deencapsulation.getField(jobStatistic, "currentTotalRows")); Assert.assertEquals(Long.valueOf(0), Deencapsulation.getField(jobStatistic, "currentErrorRows")); Assert.assertEquals(Long.valueOf(111L), diff --git a/fe/fe-core/src/test/java/org/apache/doris/utframe/DemoMultiBackendsTest.java b/fe/fe-core/src/test/java/org/apache/doris/utframe/DemoMultiBackendsTest.java index 0460d19e7b100a..4c91afab33f235 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/utframe/DemoMultiBackendsTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/utframe/DemoMultiBackendsTest.java @@ -131,8 +131,9 @@ public void testCreateDbAndTable() throws Exception { Catalog.getCurrentCatalog().createDb(createDbStmt); System.out.println(Catalog.getCurrentCatalog().getDbNames()); // 3. create table tbl1 - String createTblStmtStr = "create table db1.tbl1(k1 int) distributed by hash(k1) buckets 3 properties('replication_num' = '3'," + - "'colocate_with' = 'g1');"; + String createTblStmtStr = "create table db1.tbl1(k1 int) distributed by hash(k1) buckets 3" + + " properties('replication_num' = '3'," + + "'colocate_with' = 'g1');"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr, ctx); Catalog.getCurrentCatalog().createTable(createTableStmt); // must set replicas' path hash, or the tablet scheduler won't work diff --git a/fe/fe-core/src/test/java/org/apache/doris/utframe/DorisAssert.java b/fe/fe-core/src/test/java/org/apache/doris/utframe/DorisAssert.java index ad395117a5023c..1d74e494710fc3 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/utframe/DorisAssert.java +++ b/fe/fe-core/src/test/java/org/apache/doris/utframe/DorisAssert.java @@ -209,7 +209,7 @@ private String internalExecute(String sql) throws Exception { return explainString; } - public Planner internalExecuteOneAndGetPlan() throws Exception{ + public Planner internalExecuteOneAndGetPlan() throws Exception { SqlScanner input = new SqlScanner(new StringReader(sql), ctx.getSessionVariable().getSqlMode()); SqlParser parser = new SqlParser(input); List stmts = SqlParserUtils.getMultiStmts(parser); diff --git a/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java b/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java index 13dcc36f65d2d9..efab82c9aa1f40 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java +++ b/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java @@ -327,11 +327,12 @@ public TCheckStorageFormatResult checkStorageFormat() throws TException { // The default Brpc service. 
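An empty catch body trips checkstyle's empty-block checks, and the fix chosen in this patch is to document intent instead of restructuring: the swallowed InterruptedException in SerialExecutorServiceTest gains a marker comment, and the IOException in UtFrameUtils.cleanDorisFeDir below gets a plain "// ignore". The pattern, as applied:

    try {
        Thread.sleep(1000);
    } catch (InterruptedException e) {
        // CHECKSTYLE IGNORE THIS LINE
    }
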
public static class DefaultPBackendServiceImpl extends PBackendServiceGrpc.PBackendServiceImplBase { - @Override - public void transmitData(InternalService.PTransmitDataParams request, StreamObserver responseObserver) { - responseObserver.onNext(InternalService.PTransmitDataResult.newBuilder() - .setStatus(Types.PStatus.newBuilder().setStatusCode(0)).build()); - responseObserver.onCompleted(); + @Override + public void transmitData(InternalService.PTransmitDataParams request, + StreamObserver responseObserver) { + responseObserver.onNext(InternalService.PTransmitDataResult.newBuilder() + .setStatus(Types.PStatus.newBuilder().setStatusCode(0)).build()); + responseObserver.onCompleted(); } @Override diff --git a/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java b/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java index b13b19a60b5370..555ee92feb16ff 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java +++ b/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java @@ -67,8 +67,7 @@ import java.util.UUID; /** - * This class is deprecated. - * If you want to start a FE server in unit test, please let your test + * @deprecated If you want to start a FE server in unit test, please let your test * class extend {@link TestWithFeService}. */ @Deprecated @@ -241,6 +240,7 @@ public static void cleanDorisFeDir(String baseDir) { try { FileUtils.deleteDirectory(new File(baseDir)); } catch (IOException e) { + // ignore } } diff --git a/fe/hive-udf/src/main/java/org/apache/doris/common/BitmapValueUtil.java b/fe/hive-udf/src/main/java/org/apache/doris/common/BitmapValueUtil.java index 1d4f6be37ff651..5f2aa8cd4ac0e0 100644 --- a/fe/hive-udf/src/main/java/org/apache/doris/common/BitmapValueUtil.java +++ b/fe/hive-udf/src/main/java/org/apache/doris/common/BitmapValueUtil.java @@ -34,7 +34,7 @@ public static byte[] serializeToBytes(BitmapValue bitmapValue) throws IOExceptio return bos.toByteArray(); } - public static BitmapValue deserializeToBitmap(byte[] bytes) throws IOException{ + public static BitmapValue deserializeToBitmap(byte[] bytes) throws IOException { BitmapValue bitmapValue = new BitmapValue(); DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes)); bitmapValue.deserialize(in); diff --git a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapAndUDF.java b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapAndUDF.java index cf7033783d098a..8e32c82e3fbcd9 100644 --- a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapAndUDF.java +++ b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapAndUDF.java @@ -53,18 +53,18 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen @Override public Object evaluate(DeferredObject[] args) throws HiveException { - if(args[0] == null || args[1] == null){ + if (args[0] == null || args[1] == null) { return null; } byte[] inputBytes0 = this.inputOI0.getPrimitiveJavaObject(args[0].get()); byte[] inputBytes1 = this.inputOI1.getPrimitiveJavaObject(args[1].get()); - try{ + try { BitmapValue bitmapValue0 = BitmapValueUtil.deserializeToBitmap(inputBytes0); BitmapValue bitmapValue1 = BitmapValueUtil.deserializeToBitmap(inputBytes1); bitmapValue0.and(bitmapValue1); return BitmapValueUtil.serializeToBytes(bitmapValue1); - }catch (IOException ioException){ + } catch (IOException ioException) { ioException.printStackTrace(); throw new RuntimeException(ioException); } diff --git a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapCountUDF.java 
b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapCountUDF.java index 29adc008ec950c..2d718433e861c0 100644 --- a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapCountUDF.java +++ b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapCountUDF.java @@ -50,15 +50,15 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen @Override public Object evaluate(DeferredObject[] args) throws HiveException { - if(args[0] == null){ + if (args[0] == null) { return 0; } byte[] inputBytes = this.inputOI.getPrimitiveJavaObject(args[0].get()); - try{ + try { BitmapValue bitmapValue = BitmapValueUtil.deserializeToBitmap(inputBytes); return bitmapValue.cardinality(); - }catch (IOException ioException){ + } catch (IOException ioException) { ioException.printStackTrace(); throw new HiveException(ioException); } diff --git a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapOrUDF.java b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapOrUDF.java index f04b707b87f1db..a15d3f0faf8985 100644 --- a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapOrUDF.java +++ b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapOrUDF.java @@ -53,18 +53,18 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen @Override public Object evaluate(DeferredObject[] args) throws HiveException { - if(args[0] == null || args[1] == null){ + if (args[0] == null || args[1] == null) { return null; } byte[] inputBytes0 = this.inputOI0.getPrimitiveJavaObject(args[0].get()); byte[] inputBytes1 = this.inputOI1.getPrimitiveJavaObject(args[1].get()); - try{ + try { BitmapValue bitmapValue0 = BitmapValueUtil.deserializeToBitmap(inputBytes0); BitmapValue bitmapValue1 = BitmapValueUtil.deserializeToBitmap(inputBytes1); bitmapValue0.or(bitmapValue1); return BitmapValueUtil.serializeToBytes(bitmapValue1); - }catch (IOException ioException){ + } catch (IOException ioException) { ioException.printStackTrace(); throw new RuntimeException(ioException); } diff --git a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapUnionUDAF.java b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapUnionUDAF.java index 82140c8e16c8dd..0289c28da51115 100644 --- a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapUnionUDAF.java +++ b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapUnionUDAF.java @@ -105,7 +105,7 @@ public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveExcep } @Override - public Object terminate(AggregationBuffer agg){ + public Object terminate(AggregationBuffer agg) { BitmapAgg myagg = (BitmapAgg) agg; try { return BitmapValueUtil.serializeToBytes(myagg.bitmap); @@ -126,7 +126,7 @@ public void merge(AggregationBuffer agg, Object partial) { } @Override - public Object terminatePartial(AggregationBuffer agg){ + public Object terminatePartial(AggregationBuffer agg) { return terminate(agg); } } diff --git a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapXorUDF.java b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapXorUDF.java index ec8c3d89dda3a3..cb9eebc9b2ec8a 100644 --- a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapXorUDF.java +++ b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapXorUDF.java @@ -53,18 +53,18 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen @Override public Object evaluate(DeferredObject[] args) throws HiveException { - if(args[0] == null || args[1] == null){ + if (args[0] == null || args[1] == null) { return null; } byte[] inputBytes0 = 
this.inputOI0.getPrimitiveJavaObject(args[0].get()); byte[] inputBytes1 = this.inputOI1.getPrimitiveJavaObject(args[1].get()); - try{ + try { BitmapValue bitmapValue0 = BitmapValueUtil.deserializeToBitmap(inputBytes0); BitmapValue bitmapValue1 = BitmapValueUtil.deserializeToBitmap(inputBytes1); bitmapValue0.xor(bitmapValue1); return BitmapValueUtil.serializeToBytes(bitmapValue1); - }catch (IOException ioException){ + } catch (IOException ioException) { ioException.printStackTrace(); throw new RuntimeException(ioException); } diff --git a/fe/hive-udf/src/main/java/org/apache/doris/udf/ToBitmapUDAF.java b/fe/hive-udf/src/main/java/org/apache/doris/udf/ToBitmapUDAF.java index b4a5defc6ff55e..39821a845b6b42 100644 --- a/fe/hive-udf/src/main/java/org/apache/doris/udf/ToBitmapUDAF.java +++ b/fe/hive-udf/src/main/java/org/apache/doris/udf/ToBitmapUDAF.java @@ -112,7 +112,7 @@ public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveExcep } @Override - public Object terminate(AggregationBuffer agg){ + public Object terminate(AggregationBuffer agg) { BitmapAgg myagg = (BitmapAgg) agg; try { return BitmapValueUtil.serializeToBytes(myagg.bitmap); @@ -133,7 +133,7 @@ public void merge(AggregationBuffer agg, Object partial) { } @Override - public Object terminatePartial(AggregationBuffer agg){ + public Object terminatePartial(AggregationBuffer agg) { return terminate(agg); } diff --git a/fe/java-udf/src/main/java/org/apache/doris/udf/JMXJsonUtil.java b/fe/java-udf/src/main/java/org/apache/doris/udf/JMXJsonUtil.java index b4016e32e072b1..8997dcd85ad9f8 100644 --- a/fe/java-udf/src/main/java/org/apache/doris/udf/JMXJsonUtil.java +++ b/fe/java-udf/src/main/java/org/apache/doris/udf/JMXJsonUtil.java @@ -120,7 +120,6 @@ private static void listBeans(JsonGenerator jg) throws IOException { ObjectName oname = it.next(); MBeanInfo minfo; String code = ""; - Object attributeinfo = null; try { minfo = mBeanServer.getMBeanInfo(oname); code = minfo.getClassName(); diff --git a/fe/java-udf/src/main/java/org/apache/doris/udf/JniUtil.java b/fe/java-udf/src/main/java/org/apache/doris/udf/JniUtil.java index 32ed79ca038d7d..b7e24116a197b2 100644 --- a/fe/java-udf/src/main/java/org/apache/doris/udf/JniUtil.java +++ b/fe/java-udf/src/main/java/org/apache/doris/udf/JniUtil.java @@ -91,8 +91,7 @@ public static String throwableToStackTrace(Throwable t) { /** * Serializes input into a byte[] using the default protocol factory. */ - public static > - byte[] serializeToThrift(T input) throws InternalException { + public static > byte[] serializeToThrift(T input) throws InternalException { TSerializer serializer = new TSerializer(protocolFactory_); try { return serializer.serialize(input); @@ -104,8 +103,8 @@ byte[] serializeToThrift(T input) throws InternalException { /** * Serializes input into a byte[] using a given protocol factory. 
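One thing these formatting-only hunks deliberately leave alone is worth flagging: in each of BitmapAndUDF, BitmapOrUDF, and BitmapXorUDF, evaluate() computes bitmapValue0.and(bitmapValue1) (or the or/xor equivalent) and then serializes bitmapValue1. Assuming those operations mutate the receiver in place, which is the usual contract for roaring-bitmap wrappers, the returned bytes are the untouched right operand. A hedged sketch of the apparent intent, usable inside the existing try block:

    BitmapValue left = BitmapValueUtil.deserializeToBitmap(inputBytes0);
    BitmapValue right = BitmapValueUtil.deserializeToBitmap(inputBytes1);
    left.and(right);                                // result accumulates into 'left'
    return BitmapValueUtil.serializeToBytes(left);  // serialize the mutated operand, not 'right'

Since this patch is a style pass, the behavior is carried through unchanged; a fix would belong in a separate functional change.
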
*/ - public static , F extends TProtocolFactory> - byte[] serializeToThrift(T input, F protocolFactory) throws InternalException { + public static , F extends TProtocolFactory> byte[] serializeToThrift( + T input, F protocolFactory) throws InternalException { TSerializer serializer = new TSerializer(protocolFactory); try { return serializer.serialize(input); @@ -114,17 +113,16 @@ byte[] serializeToThrift(T input, F protocolFactory) throws InternalException { } } - public static > - void deserializeThrift(T result, byte[] thriftData) throws InternalException { + public static > void deserializeThrift( + T result, byte[] thriftData) throws InternalException { deserializeThrift(protocolFactory_, result, thriftData); } /** * Deserialize a serialized form of a Thrift data structure to its object form. */ - public static , F extends TProtocolFactory> - void deserializeThrift(F protocolFactory, T result, byte[] thriftData) - throws InternalException { + public static , F extends TProtocolFactory> void deserializeThrift( + F protocolFactory, T result, byte[] thriftData) throws InternalException { // TODO: avoid creating deserializer for each query? TDeserializer deserializer = new TDeserializer(protocolFactory); try { diff --git a/fe/java-udf/src/main/java/org/apache/doris/udf/UdfExecutor.java b/fe/java-udf/src/main/java/org/apache/doris/udf/UdfExecutor.java index 3a702e82d4b446..1d2560102acdeb 100644 --- a/fe/java-udf/src/main/java/org/apache/doris/udf/UdfExecutor.java +++ b/fe/java-udf/src/main/java/org/apache/doris/udf/UdfExecutor.java @@ -259,8 +259,8 @@ public void evaluate() throws UdfRuntimeException { // Currently, -1 indicates this column is not nullable. So input argument is // null iff inputNullsPtrs_ != -1 and nullCol[row_idx] != 0. if (UdfUtils.UNSAFE.getLong(null, - UdfUtils.getAddressAtOffset(inputNullsPtrs, i)) == -1 || - UdfUtils.UNSAFE.getByte(null, UdfUtils.UNSAFE.getLong(null, + UdfUtils.getAddressAtOffset(inputNullsPtrs, i)) == -1 + || UdfUtils.UNSAFE.getByte(null, UdfUtils.UNSAFE.getLong(null, UdfUtils.getAddressAtOffset(inputNullsPtrs, i)) + rowIdx) == 0) { inputArgs[i] = inputObjects[i]; } else { @@ -314,8 +314,8 @@ private boolean storeUdfResult(Object obj, long row) throws UdfRuntimeException } outputOffset += 1; UdfUtils.UNSAFE.putChar(null, UdfUtils.UNSAFE.getLong(null, outputBufferPtr) + outputOffset - 1, UdfUtils.END_OF_STRING); - UdfUtils.UNSAFE.putInt(null, UdfUtils.UNSAFE.getLong(null, outputOffsetsPtr) + - 4L * row, Integer.parseUnsignedInt(String.valueOf(outputOffset))); + UdfUtils.UNSAFE.putInt(null, UdfUtils.UNSAFE.getLong(null, outputOffsetsPtr) + + 4L * row, Integer.parseUnsignedInt(String.valueOf(outputOffset))); } return true; } @@ -702,4 +702,4 @@ private byte[] convertByteOrder(byte[] bytes) { } return bytes; } -} \ No newline at end of file +} diff --git a/fe/java-udf/src/test/java/org/apache/doris/udf/SimpleAddUdf.java b/fe/java-udf/src/test/java/org/apache/doris/udf/SimpleAddUdf.java index d0ed615156731c..7816ea4ab1201c 100644 --- a/fe/java-udf/src/test/java/org/apache/doris/udf/SimpleAddUdf.java +++ b/fe/java-udf/src/test/java/org/apache/doris/udf/SimpleAddUdf.java @@ -19,6 +19,6 @@ public class SimpleAddUdf { public Integer evaluate(Integer a, int b) { - return a == null? null: a + b; + return a == null ? 
null : a + b; } } diff --git a/fe/java-udf/src/test/java/org/apache/doris/udf/StringConcatUdf.java b/fe/java-udf/src/test/java/org/apache/doris/udf/StringConcatUdf.java index 2fa6c2754df8e4..2dd22d20315bf2 100644 --- a/fe/java-udf/src/test/java/org/apache/doris/udf/StringConcatUdf.java +++ b/fe/java-udf/src/test/java/org/apache/doris/udf/StringConcatUdf.java @@ -19,6 +19,6 @@ public class StringConcatUdf { public String evaluate(String a, String b) { - return a == null || b == null? null: a + b; + return a == null || b == null ? null : a + b; } } diff --git a/fe/java-udf/src/test/java/org/apache/doris/udf/UdfExecutorTest.java b/fe/java-udf/src/test/java/org/apache/doris/udf/UdfExecutorTest.java index e999c3d45e9c2c..814839f2aa1bbc 100644 --- a/fe/java-udf/src/test/java/org/apache/doris/udf/UdfExecutorTest.java +++ b/fe/java-udf/src/test/java/org/apache/doris/udf/UdfExecutorTest.java @@ -266,7 +266,7 @@ public void testConstantOneUdf() throws Exception { executor = new UdfExecutor(serializer.serialize(params)); executor.evaluate(); - for (int i = 0; i < 10; i ++) { + for (int i = 0; i < 10; i++) { assert (UdfUtils.UNSAFE.getByte(outputNull + i) == 0); assert (UdfUtils.UNSAFE.getInt(outputBuffer + 4 * i) == 1); } @@ -320,7 +320,7 @@ public void testSimpleAddUdf() throws Exception { UdfUtils.UNSAFE.putLong(inputNullPtr, inputNull1); UdfUtils.UNSAFE.putLong(inputNullPtr + 8, inputNull2); - for (int i = 0; i < batchSize; i ++) { + for (int i = 0; i < batchSize; i++) { UdfUtils.UNSAFE.putInt(null, inputBuffer1 + i * 4, i); UdfUtils.UNSAFE.putInt(null, inputBuffer2 + i * 4, i); @@ -343,7 +343,7 @@ public void testSimpleAddUdf() throws Exception { executor = new UdfExecutor(serializer.serialize(params)); executor.evaluate(); - for (int i = 0; i < batchSize; i ++) { + for (int i = 0; i < batchSize; i++) { if (i % 2 == 0) { assert (UdfUtils.UNSAFE.getByte(outputNull + i) == 1); } else { @@ -388,13 +388,13 @@ public void testStringConcatUdf() throws Exception { long[] inputOffsets2 = new long[batchSize]; long inputBufferSize1 = 0; long inputBufferSize2 = 0; - for (int i = 0; i < batchSize; i ++) { + for (int i = 0; i < batchSize; i++) { input1[i] = "Input1_" + i; input2[i] = "Input2_" + i; - inputOffsets1[i] = i == 0? input1[i].getBytes(StandardCharsets.UTF_8).length + 1: - inputOffsets1[i - 1] + input1[i].getBytes(StandardCharsets.UTF_8).length + 1; - inputOffsets2[i] = i == 0? input2[i].getBytes(StandardCharsets.UTF_8).length + 1: - inputOffsets2[i - 1] + input2[i].getBytes(StandardCharsets.UTF_8).length + 1; + inputOffsets1[i] = i == 0 ? input1[i].getBytes(StandardCharsets.UTF_8).length + 1 + : inputOffsets1[i - 1] + input1[i].getBytes(StandardCharsets.UTF_8).length + 1; + inputOffsets2[i] = i == 0 ? 
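The ternary fixes in SimpleAddUdf, StringConcatUdf, and UdfExecutorTest follow two rules: whitespace on both sides of '?' and ':', and, when the expression wraps, the ':' leading the continuation line just like the '+' above. Sketch:

    // Before: missing space before '?' and ':'.
    return a == null? null: a + b;
    // After:
    return a == null ? null : a + b;

    // Wrapped form; firstLen is a hypothetical stand-in for the
    // byte-length expression used in UdfExecutorTest.
    inputOffsets1[i] = i == 0 ? firstLen
            : inputOffsets1[i - 1] + firstLen;
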
input2[i].getBytes(StandardCharsets.UTF_8).length + 1 + : inputOffsets2[i - 1] + input2[i].getBytes(StandardCharsets.UTF_8).length + 1; inputBufferSize1 += input1[i].getBytes(StandardCharsets.UTF_8).length; inputBufferSize2 += input2[i].getBytes(StandardCharsets.UTF_8).length; } @@ -433,7 +433,7 @@ public void testStringConcatUdf() throws Exception { UdfUtils.UNSAFE.putLong(inputOffsetsPtr, inputOffset1); UdfUtils.UNSAFE.putLong(inputOffsetsPtr + 8, inputOffset2); - for (int i = 0; i < batchSize; i ++) { + for (int i = 0; i < batchSize; i++) { if (i == 0) { UdfUtils.copyMemory(input1[i].getBytes(StandardCharsets.UTF_8), UdfUtils.BYTE_ARRAY_OFFSET, null, inputBuffer1, @@ -471,9 +471,9 @@ public void testStringConcatUdf() throws Exception { executor = new UdfExecutor(serializer.serialize(params)); executor.evaluate(); - for (int i = 0; i < batchSize; i ++) { - byte[] bytes = new byte[input1[i].getBytes(StandardCharsets.UTF_8).length + - input2[i].getBytes(StandardCharsets.UTF_8).length]; + for (int i = 0; i < batchSize; i++) { + byte[] bytes = new byte[input1[i].getBytes(StandardCharsets.UTF_8).length + + input2[i].getBytes(StandardCharsets.UTF_8).length]; assert (UdfUtils.UNSAFE.getByte(outputNull + i) == 0); if (i == 0) { UdfUtils.copyMemory(null, outputBuffer, bytes, UdfUtils.BYTE_ARRAY_OFFSET, diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DorisRangePartitioner.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DorisRangePartitioner.java index f09d4641f982df..1c40d58896a984 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DorisRangePartitioner.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DorisRangePartitioner.java @@ -52,7 +52,7 @@ public int getPartition(Object var1) { && partitionInfo.partitionType.equalsIgnoreCase(UNPARTITIONED_TYPE)) { return 0; } - DppColumns key = (DppColumns)var1; + DppColumns key = (DppColumns) var1; // get the partition columns from key as partition key DppColumns partitionKey = new DppColumns(key, partitionKeyIndexes); // TODO: optimize this by use binary search @@ -78,11 +78,11 @@ public boolean isRowContained(DppColumns row) { } public String toString() { - return "PartitionRangeKey{" + - "isMaxPartition=" + isMaxPartition + - ", startKeys=" + startKeys + - ", endKeys=" + endKeys + - '}'; + return "PartitionRangeKey{" + + "isMaxPartition=" + isMaxPartition + + ", startKeys=" + startKeys + + ", endKeys=" + endKeys + + '}'; } } } diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppColumns.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppColumns.java index 2905cb6d83cc9e..ff4ebb8da88bdf 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppColumns.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppColumns.java @@ -30,11 +30,11 @@ class DppColumns implements Comparable, Serializable { public List columns = new ArrayList(); - public DppColumns(List keys){ + public DppColumns(List keys) { this.columns = keys; } - public DppColumns(DppColumns key, List indexes){ + public DppColumns(DppColumns key, List indexes) { for (int i = 0; i < indexes.size(); ++i) { columns.add(key.columns.get(indexes.get(i))); } @@ -58,23 +58,23 @@ public int compareTo(DppColumns other) { } } if (columns.get(i) instanceof Integer) { - cmp = ((Integer)(columns.get(i))).compareTo((Integer)(other.columns.get(i))); + cmp = ((Integer) (columns.get(i))).compareTo((Integer) (other.columns.get(i))); } else if 
(columns.get(i) instanceof Long) { - cmp = ((Long)(columns.get(i))).compareTo((Long)(other.columns.get(i))); + cmp = ((Long) (columns.get(i))).compareTo((Long) (other.columns.get(i))); } else if (columns.get(i) instanceof Boolean) { - cmp = ((Boolean)(columns.get(i))).compareTo((Boolean) (other.columns.get(i))); + cmp = ((Boolean) (columns.get(i))).compareTo((Boolean) (other.columns.get(i))); } else if (columns.get(i) instanceof Short) { - cmp = ((Short)(columns.get(i))).compareTo((Short)(other.columns.get(i))); + cmp = ((Short) (columns.get(i))).compareTo((Short) (other.columns.get(i))); } else if (columns.get(i) instanceof Float) { - cmp = ((Float)(columns.get(i))).compareTo((Float) (other.columns.get(i))); + cmp = ((Float) (columns.get(i))).compareTo((Float) (other.columns.get(i))); } else if (columns.get(i) instanceof Double) { - cmp = ((Double)(columns.get(i))).compareTo((Double) (other.columns.get(i))); + cmp = ((Double) (columns.get(i))).compareTo((Double) (other.columns.get(i))); } else if (columns.get(i) instanceof Date) { - cmp = ((Date)(columns.get(i))).compareTo((Date) (other.columns.get(i))); + cmp = ((Date) (columns.get(i))).compareTo((Date) (other.columns.get(i))); } else if (columns.get(i) instanceof java.sql.Timestamp) { - cmp = ((java.sql.Timestamp)columns.get(i)).compareTo((java.sql.Timestamp)other.columns.get(i)); + cmp = ((java.sql.Timestamp) columns.get(i)).compareTo((java.sql.Timestamp) other.columns.get(i)); } else { - cmp = ((String)(columns.get(i))).compareTo((String) (other.columns.get(i))); + cmp = ((String) (columns.get(i))).compareTo((String) (other.columns.get(i))); } if (cmp != 0) { return cmp; @@ -102,9 +102,9 @@ public int hashCode() { @Override public String toString() { - return "dppColumns{" + - "columns=" + columns + - '}'; + return "dppColumns{" + + "columns=" + columns + + '}'; } } diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppUtils.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppUtils.java index 3d37acef88827e..e28c41eb0b51e1 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppUtils.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppUtils.java @@ -61,7 +61,7 @@ public static Class getClassFromDataType(DataType dataType) { } else if (dataType.equals(DataTypes.StringType)) { return String.class; } else if (dataType instanceof DecimalType) { - DecimalType decimalType = (DecimalType)dataType; + DecimalType decimalType = (DecimalType) dataType; return BigDecimal.valueOf(decimalType.precision(), decimalType.scale()).getClass(); } else if (dataType.equals(DataTypes.TimestampType)) { return Long.class; @@ -162,13 +162,13 @@ public static ByteBuffer getHashValue(Object o, DataType type) { return buffer; } if (type.equals(DataTypes.ByteType)) { - buffer.put((byte)o); + buffer.put((byte) o); } else if (type.equals(DataTypes.ShortType)) { - buffer.putShort((Short)o); + buffer.putShort((Short) o); } else if (type.equals(DataTypes.IntegerType)) { buffer.putInt((Integer) o); } else if (type.equals(DataTypes.LongType)) { - buffer.putLong((Long)o); + buffer.putLong((Long) o); } else if (type.equals(DataTypes.StringType)) { try { String str = String.valueOf(o); @@ -177,7 +177,7 @@ public static ByteBuffer getHashValue(Object o, DataType type) { throw new RuntimeException(e); } } else if (type.equals(DataTypes.BooleanType)) { - Boolean b = (Boolean)o; + Boolean b = (Boolean) o; byte value = (byte) (b ? 
1 : 0); buffer.put(value); } diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/GlobalDictBuilder.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/GlobalDictBuilder.java index 8e2124ae4ba306..6fc12edb0be5c0 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/GlobalDictBuilder.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/GlobalDictBuilder.java @@ -171,9 +171,8 @@ public void extractDistinctColumn() { // For the column in dictColumns's valueSet, their value is a subset of column in keyset, // so we don't need to extract distinct value of column in valueSet for (Object column : dictColumn.keySet()) { - workerList.add(()->{ - spark.sql(getInsertDistinctKeyTableSql(column.toString(), dorisIntermediateHiveTable)); - }); + workerList.add( + () -> spark.sql(getInsertDistinctKeyTableSql(column.toString(), dorisIntermediateHiveTable))); } submitWorker(workerList); @@ -186,7 +185,7 @@ public void buildGlobalDict() throws ExecutionException, InterruptedException { List globalDictBuildWorkers = new ArrayList<>(); for (Object distinctColumnNameOrigin : dictColumn.keySet()) { String distinctColumnNameTmp = distinctColumnNameOrigin.toString(); - globalDictBuildWorkers.add(()->{ + globalDictBuildWorkers.add(() -> { // get global dict max value List maxGlobalDictValueRow = spark.sql(getMaxGlobalDictValueSql(distinctColumnNameTmp)).collectAsList(); if (maxGlobalDictValueRow.size() == 0) { @@ -197,8 +196,8 @@ public void buildGlobalDict() throws ExecutionException, InterruptedException { long minDictValue = 0; Row row = maxGlobalDictValueRow.get(0); if (row != null && row.get(0) != null) { - maxDictValue = (long)row.get(0); - minDictValue = (long)row.get(1); + maxDictValue = (long) row.get(0); + minDictValue = (long) row.get(1); } LOG.info(" column " + distinctColumnNameTmp + " 's max value in dict is " + maxDictValue + ", min value is " + minDictValue); // maybe never happened, but we need detect it @@ -222,7 +221,8 @@ public void buildGlobalDict() throws ExecutionException, InterruptedException { // encode dorisIntermediateHiveTable's distinct column public void encodeDorisIntermediateHiveTable() { for (Object distinctColumnObj : dictColumn.keySet()) { - spark.sql(getEncodeDorisIntermediateHiveTableSql(distinctColumnObj.toString(), (ArrayList)dictColumn.get(distinctColumnObj.toString()))); + spark.sql(getEncodeDorisIntermediateHiveTableSql(distinctColumnObj.toString(), + (ArrayList) dictColumn.get(distinctColumnObj.toString()))); } } @@ -339,15 +339,18 @@ private String getBuildGlobalDictSql(long maxGlobalDictValue, String distinctCol + "CAST((row_number() over(order by t1.dict_key)) as BIGINT) + " + "CAST(" + maxGlobalDictValue + " as BIGINT) as dict_value from " + "(select dict_key from " + distinctKeyTableName + " where dict_column='" + distinctColumnName + "' and dict_key is not null)t1 left join " - + " (select dict_key,dict_value from " + globalDictTableName + " where dict_column='" + distinctColumnName + "' )t2 " + - "on t1.dict_key = t2.dict_key where t2.dict_value is null"; + + " (select dict_key,dict_value from " + globalDictTableName + " where dict_column='" + distinctColumnName + "' )t2 " + + "on t1.dict_key = t2.dict_key where t2.dict_value is null"; } private String getNewDistinctValue(String distinctColumnName) { - return "select t1.dict_key from " + - " (select dict_key from " + distinctKeyTableName + " where dict_column='" + distinctColumnName + "' and dict_key is not null)t1 left join " + - " (select 
dict_key,dict_value from " + globalDictTableName + " where dict_column='" + distinctColumnName + "' )t2 " + - "on t1.dict_key = t2.dict_key where t2.dict_value is null"; + return "select t1.dict_key from " + + " (select dict_key from " + distinctKeyTableName + + " where dict_column='" + distinctColumnName + + "' and dict_key is not null)t1 left join " + + " (select dict_key,dict_value from " + globalDictTableName + + " where dict_column='" + distinctColumnName + "' )t2 " + + "on t1.dict_key = t2.dict_key where t2.dict_value is null"; } diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/MinimumCoverageRollupTreeBuilder.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/MinimumCoverageRollupTreeBuilder.java index 6b680c4d917380..02d7e79adf87f8 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/MinimumCoverageRollupTreeBuilder.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/MinimumCoverageRollupTreeBuilder.java @@ -84,7 +84,7 @@ public RollupTreeNode build(EtlJobConfig.EtlTable tableMeta) { valueColumns.add(column.columnName); } } - if(!insertIndex(root, indexMetas.get(i), keyColumns, valueColumns)) { + if (!insertIndex(root, indexMetas.get(i), keyColumns, valueColumns)) { throw new RuntimeException(String.format("can't find a parent rollup for rollup %s, rollup tree is %s", indexMetas.get(i).toString(), root.toString())); } @@ -100,7 +100,7 @@ private boolean insertIndex(RollupTreeNode root, EtlJobConfig.EtlIndex indexMeta // find suitable parent rollup from current node's children if (root.children != null) { for (int i = root.children.size() - 1; i >= 0; i--) { - if(insertIndex(root.children.get(i), indexMeta, keyColumns, valueColumns)){ + if (insertIndex(root.children.get(i), indexMeta, keyColumns, valueColumns)) { return true; } } diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkDpp.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkDpp.java index f5f14039a03dca..76fa3cf5fa0ae4 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkDpp.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkDpp.java @@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.parquet.column.ParquetProperties; +import org.apache.parquet.column.ParquetProperties.WriterVersion; import org.apache.parquet.hadoop.ParquetWriter; import org.apache.parquet.hadoop.metadata.CompressionCodecName; import org.apache.spark.Partitioner; @@ -119,7 +119,7 @@ public SparkDpp(SparkSession spark, EtlJobConfig etlJobConfig, Map, Object[]> processRDDAggregate(JavaPairRDD, Object[]> processRDDAggregate(JavaPairRDD, Object[]> resultRDD, - String pathPattern, - long tableId, - EtlJobConfig.EtlIndex indexMeta, - SparkRDDAggregator[] sparkRDDAggregators) throws SparkDppException { + String pathPattern, long tableId, + EtlJobConfig.EtlIndex indexMeta, SparkRDDAggregator[] sparkRDDAggregators) { // TODO(wb) should deal largeint as BigInteger instead of string when using biginteger as key, // data type may affect sorting logic StructType dstSchema = DppUtils.createDstTableSchema(indexMeta.columns, false, true); ExpressionEncoder encoder = RowEncoder.apply(dstSchema); resultRDD.repartitionAndSortWithinPartitions(new BucketPartitioner(bucketKeyMap), new BucketComparator()) - .foreachPartition(new VoidFunction,Object[]>>>() { - @Override - public void call(Iterator, Object[]>> 
t) throws Exception { - // write the data to dst file - Configuration conf = new Configuration(serializableHadoopConf.value()); - FileSystem fs = FileSystem.get(URI.create(etlJobConfig.outputPath), conf); - String lastBucketKey = null; - ParquetWriter parquetWriter = null; - TaskContext taskContext = TaskContext.get(); - long taskAttemptId = taskContext.taskAttemptId(); - String dstPath = ""; - String tmpPath = ""; - - while (t.hasNext()) { - Tuple2, Object[]> pair = t.next(); - List keyColumns = pair._1(); - Object[] valueColumns = pair._2(); - if ((keyColumns.size() + valueColumns.length) <= 1) { - LOG.warn("invalid row:" + pair); - continue; - } - + .foreachPartition((VoidFunction, Object[]>>>) t -> { + // write the data to dst file + Configuration conf = new Configuration(serializableHadoopConf.value()); + FileSystem fs = FileSystem.get(URI.create(etlJobConfig.outputPath), conf); + String lastBucketKey = null; + ParquetWriter parquetWriter = null; + TaskContext taskContext = TaskContext.get(); + long taskAttemptId = taskContext.taskAttemptId(); + String dstPath = ""; + String tmpPath = ""; + + while (t.hasNext()) { + Tuple2, Object[]> pair = t.next(); + List keyColumns = pair._1(); + Object[] valueColumns = pair._2(); + if ((keyColumns.size() + valueColumns.length) <= 1) { + LOG.warn("invalid row:" + pair); + continue; + } - String curBucketKey = keyColumns.get(0).toString(); - List columnObjects = new ArrayList<>(); - for (int i = 1; i < keyColumns.size(); ++i) { - columnObjects.add(keyColumns.get(i)); - } - for (int i = 0; i < valueColumns.length; ++i) { - columnObjects.add(sparkRDDAggregators[i].finalize(valueColumns[i])); - } - Row rowWithoutBucketKey = RowFactory.create(columnObjects.toArray()); - // if the bucket key is new, it will belong to a new tablet - if (lastBucketKey == null || !curBucketKey.equals(lastBucketKey)) { - if (parquetWriter != null) { - parquetWriter.close(); - // rename tmpPath to path - try { - fs.rename(new Path(tmpPath), new Path(dstPath)); - } catch (IOException ioe) { - LOG.warn("rename from tmpPath" + tmpPath + " to dstPath:" + dstPath + " failed. exception:" + ioe); - throw ioe; - } + String curBucketKey = keyColumns.get(0).toString(); + List columnObjects = new ArrayList<>(); + for (int i = 1; i < keyColumns.size(); ++i) { + columnObjects.add(keyColumns.get(i)); } - // flush current writer and create a new writer - String[] bucketKey = curBucketKey.split("_"); - if (bucketKey.length != 2) { - LOG.warn("invalid bucket key:" + curBucketKey); - continue; + for (int i = 0; i < valueColumns.length; ++i) { + columnObjects.add(sparkRDDAggregators[i].finalize(valueColumns[i])); } - long partitionId = Long.parseLong(bucketKey[0]); - int bucketId = Integer.parseInt(bucketKey[1]); - dstPath = String.format(pathPattern, tableId, partitionId, indexMeta.indexId, - bucketId, indexMeta.schemaHash); - tmpPath = dstPath + "." 
+ taskAttemptId; - conf.setBoolean("spark.sql.parquet.writeLegacyFormat", false); - conf.setBoolean("spark.sql.parquet.int64AsTimestampMillis", false); - conf.setBoolean("spark.sql.parquet.int96AsTimestamp", true); - conf.setBoolean("spark.sql.parquet.binaryAsString", false); - conf.set("spark.sql.parquet.outputTimestampType", "INT96"); - ParquetWriteSupport.setSchema(dstSchema, conf); - ParquetWriteSupport parquetWriteSupport = new ParquetWriteSupport(); - parquetWriter = new ParquetWriter(new Path(tmpPath), parquetWriteSupport, - CompressionCodecName.SNAPPY, 256 * 1024 * 1024, 16 * 1024, 1024 * 1024, - true, false, - ParquetProperties.WriterVersion.PARQUET_1_0, conf); - if (parquetWriter != null) { - LOG.info("[HdfsOperate]>> initialize writer succeed! path:" + tmpPath); + + Row rowWithoutBucketKey = RowFactory.create(columnObjects.toArray()); + // if the bucket key is new, it will belong to a new tablet + if (lastBucketKey == null || !curBucketKey.equals(lastBucketKey)) { + if (parquetWriter != null) { + parquetWriter.close(); + // rename tmpPath to path + try { + fs.rename(new Path(tmpPath), new Path(dstPath)); + } catch (IOException ioe) { + LOG.warn("rename from tmpPath" + tmpPath + " to dstPath:" + dstPath + + " failed. exception:" + ioe); + throw ioe; + } + } + // flush current writer and create a new writer + String[] bucketKey = curBucketKey.split("_"); + if (bucketKey.length != 2) { + LOG.warn("invalid bucket key:" + curBucketKey); + continue; + } + long partitionId = Long.parseLong(bucketKey[0]); + int bucketId = Integer.parseInt(bucketKey[1]); + dstPath = String.format(pathPattern, tableId, partitionId, indexMeta.indexId, bucketId, + indexMeta.schemaHash); + tmpPath = dstPath + "." + taskAttemptId; + conf.setBoolean("spark.sql.parquet.writeLegacyFormat", false); + conf.setBoolean("spark.sql.parquet.int64AsTimestampMillis", false); + conf.setBoolean("spark.sql.parquet.int96AsTimestamp", true); + conf.setBoolean("spark.sql.parquet.binaryAsString", false); + conf.set("spark.sql.parquet.outputTimestampType", "INT96"); + ParquetWriteSupport.setSchema(dstSchema, conf); + ParquetWriteSupport parquetWriteSupport = new ParquetWriteSupport(); + parquetWriter = new ParquetWriter(new Path(tmpPath), parquetWriteSupport, + CompressionCodecName.SNAPPY, 256 * 1024 * 1024, 16 * 1024, 1024 * 1024, true, false, + WriterVersion.PARQUET_1_0, conf); + if (parquetWriter != null) { + LOG.info("[HdfsOperate]>> initialize writer succeed! path:" + tmpPath); + } + lastBucketKey = curBucketKey; } - lastBucketKey = curBucketKey; + InternalRow internalRow = encoder.toRow(rowWithoutBucketKey); + parquetWriter.write(internalRow); } - InternalRow internalRow = encoder.toRow(rowWithoutBucketKey); - parquetWriter.write(internalRow); - } - if (parquetWriter != null) { - parquetWriter.close(); - try { - fs.rename(new Path(tmpPath), new Path(dstPath)); - } catch (IOException ioe) { - LOG.warn("rename from tmpPath" + tmpPath + " to dstPath:" + dstPath + " failed. exception:" + ioe); - throw ioe; + if (parquetWriter != null) { + parquetWriter.close(); + try { + fs.rename(new Path(tmpPath), new Path(dstPath)); + } catch (IOException ioe) { + LOG.warn("rename from tmpPath" + tmpPath + " to dstPath:" + dstPath + " failed. 
@@ -901,14 +899,14 @@ private Dataset<Row> checkDataFromHiveWithStrictMode(
                 if (dictBitmapColumnSet.contains(column.columnName.toLowerCase())) {
                     continue;
                 }
-                if (binaryBitmapColumnsSet.contains(column.columnName.toLowerCase())){
+                if (binaryBitmapColumnsSet.contains(column.columnName.toLowerCase())) {
                     continue;
                 }
                 columnNameNeedCheckArrayList.add(column);
                 columnParserArrayList.add(new BigIntParser());
-            } else if (!StringUtils.equalsIgnoreCase(column.columnType, "varchar") &&
-                    !StringUtils.equalsIgnoreCase(column.columnType, "char") &&
-                    !mappingColKeys.contains(column.columnName)) {
+            } else if (!StringUtils.equalsIgnoreCase(column.columnType, "varchar")
+                    && !StringUtils.equalsIgnoreCase(column.columnType, "char")
+                    && !mappingColKeys.contains(column.columnName)) {
                 columnNameNeedCheckArrayList.add(column);
                 columnParserArrayList.add(ColumnParser.create(column));
             }
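The `&&` hunk above, like many in this patch, follows the operator-wrap style that the checkstyle changes earlier in this diff appear to enforce (presumably checkstyle's `OperatorWrap` rule with the `nl` policy; the module list in this diff's checkstyle.xml is not fully legible here): when a condition wraps, the operator moves to the start of the continuation line, so a reader scanning the left margin sees at once that the line continues a condition rather than starting a statement. In miniature, with illustrative names:

```java
public class OperatorWrapDemo {
    static boolean isVarchar(String t) { return "varchar".equalsIgnoreCase(t); }
    static boolean isChar(String t) { return "char".equalsIgnoreCase(t); }

    public static void main(String[] args) {
        String columnType = "bigint";
        // OperatorWrap "nl" policy: the operator leads the continuation line
        // instead of dangling, easy to miss, at the end of the previous one
        boolean needsCheck = !isVarchar(columnType)
                && !isChar(columnType);
        System.out.println(needsCheck); // true
    }
}
```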
diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkRDDAggregator.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkRDDAggregator.java
index fcc5e1725ff971..d5b37ec3910ae3 100644
--- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkRDDAggregator.java
+++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkRDDAggregator.java
@@ -54,15 +54,15 @@ T init(Object value) {
     Object finalize(Object value) {
         return value;
-    };
+    }
 
     public static SparkRDDAggregator buildAggregator(EtlJobConfig.EtlColumn column) throws SparkDppException {
         String aggType = StringUtils.lowerCase(column.aggregationType);
         String columnType = StringUtils.lowerCase(column.columnType);
         switch (aggType) {
-            case "bitmap_union" :
+            case "bitmap_union":
                 return new BitmapUnionAggregator();
-            case "hll_union" :
+            case "hll_union":
                 return new HllUnionAggregator();
             case "max":
                 switch (columnType) {
@@ -236,8 +236,8 @@ BitmapValue init(Object value) {
             BitmapValue bitmapValue = new BitmapValue();
             if (value instanceof byte[]) {
                 bitmapValue.deserialize(new DataInputStream(new ByteArrayInputStream((byte[]) value)));
-            } else if (value != null){
-                bitmapValue.add(Long.valueOf(value.toString()));
+            } else if (value != null) {
+                bitmapValue.add(Long.parseLong(value.toString()));
             }
             return bitmapValue;
         } catch (Exception e) {
@@ -262,7 +262,7 @@ byte[] finalize(Object value) {
         try {
             ByteArrayOutputStream bos = new ByteArrayOutputStream();
             DataOutputStream outputStream = new DataOutputStream(bos);
-            ((BitmapValue)value).serialize(outputStream);
+            ((BitmapValue) value).serialize(outputStream);
             return bos.toByteArray();
         } catch (IOException ioException) {
             ioException.printStackTrace();
@@ -280,7 +280,7 @@ Hll init(Object value) {
             Hll hll = new Hll();
             if (value instanceof byte[]) {
                 hll.deserialize(new DataInputStream(new ByteArrayInputStream((byte[]) value)));
-            } else if (value != null){
+            } else if (value != null) {
                 hll.updateWithHash(value);
             }
             return hll;
@@ -306,7 +306,7 @@ byte[] finalize(Object value) {
         try {
             ByteArrayOutputStream bos = new ByteArrayOutputStream();
             DataOutputStream outputStream = new DataOutputStream(bos);
-            ((Hll)value).serialize(outputStream);
+            ((Hll) value).serialize(outputStream);
             return bos.toByteArray();
         } catch (IOException ioException) {
             ioException.printStackTrace();
@@ -382,7 +382,7 @@ Object update(Object dst, Object src) {
         if (dst == null) {
             return src;
         }
-        return ((Comparable)dst).compareTo(src) > 0 ? dst : src;
+        return ((Comparable) dst).compareTo(src) > 0 ? dst : src;
     }
 }
 
@@ -397,7 +397,7 @@ Object update(Object dst, Object src) {
         if (dst == null) {
             return src;
         }
-        return ((Comparable)dst).compareTo(src) < 0 ? dst : src;
+        return ((Comparable) dst).compareTo(src) < 0 ? dst : src;
     }
 }
 
@@ -427,7 +427,7 @@ Short update(Short dst, Short src) {
         }
         int ret = dst + src;
         // here may overflow, just keep the same logic with be
-        return (short)ret;
+        return (short) ret;
     }
 }
 
@@ -459,7 +459,7 @@ Byte update(Byte dst, Byte src) {
         }
         int ret = dst + src;
         // here may overflow, just keep the same logic with be
-        return (byte)ret;
+        return (byte) ret;
     }
 }
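The `(short) ret` and `(byte) ret` hunks only fix cast spacing, but the comment they carry ("here may overflow, just keep the same logic with be") is worth unpacking: the sum is computed in `int` and then truncated to the low 16 or 8 bits, which is exactly the wrap-around the C++ backend (BE) produces, so pre-aggregation on the Spark side stays bit-identical with aggregation on the BE. A quick check of that wrap-around semantics; the method below copies the shape of the `Short update` hunk (the enclosing aggregator class name is not visible in the hunk):

```java
public class SumOverflowDemo {
    // widen to int, add, then truncate, matching the diff's update(Short, Short)
    static short update(short dst, short src) {
        int ret = dst + src;
        return (short) ret; // keeps only the low 16 bits, wrapping like the BE
    }

    public static void main(String[] args) {
        short a = Short.MAX_VALUE; // 32767
        short b = 1;
        System.out.println(update(a, b)); // -32768: wrapped, not saturated
    }
}
```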
diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/StringAccumulator.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/StringAccumulator.java
index afd23fafeaa074..428a9d42b3963e 100644
--- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/StringAccumulator.java
+++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/StringAccumulator.java
@@ -54,7 +54,7 @@ public void add(String v) {
 
     @Override
     public void merge(AccumulatorV2<String, List<String>> other) {
-        StringAccumulator o = (StringAccumulator)other;
+        StringAccumulator o = (StringAccumulator) other;
         strs.addAll(o.strs);
     }
 
diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/EtlJobConfig.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/EtlJobConfig.java
index ea9df1a1331396..43c0b73d151c43 100644
--- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/EtlJobConfig.java
+++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/EtlJobConfig.java
@@ -170,14 +170,14 @@ public EtlJobConfig(Map<Long, EtlTable> tables, String outputFilePattern, String
 
     @Override
     public String toString() {
-        return "EtlJobConfig{" +
-                "tables=" + tables +
-                ", outputPath='" + outputPath + '\'' +
-                ", outputFilePattern='" + outputFilePattern + '\'' +
-                ", label='" + label + '\'' +
-                ", properties=" + properties +
-                ", version=" + configVersion +
-                '}';
+        return "EtlJobConfig{"
+                + "tables=" + tables
+                + ", outputPath='" + outputPath + '\''
+                + ", outputFilePattern='" + outputFilePattern + '\''
+                + ", label='" + label + '\''
+                + ", properties=" + properties
+                + ", version=" + configVersion
+                + '}';
     }
 
     public String getOutputPath() {
@@ -240,10 +240,10 @@ public static class EtlJobProperty implements Serializable {
 
     @Override
     public String toString() {
-        return "EtlJobProperty{" +
-                "strictMode=" + strictMode +
-                ", timezone='" + timezone + '\'' +
-                '}';
+        return "EtlJobProperty{"
+                + "strictMode=" + strictMode
+                + ", timezone='" + timezone + '\''
+                + '}';
     }
 }
 
@@ -280,11 +280,11 @@ public void addFileGroup(EtlFileGroup etlFileGroup) {
 
     @Override
     public String toString() {
-        return "EtlTable{" +
-                "indexes=" + indexes +
-                ", partitionInfo=" + partitionInfo +
-                ", fileGroups=" + fileGroups +
-                '}';
+        return "EtlTable{"
+                + "indexes=" + indexes
+                + ", partitionInfo=" + partitionInfo
+                + ", fileGroups=" + fileGroups
+                + '}';
     }
 }
 
@@ -329,18 +329,18 @@ public EtlColumn(String columnName, String columnType, boolean isAllowNull, boolean
 
     @Override
     public String toString() {
-        return "EtlColumn{" +
-                "columnName='" + columnName + '\'' +
-                ", columnType='" + columnType + '\'' +
-                ", isAllowNull=" + isAllowNull +
-                ", isKey=" + isKey +
-                ", aggregationType='" + aggregationType + '\'' +
-                ", defaultValue='" + defaultValue + '\'' +
-                ", stringLength=" + stringLength +
-                ", precision=" + precision +
-                ", scale=" + scale +
-                ", defineExpr='" + defineExpr + '\'' +
-                '}';
+        return "EtlColumn{"
+                + "columnName='" + columnName + '\''
+                + ", columnType='" + columnType + '\''
+                + ", isAllowNull=" + isAllowNull
+                + ", isKey=" + isKey
+                + ", aggregationType='" + aggregationType + '\''
+                + ", defaultValue='" + defaultValue + '\''
+                + ", stringLength=" + stringLength
+                + ", precision=" + precision
+                + ", scale=" + scale
+                + ", defineExpr='" + defineExpr + '\''
+                + '}';
     }
 }
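All of the `toString()` hunks in EtlJobConfig.java make the same mechanical change: the `+` concatenation operator moves from the end of each line to the start of the next, again to satisfy the operator-wrap style. Not part of this patch, but worth knowing: Guava's `MoreObjects.toStringHelper` sidesteps the wrapped-concatenation problem entirely. A sketch, assuming Guava is on the classpath (it is a long-standing Doris FE dependency):

```java
import com.google.common.base.MoreObjects;

public class EtlJobPropertyToStringSketch {
    boolean strictMode = true;
    String timezone = "UTC";

    @Override
    public String toString() {
        // same output shape, no wrapped '+' operators to keep checkstyle happy
        return MoreObjects.toStringHelper("EtlJobProperty")
                .add("strictMode", strictMode)
                .add("timezone", timezone)
                .toString();
    }

    public static void main(String[] args) {
        System.out.println(new EtlJobPropertyToStringSketch());
        // EtlJobProperty{strictMode=true, timezone=UTC}
    }
}
```

One small behavioral difference: the hand-rolled versions quote string fields (`timezone='UTC'`) while `toStringHelper` does not, which is presumably why the patch keeps the existing concatenation and only rewraps it.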
@@ -390,13 +390,13 @@ public EtlColumn getColumn(String name) {
 
     @Override
     public String toString() {
-        return "EtlIndex{" +
-                "indexId=" + indexId +
-                ", columns=" + columns +
-                ", schemaHash=" + schemaHash +
-                ", indexType='" + indexType + '\'' +
-                ", isBaseIndex=" + isBaseIndex +
-                '}';
+        return "EtlIndex{"
+                + "indexId=" + indexId
+                + ", columns=" + columns
+                + ", schemaHash=" + schemaHash
+                + ", indexType='" + indexType + '\''
+                + ", isBaseIndex=" + isBaseIndex
+                + '}';
     }
 }
 
@@ -420,12 +420,12 @@ public EtlPartitionInfo(String partitionType, List<String> partitionColumnRefs,
 
     @Override
     public String toString() {
-        return "EtlPartitionInfo{" +
-                "partitionType='" + partitionType + '\'' +
-                ", partitionColumnRefs=" + partitionColumnRefs +
-                ", distributionColumnRefs=" + distributionColumnRefs +
-                ", partitions=" + partitions +
-                '}';
+        return "EtlPartitionInfo{"
+                + "partitionType='" + partitionType + '\''
+                + ", partitionColumnRefs=" + partitionColumnRefs
+                + ", distributionColumnRefs=" + distributionColumnRefs
+                + ", partitions=" + partitions
+                + '}';
     }
 }
 
@@ -452,13 +452,13 @@ public EtlPartition(long partitionId, List<Object> startKeys, List<Object> endKeys,
 
     @Override
     public String toString() {
-        return "EtlPartition{" +
-                "partitionId=" + partitionId +
-                ", startKeys=" + startKeys +
-                ", endKeys=" + endKeys +
-                ", isMaxPartition=" + isMaxPartition +
-                ", bucketNum=" + bucketNum +
-                '}';
+        return "EtlPartition{"
+                + "partitionId=" + partitionId
+                + ", startKeys=" + startKeys
+                + ", endKeys=" + endKeys
+                + ", isMaxPartition=" + isMaxPartition
+                + ", bucketNum=" + bucketNum
+                + '}';
     }
 }
 
@@ -527,21 +527,21 @@ public EtlFileGroup(SourceType sourceType, String hiveDbTableName, Map
 
         List<String> partitionColumns = new ArrayList<>();
         List<String> bucketColumns = new ArrayList<>();
         bucketColumns.add("key");
         EtlJobConfig.EtlPartitionInfo partitionInfo = new EtlJobConfig.EtlPartitionInfo(
                 "UNPARTITIONED", null, bucketColumns, null);
-        List partitionRangeKeys = new ArrayList<>();
         List<Class> partitionSchema = new ArrayList<>();
         partitionSchema.add(Integer.class);
         List<Integer> partitionKeyIndexes = new ArrayList<>();
diff --git a/fe/spark-dpp/src/test/java/org/apache/doris/load/loadv2/dpp/DppUtilsTest.java b/fe/spark-dpp/src/test/java/org/apache/doris/load/loadv2/dpp/DppUtilsTest.java
index 25e8c136013147..e31d2f94bddd39 100644
--- a/fe/spark-dpp/src/test/java/org/apache/doris/load/loadv2/dpp/DppUtilsTest.java
+++ b/fe/spark-dpp/src/test/java/org/apache/doris/load/loadv2/dpp/DppUtilsTest.java
@@ -181,7 +181,7 @@ public void testGetDataTypeFromColumn() {
             Assert.assertEquals(DataTypes.DateType, dateResult);
         } catch (Exception e) {
             Assert.assertTrue(false);
-        }
+        }
     }
 
     @Test
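The final hunk only strips trailing whitespace inside DppUtilsTest, but the pattern it touches is a known test smell: `Assert.assertTrue(false)` in a catch block fails the test without saying why. JUnit's `Assert.fail(String)` carries the cause into the failure report. A possible cleanup, not part of this diff, shown with an illustrative test body:

```java
import org.junit.Assert;
import org.junit.Test;

public class FailWithMessageSketch {
    @Test
    public void testGetDataTypeFromColumnStyle() {
        try {
            // ... exercise DppUtils.getDataTypeFromColumn(...) here ...
        } catch (Exception e) {
            // clearer than Assert.assertTrue(false): the exception text lands
            // in the failure report instead of a bare AssertionError
            Assert.fail("unexpected exception: " + e);
        }
    }
}
```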