diff --git a/CMakeLists.txt b/CMakeLists.txt index 8dc1cc0ce63..6d627135bb0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -487,7 +487,7 @@ set(THREAD_DIR ${CMAKE_SOURCE_DIR}/src/thread) set(TOOLS_DIR ${CMAKE_SOURCE_DIR}/src/tools) set(TRANSACTION_DIR ${CMAKE_SOURCE_DIR}/src/transaction) set(WIN_TOOLS_DIR ${CMAKE_SOURCE_DIR}/src/win_tools) - +set(XASL_DIR ${CMAKE_SOURCE_DIR}/src/xasl) include_directories(${CMAKE_CURRENT_BINARY_DIR}) include_directories( @@ -513,6 +513,7 @@ include_directories( src/thread src/tools src/transaction + src/xasl ) include(CheckFunctionExists) diff --git a/cs/CMakeLists.txt b/cs/CMakeLists.txt index 770edca3a62..5561db0c3f4 100644 --- a/cs/CMakeLists.txt +++ b/cs/CMakeLists.txt @@ -247,6 +247,16 @@ set(STORAGE_SOURCES ${STORAGE_DIR}/es.c ${STORAGE_DIR}/es_posix.c ) + +set(XASL_SOURCES + ${XASL_DIR}/access_json_table.cpp + ${XASL_DIR}/xasl_stream.cpp + ) +set(XASL_HEADERS + ${XASL_DIR}/access_json_table.hpp + ${XASL_DIR}/compile_context.h + ${XASL_DIR}/xasl_stream.hpp + ) list(APPEND CONNECTION_SOURCES ${CONNECTION_DIR}/heartbeat.c) if(UNIX) @@ -325,6 +335,7 @@ SET_SOURCE_FILES_PROPERTIES( ${STORAGE_SOURCES} ${ES_SOURCES} ${PROBES_OBJECT} + ${XASL_SOURCES} PROPERTIES LANGUAGE CXX ) SET_SOURCE_FILES_PROPERTIES( @@ -358,6 +369,8 @@ add_library(cubridcs SHARED ${STORAGE_SOURCES} ${ES_SOURCES} ${PROBES_OBJECT} + ${XASL_SOURCES} + ${XASL_HEADERS} ) set_target_properties(cubridcs PROPERTIES SOVERSION "${CUBRID_MAJOR_VERSION}.${CUBRID_MINOR_VERSION}") diff --git a/cubrid/CMakeLists.txt b/cubrid/CMakeLists.txt index 4f6a8c8e9f0..731b2164ab6 100644 --- a/cubrid/CMakeLists.txt +++ b/cubrid/CMakeLists.txt @@ -211,6 +211,7 @@ set(QUERY_SOURCES ${QUERY_DIR}/query_executor.c ${QUERY_DIR}/query_manager.c ${QUERY_DIR}/query_opfunc.c + ${QUERY_DIR}/scan_json_table.cpp ${QUERY_DIR}/scan_manager.c ${QUERY_DIR}/serial.c ${QUERY_DIR}/set_scan.c @@ -220,6 +221,9 @@ set(QUERY_SOURCES ${QUERY_DIR}/vacuum.c ${QUERY_DIR}/xasl_cache.c ) +set(QUERY_HEADERS + 
${QUERY_DIR}/scan_json_table.hpp + ) set(OBJECT_SOURCES ${OBJECT_DIR}/elo.c @@ -294,6 +298,16 @@ set(SESSION_SOURCES ${SESSION_DIR}/session_sr.c ) +set(XASL_SOURCES + ${XASL_DIR}/access_json_table.cpp + ${XASL_DIR}/xasl_stream.cpp + ) +set(XASL_HEADERS + ${XASL_DIR}/access_json_table.hpp + ${XASL_DIR}/compile_context.h + ${XASL_DIR}/xasl_stream.hpp + ) + list(APPEND CONNECTION_SOURCES ${CONNECTION_DIR}/heartbeat.c) if(UNIX) list(APPEND BASE_SOURCES ${BASE_DIR}/cubrid_getopt_long.c) @@ -332,6 +346,7 @@ SET_SOURCE_FILES_PROPERTIES( ${STORAGE_SOURCES} ${THREAD_SOURCES} ${TRANSACTION_SOURCES} + ${XASL_SOURCES} PROPERTIES LANGUAGE CXX ) @@ -360,6 +375,7 @@ set (CUBRID_LIB_SOURCES ${STREAM_SOURCES} ${THREAD_SOURCES} ${TRANSACTION_SOURCES} + ${XASL_SOURCES} ) set (CUBRID_LIB_HEADERS @@ -367,9 +383,11 @@ set (CUBRID_LIB_HEADERS ${COMMUNICATION_HEADERS} ${COMPAT_HEADERS} ${MONITOR_HEADERS} + ${QUERY_HEADERS} ${REPLICATION_HEADERS} ${THREAD_HEADERS} ${STREAM_HEADERS} + ${XASL_HEADERS} ) add_library(cubrid SHARED @@ -453,6 +471,9 @@ install(FILES install(FILES ${COMPAT_DIR}/dbtype_def.h ${COMPAT_DIR}/dbtype_function.h + ${COMPAT_DIR}/db_date.h + ${COMPAT_DIR}/db_elo.h + ${COMPAT_DIR}/cache_time.h DESTINATION ${CUBRID_INCLUDEDIR} COMPONENT Header ) install(FILES diff --git a/msg/de_DE.utf8/cubrid.msg b/msg/de_DE.utf8/cubrid.msg index 14f2e3906bf..e73e85f9fca 100644 --- a/msg/de_DE.utf8/cubrid.msg +++ b/msg/de_DE.utf8/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". 
-1238 Letzter Fehler +1240 Letzter Fehler $set 6 MSGCAT_SET_INTERNAL 1 Fehler in Fehler-Subsystem (Zeile %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 ungültige Verwendung von sessiontimezone\n sessiontimezone 126 Maximale Länge einer class/vclass Kommentar ist %1$d bytes. 127 Maximale Länge eines Kommentars ist %1$d bytes. +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 Außer Speicher. diff --git a/msg/en_US.utf8/cubrid.msg b/msg/en_US.utf8/cubrid.msg index 4ca842435dc..b89ad44de75 100644 --- a/msg/en_US.utf8/cubrid.msg +++ b/msg/en_US.utf8/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". -1238 Last Error +1240 Last Error $set 6 MSGCAT_SET_INTERNAL 1 Error in error subsystem (line %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 invalid use of sessiontimezone\n sessiontimezone 126 Maximum length of a class/vclass comment is %1$d bytes. 127 Maximum length of a comment is %1$d bytes. +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 Out of memory. diff --git a/msg/en_US/cubrid.msg b/msg/en_US/cubrid.msg index acd3f56c50a..5de8d6c3b80 100644 --- a/msg/en_US/cubrid.msg +++ b/msg/en_US/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". 
-1238 Last Error +1240 Last Error $set 6 MSGCAT_SET_INTERNAL 1 Error in error subsystem (line %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 invalid use of sessiontimezone\n sessiontimezone 126 Maximum length of a class/vclass comment is %1$d bytes. 127 Maximum length of a comment is %1$d bytes. +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 Out of memory. diff --git a/msg/es_ES.utf8/cubrid.msg b/msg/es_ES.utf8/cubrid.msg index 29ab4b2aba1..1c09bc927ac 100644 --- a/msg/es_ES.utf8/cubrid.msg +++ b/msg/es_ES.utf8/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". -1238 Last Error +1240 Last Error $set 6 MSGCAT_SET_INTERNAL 1 Error en subsistema de error (linea %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 uso invalido de sessiontimezone\n sessiontimezone 126 Longitud maxima de un comentario class/vclass es %1$d bytes. 127 Longitud maxima de un comentario es %1$d bytes. +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 Sin memoria. diff --git a/msg/fr_FR.utf8/cubrid.msg b/msg/fr_FR.utf8/cubrid.msg index e23b454d39b..305532d4fb0 100644 --- a/msg/fr_FR.utf8/cubrid.msg +++ b/msg/fr_FR.utf8/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". 
-1238 Dernière erreur +1240 Dernière erreur $set 6 MSGCAT_SET_INTERNAL 1 Erreur dans le sous-système d'erreur (ligne %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 Utilisation non valide de sessiontimezone\n sessiontimezone 126 La dimension maximale d'un commentaire class/vclass est %1$d octets. 127 La dimension maximale d'un commentaire est %1$d octets. +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 Mémoire épuisée. diff --git a/msg/it_IT.utf8/cubrid.msg b/msg/it_IT.utf8/cubrid.msg index 5c7cc4a02a5..a441e2ddff3 100644 --- a/msg/it_IT.utf8/cubrid.msg +++ b/msg/it_IT.utf8/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". -1238 Ultimo errore +1240 Ultimo errore $set 6 MSGCAT_SET_INTERNAL 1 Errore nel sottosistema di errore (linea %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 uso non valido di sessiontimezone\n sessiontimezone 126 Lunghezza massima di class/vclass comment è %1$d bytes. 127 Lunghezza massima di un commento è %1$d bytes. +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 Memoria esaurita. diff --git a/msg/ja_JP.utf8/cubrid.msg b/msg/ja_JP.utf8/cubrid.msg index 5872554d816..9a739928bc1 100644 --- a/msg/ja_JP.utf8/cubrid.msg +++ b/msg/ja_JP.utf8/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". 
-1238 ラストエラー +1240 ラストエラー $set 6 MSGCAT_SET_INTERNAL 1 エラーサブシステムにエラー発生(ライン %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 sessiontimezoneの使用が無効です\n sessiontimezone 126 class/vclassコメントの最大長は%1$dバイトです。 127 コメントの最大長は%1$dバイトです。 +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 メモリーが足りません。 diff --git a/msg/km_KH.utf8/cubrid.msg b/msg/km_KH.utf8/cubrid.msg index 4ca842435dc..b89ad44de75 100644 --- a/msg/km_KH.utf8/cubrid.msg +++ b/msg/km_KH.utf8/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". -1238 Last Error +1240 Last Error $set 6 MSGCAT_SET_INTERNAL 1 Error in error subsystem (line %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 invalid use of sessiontimezone\n sessiontimezone 126 Maximum length of a class/vclass comment is %1$d bytes. 127 Maximum length of a comment is %1$d bytes. +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 Out of memory. diff --git a/msg/ko_KR.euckr/cubrid.msg b/msg/ko_KR.euckr/cubrid.msg index 189a76ad52c..294105569e9 100644 --- a/msg/ko_KR.euckr/cubrid.msg +++ b/msg/ko_KR.euckr/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". -1238 +1240 $set 6 MSGCAT_SET_INTERNAL 1 ýۿ ߻( %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 ߸ sessiontimezone\n sessiontimezone 126 ̺ ĿƮ ִ ̴ %1$d ƮԴϴ. 127 ĿƮ ִ ̴ %1$d ƮԴϴ. 
+128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 ޸𸮰 մϴ diff --git a/msg/ko_KR.utf8/cubrid.msg b/msg/ko_KR.utf8/cubrid.msg index 236bc1ad375..87b63d18b18 100644 --- a/msg/ko_KR.utf8/cubrid.msg +++ b/msg/ko_KR.utf8/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". -1238 마지막 에러 +1240 마지막 에러 $set 6 MSGCAT_SET_INTERNAL 1 에러 서브 시스템에 에러 발생(라인 %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 잘못된 형식의 sessiontimezone\n sessiontimezone 126 테이블과 뷰 커맨트의 최대 길이는 %1$d 바이트입니다. 127 커맨트의 최대 길이는 %1$d 바이트입니다. +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 메모리가 부족합니다 diff --git a/msg/ro_RO.utf8/cubrid.msg b/msg/ro_RO.utf8/cubrid.msg index 295f1f7c932..dbb5962a592 100644 --- a/msg/ro_RO.utf8/cubrid.msg +++ b/msg/ro_RO.utf8/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer este dezactivat. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". -1238 Ultima eroare +1240 Ultima eroare $set 6 MSGCAT_SET_INTERNAL 1 Eroare în subsistemul de erori (linia %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 utilizare invalidă a sessiontimezone sessiontimezone\n 126 Lungimea maximă a comentariilor class/vclass este %1$d bytes. 127 Lungimea maximă a comentariilor este %1$d bytes. +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 Memorie epuizată. 
diff --git a/msg/tr_TR.utf8/cubrid.msg b/msg/tr_TR.utf8/cubrid.msg index 1b9d82386b1..d5f8eabbd76 100644 --- a/msg/tr_TR.utf8/cubrid.msg +++ b/msg/tr_TR.utf8/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". -1238 Son Hata +1240 Son Hata $set 6 MSGCAT_SET_INTERNAL 1 Alt Hata içinde hata (satır %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 sessiontimezone geçersiz kullanımı\n sessiontimezone 126 Bir class/vclass yorumun maksimum uzunluğu %1$d byte. 127 Bir yorumun maksimum uzunluğu %1$d byte. +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 Bellek yetersiz. diff --git a/msg/vi_VN.utf8/cubrid.msg b/msg/vi_VN.utf8/cubrid.msg index 4ca842435dc..2edd2e23f0f 100644 --- a/msg/vi_VN.utf8/cubrid.msg +++ b/msg/vi_VN.utf8/cubrid.msg @@ -1304,8 +1304,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". -1238 Last Error +1240 Last Error $set 6 MSGCAT_SET_INTERNAL 1 Error in error subsystem (line %1$d): @@ -1461,6 +1463,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 invalid use of sessiontimezone\n sessiontimezone 126 Maximum length of a class/vclass comment is %1$d bytes. 127 Maximum length of a comment is %1$d bytes. +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 Out of memory. 
diff --git a/msg/zh_CN.utf8/cubrid.msg b/msg/zh_CN.utf8/cubrid.msg index e81dac430a8..c600fbccae9 100644 --- a/msg/zh_CN.utf8/cubrid.msg +++ b/msg/zh_CN.utf8/cubrid.msg @@ -1305,8 +1305,10 @@ $ LOADDB 1235 Reserved error for stream/replication. 1236 Double Write Buffer is disabled. 1237 Cannot change status on %1$s because %2$s is a %3$s. +1238 Missing value for JSON_TABLE column '%1$s'. +1239 Cannot coerce value from JSON "%1$s" at path "%2$s" to JSON_TABLE column "%3$s" of domain "%4$s". -1238 最后一个错误. +1240 最后一个错误. $set 6 MSGCAT_SET_INTERNAL 1 在错误子系统中错误 (line %1$d): @@ -1462,6 +1464,7 @@ $set 7 MSGCAT_SET_PARSER_SYNTAX 125 非法使用 sessiontimezone\n sessiontimezone 126 一个class或vclass的comment的最大长度是 %1$d 字节. 127 一个comment的最大长度是 %1$d 字节. +128 Invalid number of arguments for JSON_OBJECTAGG $set 8 MSGCAT_SET_PARSER_SEMANTIC 1 内存耗尽. diff --git a/sa/CMakeLists.txt b/sa/CMakeLists.txt index a642776683f..82a70247d91 100644 --- a/sa/CMakeLists.txt +++ b/sa/CMakeLists.txt @@ -181,32 +181,36 @@ set(OPTIMIZER_SOURCES ) set(QUERY_SOURCES - ${QUERY_DIR}/execute_schema.c - ${QUERY_DIR}/execute_statement.c ${QUERY_DIR}/arithmetic.c + ${QUERY_DIR}/crypt_opfunc.c ${QUERY_DIR}/cursor.c - ${QUERY_DIR}/query_dump.c - ${QUERY_DIR}/query_evaluator.c + ${QUERY_DIR}/execute_schema.c + ${QUERY_DIR}/execute_statement.c ${QUERY_DIR}/fetch.c ${QUERY_DIR}/filter_pred_cache.c ${QUERY_DIR}/list_file.c + ${QUERY_DIR}/method_scan.c ${QUERY_DIR}/numeric_opfunc.c + ${QUERY_DIR}/partition.c ${QUERY_DIR}/query_cl.c + ${QUERY_DIR}/query_dump.c + ${QUERY_DIR}/query_evaluator.c + ${QUERY_DIR}/query_executor.c ${QUERY_DIR}/query_manager.c + ${QUERY_DIR}/query_opfunc.c + ${QUERY_DIR}/scan_json_table.cpp ${QUERY_DIR}/scan_manager.c ${QUERY_DIR}/serial.c ${QUERY_DIR}/set_scan.c - ${QUERY_DIR}/string_opfunc.c - ${QUERY_DIR}/method_scan.c - ${QUERY_DIR}/query_executor.c ${QUERY_DIR}/show_scan.c - ${QUERY_DIR}/partition.c - ${QUERY_DIR}/query_opfunc.c - ${QUERY_DIR}/xasl_to_stream.c ${QUERY_DIR}/stream_to_xasl.c - 
${QUERY_DIR}/crypt_opfunc.c + ${QUERY_DIR}/string_opfunc.c ${QUERY_DIR}/vacuum.c ${QUERY_DIR}/xasl_cache.c + ${QUERY_DIR}/xasl_to_stream.c + ) +set(QUERY_HEADERS + ${QUERY_DIR}/scan_json_table.hpp ) set(OBJECT_SOURCES @@ -339,6 +343,16 @@ set(SESSION_SOURCES ${SESSION_DIR}/session_sr.c ) +set(XASL_SOURCES + ${XASL_DIR}/access_json_table.cpp + ${XASL_DIR}/xasl_stream.cpp + ) +set(XASL_HEADERS + ${XASL_DIR}/access_json_table.hpp + ${XASL_DIR}/compile_context.h + ${XASL_DIR}/xasl_stream.hpp + ) + if(UNIX) list(APPEND CONNECTION_SOURCES ${CONNECTION_DIR}/tcp.c) list(APPEND BASE_SOURCES ${BASE_DIR}/dynamic_load.c) @@ -411,6 +425,7 @@ SET_SOURCE_FILES_PROPERTIES( ${ES_SOURCES} ${SESSION_SOURCES} ${PROBES_OBJECT} + ${XASL_SOURCES} PROPERTIES LANGUAGE CXX ) @@ -437,6 +452,7 @@ add_library(cubridsa SHARED ${COMMUNICATION_SOURCES} ${PARSER_SOURCES} ${OPTIMIZER_SOURCES} + ${QUERY_HEADERS} ${QUERY_SOURCES} ${OBJECT_SOURCES} ${REPLICATION_HEADERS} @@ -451,6 +467,8 @@ add_library(cubridsa SHARED ${SESSION_SOURCES} ${PROBES_OBJECT} ${COMPAT_SOURCES_C} + ${XASL_SOURCES} + ${XASL_HEADERS} ) set_target_properties(cubridsa PROPERTIES SOVERSION "${CUBRID_MAJOR_VERSION}.${CUBRID_MINOR_VERSION}") diff --git a/src/api/api_common.h b/src/api/api_common.h index df9f10e6f3c..c70a1d42d22 100644 --- a/src/api/api_common.h +++ b/src/api/api_common.h @@ -24,7 +24,6 @@ #ifndef _API_COMMON_H_ #define _API_COMMON_H_ -#include "cubrid_api.h" #include "api_handle.h" /* diff --git a/src/api/cubrid_api.h b/src/api/cubrid_api.h index 573ff19e1ba..a541c58db86 100644 --- a/src/api/cubrid_api.h +++ b/src/api/cubrid_api.h @@ -23,213 +23,4 @@ #ifndef _CUBRID_API_H_ #define _CUBRID_API_H_ - -#include "config.h" -#include -#include "error_code.h" -#include "dbtype_def.h" - -#define IS_VALID_ISOLATION_LEVEL(isolation_level) \ - (TRAN_MINVALUE_ISOLATION <= (isolation_level) \ - && (isolation_level) <= TRAN_MAXVALUE_ISOLATION) - -#define TRAN_DEFAULT_ISOLATION_LEVEL() (TRAN_DEFAULT_ISOLATION) - -/* UNUSED PART 
--> */ - -typedef UINT64 CI_CONNECTION; -typedef UINT64 CI_STATEMENT; -typedef UINT64 CI_PARAMETER_METADATA; -typedef UINT64 CI_RESULTSET_METADATA; -typedef UINT64 CI_RESULTSET; -typedef UINT64 CI_BATCH_RESULT; -typedef struct ci_oid_s CI_OID; -typedef struct ci_time_s CI_TIME; -typedef void *CI_COLLECTION; - -struct ci_time_s -{ - short year; - short month; - short day; - short hour; - short minute; - short second; - short millisecond; -}; - - -struct ci_oid_s -{ - int d1; - int d2; - CI_CONNECTION conn; -}; - -enum ci_fetch_position -{ - CI_FETCH_POSITION_FIRST = 1, - CI_FETCH_POSITION_CURRENT = 2, - CI_FETCH_POSITION_LAST = 3 -}; -typedef enum ci_fetch_position CI_FETCH_POSITION; - -enum ci_type -{ - CI_TYPE_NULL = 0, - CI_TYPE_INT = 1, - CI_TYPE_SHORT, - CI_TYPE_FLOAT, - CI_TYPE_DOUBLE, - CI_TYPE_CHAR, - CI_TYPE_VARCHAR, - CI_TYPE_NCHAR, - CI_TYPE_VARNCHAR, - CI_TYPE_BIT, - CI_TYPE_VARBIT, - CI_TYPE_TIME, - CI_TYPE_DATE, - CI_TYPE_TIMESTAMP, - CI_TYPE_MONETARY, - CI_TYPE_NUMERIC, - CI_TYPE_OID, - CI_TYPE_COLLECTION, - CI_TYPE_BIGINT, - CI_TYPE_DATETIME -}; -typedef enum ci_type CI_TYPE; - -enum ci_conn_option -{ - CI_CONNECTION_OPTION_CLIENT_VERSION = 1, - CI_CONNECTION_OPTION_SERVER_VERSION = 2, - CI_CONNECTION_OPTION_LOCK_TIMEOUT = 3, - CI_CONNECTION_OPTION_TRAN_ISOLATION_LV = 4, - CI_CONNECTION_OPTION_AUTOCOMMIT = 5 -}; -typedef enum ci_conn_option CI_CONNECTION_OPTION; - -enum ci_stmt_option -{ - CI_STATEMENT_OPTION_HOLD_CURSORS_OVER_COMMIT = 1, - CI_STATEMENT_OPTION_UPDATABLE_RESULT = 2, - CI_STATEMENT_OPTION_ASYNC_QUERY = 3, /* obsoleted */ - CI_STATEMENT_OPTION_EXEC_CONTINUE_ON_ERROR = 4, - CI_STATEMENT_OPTION_GET_GENERATED_KEYS = 5, - CI_STATEMENT_OPTION_LAZY_EXEC = 6, -}; -typedef enum ci_stmt_option CI_STATEMENT_OPTION; - -enum ci_rmeta_info_type -{ - CI_RMETA_INFO_COL_LABEL = 1, - CI_RMETA_INFO_COL_NAME = 2, - CI_RMETA_INFO_COL_TYPE = 3, - CI_RMETA_INFO_PRECISION = 4, - CI_RMETA_INFO_SCALE = 5, - CI_RMETA_INFO_TABLE_NAME = 7, - 
CI_RMETA_INFO_IS_AUTO_INCREMENT = 8, - CI_RMETA_INFO_IS_NULLABLE = 9, - CI_RMETA_INFO_IS_WRITABLE = 10 -}; -typedef enum ci_rmeta_info_type CI_RMETA_INFO_TYPE; - -enum ci_pmeta_info_type -{ - CI_PMETA_INFO_MODE = 1, - CI_PMETA_INFO_COL_TYPE = 2, - CI_PMETA_INFO_PRECISION = 3, - CI_PMETA_INFO_SCALE = 4, - CI_PMETA_INFO_NULLABLE = 5 -}; -typedef enum ci_pmeta_info_type CI_PMETA_INFO_TYPE; - -enum ci_param_mode -{ - CI_PARAM_MODE_IN = 0, - CI_PARAM_MODE_OUT = 1 -}; -typedef enum ci_param_mode CI_PARAMETER_MODE; - -extern int ci_create_connection (CI_CONNECTION * conn); -extern int ci_conn_connect (CI_CONNECTION conn, const char *host, unsigned short port, const char *databasename, - const char *user_name, const char *password); -extern int ci_conn_close (CI_CONNECTION conn); -extern int ci_conn_create_statement (CI_CONNECTION conn, CI_STATEMENT * stmt); -extern int ci_conn_set_option (CI_CONNECTION conn, CI_CONNECTION_OPTION option, void *arg, size_t size); -extern int ci_conn_get_option (CI_CONNECTION conn, CI_CONNECTION_OPTION option, void *arg, size_t size); - -extern int ci_conn_commit (CI_CONNECTION conn); -extern int ci_conn_rollback (CI_CONNECTION conn); -extern int ci_conn_get_error (CI_CONNECTION handle, int *err, char *msg, size_t size); - -extern int ci_stmt_close (CI_STATEMENT stmt); -extern int ci_stmt_execute_immediate (CI_STATEMENT stmt, char *sql, size_t len, CI_RESULTSET * rs, int *r); -extern int ci_stmt_execute (CI_STATEMENT stmt, CI_RESULTSET * rs, int *r); -extern int ci_stmt_get_option (CI_STATEMENT stmt, CI_STATEMENT_OPTION option, void *arg, size_t size); -extern int ci_stmt_set_option (CI_STATEMENT stmt, CI_STATEMENT_OPTION option, void *arg, size_t size); -extern int ci_stmt_prepare (CI_STATEMENT stmt, const char *sql, size_t len); -extern int ci_stmt_register_out_parameter (CI_STATEMENT stmt, int index); -extern int ci_stmt_get_resultset_metadata (CI_STATEMENT stmt, CI_RESULTSET_METADATA * r); - -extern int ci_stmt_get_parameter_metadata 
(CI_STATEMENT stmt, CI_PARAMETER_METADATA * r); -extern int ci_stmt_get_parameter (CI_STATEMENT stmt, int index, CI_TYPE type, void *addr, size_t len, size_t * outlen, - bool * isnull); -extern int ci_stmt_set_parameter (CI_STATEMENT stmt, int index, CI_TYPE type, void *val, size_t size); -extern int ci_stmt_get_resultset (CI_STATEMENT stmt, CI_RESULTSET * res); - -extern int ci_stmt_affected_rows (CI_STATEMENT stmt, int *out); - -extern int ci_stmt_get_query_type (CI_STATEMENT stmt, CUBRID_STMT_TYPE * type); - -extern int ci_stmt_get_start_line (CI_STATEMENT stmt, int *line); -extern int ci_stmt_next_result (CI_STATEMENT stmt, bool * exist_result); -extern int ci_res_get_resultset_metadata (CI_RESULTSET res, CI_RESULTSET_METADATA * r); -extern int ci_res_fetch (CI_RESULTSET res, int offset, CI_FETCH_POSITION pos); - -extern int ci_res_fetch_tell (CI_RESULTSET res, int *offset); -extern int ci_res_clear_updates (CI_RESULTSET res); -extern int ci_res_delete_row (CI_RESULTSET res); -extern int ci_res_get_value (CI_RESULTSET res, int index, CI_TYPE type, void *addr, size_t len, size_t * outlen, - bool * isnull); - -extern int ci_res_get_value_by_name (CI_RESULTSET res, const char *name, CI_TYPE type, void *addr, size_t len, - size_t * outlen, bool * isnull); -extern int ci_res_update_value (CI_RESULTSET res, int index, CI_TYPE type, void *addr, size_t len); -extern int ci_res_apply_row (CI_RESULTSET res); -extern int ci_res_close (CI_RESULTSET res); - -extern int ci_pmeta_get_count (CI_PARAMETER_METADATA pmeta, int *count); -extern int ci_pmeta_get_info (CI_PARAMETER_METADATA pmeta, int index, CI_PMETA_INFO_TYPE type, void *arg, size_t size); -extern int ci_rmeta_get_count (CI_RESULTSET_METADATA rmeta, int *count); -extern int ci_rmeta_get_info (CI_RESULTSET_METADATA rmeta, int index, CI_RMETA_INFO_TYPE type, void *arg, size_t size); -extern int ci_stmt_get_first_error (CI_STATEMENT stmt, int *line, int *col, int *errcode, char *err_msg, size_t size); -extern int 
ci_stmt_get_next_error (CI_STATEMENT stmt, int *line, int *col, int *errcode, char *err_msg, size_t size); - -extern int ci_stmt_add_batch_query (CI_STATEMENT stmt, const char *sql, size_t len); -extern int ci_stmt_add_batch (CI_STATEMENT stmt); -extern int ci_stmt_execute_batch (CI_STATEMENT stmt, CI_BATCH_RESULT * br); -extern int ci_stmt_clear_batch (CI_STATEMENT stmt); - -extern int ci_batch_res_query_count (CI_BATCH_RESULT br, int *count); -extern int ci_batch_res_get_result (CI_BATCH_RESULT br, int index, int *ret, int *nr); -extern int ci_batch_res_get_error (CI_BATCH_RESULT br, int index, int *err_code, char *err_msg, size_t buf_size); - -extern int ci_oid_delete (CI_OID * oid); -extern int ci_oid_get_classname (CI_OID * oid, char *name, size_t size); -extern int ci_oid_get_resultset (CI_OID * oid, CI_RESULTSET * rs); - -extern int ci_collection_new (CI_CONNECTION conn, CI_COLLECTION * coll); -extern int ci_collection_free (CI_COLLECTION coll); -extern int ci_collection_length (CI_COLLECTION coll, long *length); -extern int ci_collection_insert (CI_COLLECTION coll, long pos, CI_TYPE type, void *ptr, size_t size); -extern int ci_collection_update (CI_COLLECTION coll, long pos, CI_TYPE type, void *ptr, size_t size); -extern int ci_collection_delete (CI_COLLECTION coll, long pos); -extern int ci_collection_get_elem_domain_info (CI_COLLECTION coll, long pos, CI_TYPE * type, int *precision, - int *scale); -extern int ci_collection_get (CI_COLLECTION coll, long pos, CI_TYPE type, void *addr, size_t len, size_t * outlen, - bool * isnull); - -/* UNUSED PART <-- */ - #endif /* _CUBRID_API_H_ */ diff --git a/src/base/error_code.h b/src/base/error_code.h index 1c8c0bd5623..321ff0a3c50 100644 --- a/src/base/error_code.h +++ b/src/base/error_code.h @@ -1584,8 +1584,10 @@ #define ER_DWB_DISABLED -1236 #define ER_SM_INDEX_STATUS_CHANGE_NOT_ALLOWED -1237 +#define ER_JSON_TABLE_ON_EMPTY_ERROR -1238 +#define ER_JSON_TABLE_ON_ERROR_INCOMP_DOMAIN -1239 -#define ER_LAST_ERROR 
-1238 +#define ER_LAST_ERROR -1240 /* * CAUTION! diff --git a/src/base/error_manager.c b/src/base/error_manager.c index 38d81eefdf1..d5f693f9688 100644 --- a/src/base/error_manager.c +++ b/src/base/error_manager.c @@ -315,6 +315,13 @@ static bool er_Has_sticky_init = false; static bool er_Isa_null_device = false; static int er_Exit_ask = ER_EXIT_DEFAULT; static int er_Print_to_console = ER_DO_NOT_PRINT; +/* TODO : remove this when applylogdb and copylogdb are removed + * multithreaded client processes which start+end database (and error module) in a loop, may need to log errors on + * other threads (while error module is stopped); this flag prevents assertion failure of error module initialization +* for such case */ +#if defined (CS_MODE) +static bool er_Ignore_uninit = false; +#endif #if !defined (SERVER_MODE) // requires own context @@ -1391,6 +1398,13 @@ er_set_internal (int severity, const char *file_name, const int line_no, int err if (er_Hasalready_initiated == false) { +#if defined (CS_MODE) && !defined (NDEBUG) + /* temporary workaround for HA process which may encounter missing er_module */ + if (er_Ignore_uninit) + { + return ER_FAILED; + } +#endif assert (false); er_Errid_not_initialized = err_id; return ER_FAILED; @@ -1800,6 +1814,13 @@ er_errid (void) { if (!er_Hasalready_initiated) { +#if defined (CS_MODE) && !defined (NDEBUG) + /* temporary workaround for HA process which may encounter missing er_module */ + if (er_Ignore_uninit) + { + return er_Errid_not_initialized; + } +#endif assert (false); return er_Errid_not_initialized; } @@ -1832,6 +1853,13 @@ void er_clearid (void) { // todo: is this necessary? 
+#if defined (CS_MODE) && !defined (NDEBUG) + /* temporary workaround for HA process which may encounter missing er_module */ + if (!er_Hasalready_initiated && er_Ignore_uninit) + { + return; + } +#endif assert (er_Hasalready_initiated); context::get_thread_local_error ().err_id = NO_ERROR; @@ -1846,6 +1874,13 @@ void er_setid (int err_id) { // todo: is this necessary? +#if defined (CS_MODE) && !defined (NDEBUG) + /* temporary workaround for HA process which may encounter missing er_module */ + if (!er_Hasalready_initiated && er_Ignore_uninit) + { + return; + } +#endif assert (er_Hasalready_initiated); context::get_thread_local_error ().err_id = err_id; @@ -1883,6 +1918,14 @@ er_has_error (void) const char * er_msg (void) { +#if defined (CS_MODE) && !defined (NDEBUG) + /* temporary workaround for HA process which may encounter missing er_module */ + if (!er_Hasalready_initiated && er_Ignore_uninit) + { + return "Not available"; + } +#endif + if (!er_Hasalready_initiated) { assert (false); @@ -1944,6 +1987,14 @@ _er_log_debug (const char *file_name, const int line_no, const char *fmt, ...) 
va_list ap; int r = NO_ERROR; +#if defined (CS_MODE) && !defined (NDEBUG) + /* temporary workaround for HA process which may encounter missing er_module */ + if (!er_Hasalready_initiated && er_Ignore_uninit) + { + return; + } +#endif + assert (er_Hasalready_initiated); // *INDENT-OFF* @@ -3176,6 +3227,14 @@ er_is_error_severity (er_severity severity) } } +#if defined (CS_MODE) +void +er_set_ignore_uninit (bool ignore) +{ + er_Ignore_uninit = ignore; +} +#endif + /* *INDENT-OFF* */ namespace cuberr { diff --git a/src/base/error_manager.h b/src/base/error_manager.h index b78ccb411e3..b26d0ed565a 100644 --- a/src/base/error_manager.h +++ b/src/base/error_manager.h @@ -222,6 +222,9 @@ extern "C" extern bool er_has_error (void); extern void er_print_callstack (const char *file_name, const int line_no, const char *fmt, ...); +#if defined (CS_MODE) + extern void er_set_ignore_uninit (bool ignore); +#endif #ifdef __cplusplus } diff --git a/src/base/language_support.c b/src/base/language_support.c index 8884c75bff1..2aa834efb09 100644 --- a/src/base/language_support.c +++ b/src/base/language_support.c @@ -41,6 +41,7 @@ #if !defined(WINDOWS) #include #endif /* !defined (WINDOWS) */ +#include "tz_support.h" #include "db_date.h" #include "string_opfunc.h" diff --git a/src/base/object_representation_sr.c b/src/base/object_representation_sr.c index 671b322e6bc..ac9db95520e 100644 --- a/src/base/object_representation_sr.c +++ b/src/base/object_representation_sr.c @@ -4067,4 +4067,3 @@ or_install_btids_function_info (DB_SEQ * fi_seq, OR_INDEX * index) return; } - diff --git a/src/base/perf_monitor.c b/src/base/perf_monitor.c index 99ef75c2a49..04d16b79e4e 100644 --- a/src/base/perf_monitor.c +++ b/src/base/perf_monitor.c @@ -262,10 +262,6 @@ PSTAT_METADATA pstat_Metadata[] = { PSTAT_METADATA_INIT_SINGLE_ACC (PSTAT_BT_NUM_MERGES, "Num_btree_merges"), PSTAT_METADATA_INIT_SINGLE_ACC (PSTAT_BT_NUM_GET_STATS, "Num_btree_get_stats"), - /* Execution statistics for the heap manager */ - 
/* TODO: Move this to heap section. TODO: count and timer. */ - PSTAT_METADATA_INIT_SINGLE_ACC (PSTAT_HEAP_NUM_STATS_SYNC_BESTSPACE, "Num_heap_stats_sync_bestspace"), - /* Execution statistics for the query manager */ PSTAT_METADATA_INIT_SINGLE_ACC (PSTAT_QM_NUM_SELECTS, "Num_query_selects"), PSTAT_METADATA_INIT_SINGLE_ACC (PSTAT_QM_NUM_INSERTS, "Num_query_inserts"), @@ -298,10 +294,6 @@ PSTAT_METADATA pstat_Metadata[] = { PSTAT_METADATA_INIT_SINGLE_ACC (PSTAT_PRIOR_LSA_LIST_MAXED, "Num_prior_lsa_list_maxed"), PSTAT_METADATA_INIT_SINGLE_ACC (PSTAT_PRIOR_LSA_LIST_REMOVED, "Num_prior_lsa_list_removed"), - /* best space info */ - PSTAT_METADATA_INIT_SINGLE_PEEK (PSTAT_HF_NUM_STATS_ENTRIES, "Num_heap_stats_bestspace_entries"), - PSTAT_METADATA_INIT_SINGLE_ACC (PSTAT_HF_NUM_STATS_MAXED, "Num_heap_stats_bestspace_maxed"), - /* HA replication delay */ PSTAT_METADATA_INIT_SINGLE_PEEK (PSTAT_HA_REPL_DELAY, "Time_ha_replication_delay"), @@ -365,6 +357,17 @@ PSTAT_METADATA pstat_Metadata[] = { PSTAT_METADATA_INIT_COUNTER_TIMER (PSTAT_HEAP_VACUUM_EXECUTE, "heap_vacuum_execute"), PSTAT_METADATA_INIT_COUNTER_TIMER (PSTAT_HEAP_VACUUM_LOG, "heap_vacuum_log"), + /* Execution statistics for the heap manager */ + /* best space info */ + PSTAT_METADATA_INIT_COUNTER_TIMER (PSTAT_HEAP_STATS_SYNC_BESTSPACE, "heap_stats_sync_bestspace"), + PSTAT_METADATA_INIT_SINGLE_PEEK (PSTAT_HF_NUM_STATS_ENTRIES, "Num_heap_stats_bestspace_entries"), + PSTAT_METADATA_INIT_SINGLE_ACC (PSTAT_HF_NUM_STATS_MAXED, "Num_heap_stats_bestspace_maxed"), + PSTAT_METADATA_INIT_COUNTER_TIMER (PSTAT_HF_BEST_SPACE_ADD, "bestspace_add"), + PSTAT_METADATA_INIT_COUNTER_TIMER (PSTAT_HF_BEST_SPACE_DEL, "bestspace_del"), + PSTAT_METADATA_INIT_COUNTER_TIMER (PSTAT_HF_BEST_SPACE_FIND, "bestspace_find"), + PSTAT_METADATA_INIT_COUNTER_TIMER (PSTAT_HF_HEAP_FIND_PAGE_BEST_SPACE, "heap_find_page_bestspace"), + PSTAT_METADATA_INIT_COUNTER_TIMER (PSTAT_HF_HEAP_FIND_BEST_PAGE, "heap_find_best_page"), + /* B-tree detailed statistics. 
*/ PSTAT_METADATA_INIT_COUNTER_TIMER (PSTAT_BT_FIX_OVF_OIDS, "bt_fix_ovf_oids"), PSTAT_METADATA_INIT_COUNTER_TIMER (PSTAT_BT_UNIQUE_RLOCKS, "bt_unique_rlocks"), diff --git a/src/base/perf_monitor.h b/src/base/perf_monitor.h index bcd70a175e7..ef5a3875c77 100644 --- a/src/base/perf_monitor.h +++ b/src/base/perf_monitor.h @@ -339,9 +339,6 @@ typedef enum PSTAT_BT_NUM_MERGES, PSTAT_BT_NUM_GET_STATS, - /* Execution statistics for the heap manager */ - PSTAT_HEAP_NUM_STATS_SYNC_BESTSPACE, - /* Execution statistics for the query manager */ PSTAT_QM_NUM_SELECTS, PSTAT_QM_NUM_INSERTS, @@ -374,10 +371,6 @@ typedef enum PSTAT_PRIOR_LSA_LIST_MAXED, PSTAT_PRIOR_LSA_LIST_REMOVED, - /* best space info */ - PSTAT_HF_NUM_STATS_ENTRIES, - PSTAT_HF_NUM_STATS_MAXED, - /* HA replication delay */ PSTAT_HA_REPL_DELAY, @@ -439,6 +432,17 @@ typedef enum PSTAT_HEAP_VACUUM_EXECUTE, PSTAT_HEAP_VACUUM_LOG, + /* Execution statistics for the heap manager */ + /* best space info */ + PSTAT_HEAP_STATS_SYNC_BESTSPACE, + PSTAT_HF_NUM_STATS_ENTRIES, + PSTAT_HF_NUM_STATS_MAXED, + PSTAT_HF_BEST_SPACE_ADD, + PSTAT_HF_BEST_SPACE_DEL, + PSTAT_HF_BEST_SPACE_FIND, + PSTAT_HF_HEAP_FIND_PAGE_BEST_SPACE, + PSTAT_HF_HEAP_FIND_BEST_PAGE, + /* B-tree ops detailed statistics. 
*/ PSTAT_BT_FIX_OVF_OIDS, PSTAT_BT_UNIQUE_RLOCKS, diff --git a/src/base/release_string.c b/src/base/release_string.c index 1f69d9adadd..f67ea2411b9 100644 --- a/src/base/release_string.c +++ b/src/base/release_string.c @@ -99,7 +99,7 @@ static REL_COMPATIBILITY rel_get_compatible_internal (const char *base_rel_str, /* * Disk (database image) Version Compatibility */ -static float disk_compatibility_level = 10.13f; +static float disk_compatibility_level = 10.15f; /* * rel_copy_version_string - version string of the product diff --git a/src/base/system_parameter.c b/src/base/system_parameter.c index 2455ebf7ba9..8d23977f21d 100644 --- a/src/base/system_parameter.c +++ b/src/base/system_parameter.c @@ -636,7 +636,6 @@ static const char sysprm_ha_conf_file_name[] = "cubrid_ha.conf"; #define PRM_NAME_DWB_SIZE "double_write_buffer_size" #define PRM_NAME_DWB_BLOCKS "double_write_buffer_blocks" #define PRM_NAME_ENABLE_DWB_FLUSH_THREAD "double_write_buffer_enable_flush_thread" -#define PRM_NAME_ENABLE_DWB_CHECKSUM_THREAD "double_write_buffer_enable_checksum_thread" #define PRM_NAME_DWB_LOGGING "double_write_buffer_logging" #define PRM_NAME_JSON_LOG_ALLOCATIONS "json_log_allocations" @@ -655,6 +654,8 @@ static const char sysprm_ha_conf_file_name[] = "cubrid_ha.conf"; #define PRM_NAME_REPL_GENERATOR_BUFFER_SIZE "replication_generator_buffer_size" #define PRM_NAME_REPL_CONSUMER_BUFFER_SIZE "replication_consumer_buffer_size" +#define PRM_NAME_DATA_FILE_ADVISE "data_file_os_advise" + #define PRM_VALUE_DEFAULT "DEFAULT" #define PRM_VALUE_MAX "MAX" #define PRM_VALUE_MIN "MIN" @@ -2161,11 +2162,6 @@ bool PRM_ENABLE_DWB_FLUSH_THREAD = true; static bool prm_enable_dwb_flush_thread_default = true; static unsigned int prm_enable_dwb_flush_thread_flag = 0; -bool PRM_ENABLE_DWB_CHECKSUM_THREAD = true; -static unsigned int prm_dwb_checksum_thread_flag = 0; -static bool prm_enable_dwb_checksum_thread_default = true; -static unsigned int prm_enable_dwb_checksum_thread_flag = 0; - bool 
PRM_DWB_LOGGING = false; static bool prm_dwb_logging_default = false; static unsigned int prm_dwb_logging_flag = 0; @@ -2180,6 +2176,12 @@ static UINT64 prm_repl_consumer_buffer_size_default = 10 * 1024 * 1024; static UINT64 prm_repl_consumer_buffer_size_lower = 100 * 1024; static unsigned int prm_repl_consumer_buffer_size_flag = 0; +int PRM_DATA_FILE_ADVISE = 0; + +static int prm_data_file_advise_default = 0; + +static unsigned int prm_data_file_advise_flag = 0; + typedef int (*DUP_PRM_FUNC) (void *, SYSPRM_DATATYPE, void *, SYSPRM_DATATYPE); static int prm_size_to_io_pages (void *out_val, SYSPRM_DATATYPE out_type, void *in_val, SYSPRM_DATATYPE in_type); @@ -5547,17 +5549,6 @@ static SYSPRM_PARAM prm_Def[] = { (char *) NULL, (DUP_PRM_FUNC) NULL, (DUP_PRM_FUNC) NULL}, - {PRM_ID_ENABLE_DWB_CHECKSUM_THREAD, - PRM_NAME_ENABLE_DWB_CHECKSUM_THREAD, - (PRM_FOR_SERVER | PRM_USER_CHANGE), - PRM_BOOLEAN, - &prm_dwb_checksum_thread_flag, - (void *) &prm_enable_dwb_checksum_thread_default, - (void *) &PRM_ENABLE_DWB_CHECKSUM_THREAD, - (void *) NULL, (void *) NULL, - (char *) NULL, - (DUP_PRM_FUNC) NULL, - (DUP_PRM_FUNC) NULL}, {PRM_ID_DWB_LOGGING, PRM_NAME_DWB_LOGGING, (PRM_FOR_SERVER | PRM_USER_CHANGE), @@ -5569,6 +5560,17 @@ static SYSPRM_PARAM prm_Def[] = { (char *) NULL, (DUP_PRM_FUNC) NULL, (DUP_PRM_FUNC) NULL}, + {PRM_ID_DATA_FILE_ADVISE, + PRM_NAME_DATA_FILE_ADVISE, + (PRM_FOR_SERVER | PRM_USER_CHANGE), + PRM_INTEGER, + &prm_data_file_advise_flag, + (void *) &prm_data_file_advise_default, + (void *) &PRM_DATA_FILE_ADVISE, + (void *) NULL, (void *) NULL, + (char *) NULL, + (DUP_PRM_FUNC) NULL, + (DUP_PRM_FUNC) NULL} }; #define NUM_PRM ((int)(sizeof(prm_Def)/sizeof(prm_Def[0]))) diff --git a/src/base/system_parameter.h b/src/base/system_parameter.h index 179b0679af7..5a8dca1885b 100644 --- a/src/base/system_parameter.h +++ b/src/base/system_parameter.h @@ -419,11 +419,11 @@ enum param_id PRM_ID_DWB_SIZE, PRM_ID_DWB_BLOCKS, PRM_ID_ENABLE_DWB_FLUSH_THREAD, - 
PRM_ID_ENABLE_DWB_CHECKSUM_THREAD, PRM_ID_DWB_LOGGING, + PRM_ID_DATA_FILE_ADVISE, /* change PRM_LAST_ID when adding new system parameters */ - PRM_LAST_ID = PRM_ID_DWB_LOGGING + PRM_LAST_ID = PRM_ID_DATA_FILE_ADVISE }; typedef enum param_id PARAM_ID; diff --git a/src/base/tz_compile.c b/src/base/tz_compile.c index 1e681313fc6..7d968c291be 100644 --- a/src/base/tz_compile.c +++ b/src/base/tz_compile.c @@ -27,6 +27,7 @@ #include "porting.h" #include "byte_order.h" #include "utility.h" +#include "tz_support.h" #include "db_date.h" #include "environment_variable.h" #include "chartype.h" @@ -35,7 +36,6 @@ #include "memory_alloc.h" #include "tz_compile.h" -#include "tz_support.h" #include "xml_parser.h" #include "md5.h" #include "db_query.h" diff --git a/src/base/tz_support.h b/src/base/tz_support.h index 56f45f669bf..ed4dcf1634e 100644 --- a/src/base/tz_support.h +++ b/src/base/tz_support.h @@ -27,6 +27,21 @@ #include "thread_compat.hpp" #include "timezone_lib_common.h" +#define db_utime_to_string db_timestamp_to_string +#define db_string_to_utime db_string_to_timestamp +#define db_date_parse_utime db_date_parse_timestamp + +enum +{ + TIME_SPECIFIER = 1, + DATE_SPECIFIER = 2, + DATETIME_SPECIFIER = 3, + REMOVED_TIMETZ_SPECIFIER = 4, + DATETIMETZ_SPECIFIER = 5 +}; + +extern void db_date_locale_init (void); + #define TZLIB_SYMBOL_NAME_SIZE 64 #define MAX_LEN_OFFSET 10 @@ -108,25 +123,6 @@ enum #define TZ_IS_UTC_TZ_REGION(r) \ ((r)->type == TZ_REGION_OFFSET && (r)->offset == 0) - -enum tz_region_type -{ - TZ_REGION_OFFSET = 0, - TZ_REGION_ZONE = 1 -}; -typedef enum tz_region_type TZ_REGION_TYPE; - -typedef struct tz_region TZ_REGION; -struct tz_region -{ - TZ_REGION_TYPE type; /* 0 : offset ; 1 : zone */ - union - { - int offset; /* in seconds */ - unsigned int zone_id; /* geographical zone id */ - }; -}; - typedef DB_BIGINT full_date_t; #if defined (SA_MODE) extern bool tz_Is_backward_compatible_timezone[]; diff --git a/src/base/xserver_interface.h 
b/src/base/xserver_interface.h index 5f725319f3d..47dcc1f89cf 100644 --- a/src/base/xserver_interface.h +++ b/src/base/xserver_interface.h @@ -49,6 +49,9 @@ #include "storage_common.h" #include "thread_compat.hpp" +// forward definitions +struct compile_context; + extern int xboot_initialize_server (const BOOT_CLIENT_CREDENTIAL * client_credential, BOOT_DB_PATH_INFO * db_path_info, bool db_overwrite, const char *file_addmore_vols, volatile DKNPAGES db_npages, PGLENGTH db_desired_pagesize, volatile DKNPAGES xlog_npages, @@ -163,6 +166,13 @@ extern BTID *xbtree_load_index (THREAD_ENTRY * thread_p, BTID * btid, const char HFID * hfids, int unique_pk, int not_null_flag, OID * fk_refcls_oid, BTID * fk_refcls_pk_btid, const char *fk_name, char *pred_stream, int pred_stream_size, char *expr_stream, int expr_steram_size, int func_col_id, int func_attr_index_start); +extern BTID *xbtree_load_online_index (THREAD_ENTRY * thread_p, BTID * btid, const char *bt_name, TP_DOMAIN * key_type, + OID * class_oids, int n_classes, int n_attrs, int *attr_ids, + int *attrs_prefix_length, HFID * hfids, int unique_pk, int not_null_flag, + OID * fk_refcls_oid, BTID * fk_refcls_pk_btid, const char *fk_name, + char *pred_stream, int pred_stream_size, char *expr_stream, int expr_steram_size, + int func_col_id, int func_attr_index_start); + extern int xbtree_delete_index (THREAD_ENTRY * thread_p, BTID * btid); extern BTREE_SEARCH xbtree_find_unique (THREAD_ENTRY * thread_p, BTID * btid, SCAN_OPERATION_TYPE scan_op_type, DB_VALUE * key, OID * class_oid, OID * oid, bool is_all_class_srch); @@ -194,7 +204,7 @@ extern int xqfile_get_list_file_page (THREAD_ENTRY * thread_p, QUERY_ID query_id char *page_bufp, int *page_sizep); /* new query interface */ -extern int xqmgr_prepare_query (THREAD_ENTRY * thrd, COMPILE_CONTEXT * ctx, XASL_STREAM * stream); +extern int xqmgr_prepare_query (THREAD_ENTRY * thrd, compile_context * ctx, XASL_STREAM * stream); extern QFILE_LIST_ID *xqmgr_execute_query 
(THREAD_ENTRY * thrd, const XASL_ID * xasl_id, QUERY_ID * query_idp, int dbval_cnt, void *data, QUERY_FLAG * flagp, CACHE_TIME * clt_cache_time, diff --git a/src/broker/broker.c b/src/broker/broker.c index 423e95c604e..1641dd79a13 100644 --- a/src/broker/broker.c +++ b/src/broker/broker.c @@ -73,16 +73,15 @@ #include "shard_shm.h" #include "shard_metadata.h" #include "broker_proxy_conn.h" +#include "dbtype_def.h" #if defined(WINDOWS) #include "broker_wsa_init.h" #endif -#if !defined(CAS_FOR_ORACLE) && !defined(CAS_FOR_MYSQL) -#include "dbdef.h" -#else /* !CAS_FOR_ORACLE && !CAS_FOR_MYSQL */ +#if defined(CAS_FOR_ORACLE) || defined(CAS_FOR_MYSQL) #define DB_EMPTY_SESSION (0) -#endif /* !CAS_FOR_ORACLE && !CAS_FOR_MYSQL */ +#endif /* CAS_FOR_ORACLE || CAS_FOR_MYSQL */ #ifdef WIN_FW #if !defined(WINDOWS) diff --git a/src/broker/broker_admin_pub.c b/src/broker/broker_admin_pub.c index 7d2a2bf7218..18587f7d5ff 100644 --- a/src/broker/broker_admin_pub.c +++ b/src/broker/broker_admin_pub.c @@ -73,12 +73,11 @@ #include "broker_acl.h" #include "chartype.h" #include "cubrid_getopt.h" +#include "dbtype_def.h" -#if !defined(CAS_FOR_ORACLE) && !defined(CAS_FOR_MYSQL) -#include "dbdef.h" -#else /* !CAS_FOR_ORACLE && !CAS_FOR_MYSQL */ +#if defined(CAS_FOR_ORACLE) || defined(CAS_FOR_MYSQL) #define DB_EMPTY_SESSION (0) -#endif /* !CAS_FOR_ORACLE && !CAS_FOR_MYSQL */ +#endif /* CAS_FOR_ORACLE || CAS_FOR_MYSQL */ #define ADMIN_ERR_MSG_SIZE 1024 diff --git a/src/broker/broker_log_converter.c b/src/broker/broker_log_converter.c index b7dd97b7663..77407e89101 100644 --- a/src/broker/broker_log_converter.c +++ b/src/broker/broker_log_converter.c @@ -395,6 +395,10 @@ log_bind_value (char *str, int bind_len, int lineno, FILE * outfp) { type = CCI_U_TYPE_ENUM; } + else if (strcmp (p, "JSON") == 0) + { + type = CCI_U_TYPE_JSON; + } else { fprintf (stderr, "log error [line:%d]\n", lineno); diff --git a/src/broker/broker_log_replay.c b/src/broker/broker_log_replay.c index 
68c425e7e62..ffab1911fc8 100644 --- a/src/broker/broker_log_replay.c +++ b/src/broker/broker_log_replay.c @@ -723,6 +723,10 @@ get_cci_type (char *p) { type = CCI_U_TYPE_ENUM; } + else if (strcmp (p, "JSON") == 0) + { + type = CCI_U_TYPE_JSON; + } else { type = -1; diff --git a/src/broker/broker_log_util.c b/src/broker/broker_log_util.c index d95fd08535c..cefc0874594 100644 --- a/src/broker/broker_log_util.c +++ b/src/broker/broker_log_util.c @@ -110,7 +110,7 @@ is_bind_with_size (char *buf, int *tot_val_size, int *info_size) type = atoi (buf + 2); if ((type != CCI_U_TYPE_CHAR) && (type != CCI_U_TYPE_STRING) && (type != CCI_U_TYPE_NCHAR) && (type != CCI_U_TYPE_VARNCHAR) && (type != CCI_U_TYPE_BIT) && (type != CCI_U_TYPE_VARBIT) - && (type != CCI_U_TYPE_ENUM)) + && (type != CCI_U_TYPE_ENUM) && (type != CCI_U_TYPE_JSON)) { return false; } diff --git a/src/broker/cas.c b/src/broker/cas.c index 9c3c1e9c0c2..a61e5c4f995 100644 --- a/src/broker/cas.c +++ b/src/broker/cas.c @@ -1938,9 +1938,7 @@ process_request (SOCKET sock_fd, T_NET_BUF * net_buf, T_REQ_INFO * req_info) { ux_set_utype_for_enum (CCI_U_TYPE_STRING); } -#endif /* !CAS_FOR_ORACLE && !CAS_FOR_MYSQL */ -#if !defined(CAS_FOR_ORACLE) && !defined(CAS_FOR_MYSQL) /* for driver less than 10.0 */ if (!DOES_CLIENT_UNDERSTAND_THE_PROTOCOL (req_info->client_version, PROTOCOL_V7)) { @@ -1949,9 +1947,13 @@ process_request (SOCKET sock_fd, T_NET_BUF * net_buf, T_REQ_INFO * req_info) ux_set_utype_for_datetimeltz (CCI_U_TYPE_DATETIME); ux_set_utype_for_timestampltz (CCI_U_TYPE_TIMESTAMP); } -#endif - /* Since DB_TYPE_JSON is mapped into CCI_U_TYPE_STRING, legacy drivers are also able to access JSON type. 
*/ + /* driver version < 10.2 */ + if (!DOES_CLIENT_UNDERSTAND_THE_PROTOCOL (req_info->client_version, PROTOCOL_V8)) + { + ux_set_utype_for_json (CCI_U_TYPE_STRING); + } +#endif net_buf->client_version = req_info->client_version; set_hang_check_time (); @@ -1959,14 +1961,12 @@ process_request (SOCKET sock_fd, T_NET_BUF * net_buf, T_REQ_INFO * req_info) set_hang_check_time (); #if !defined(CAS_FOR_ORACLE) && !defined(CAS_FOR_MYSQL) - /* set back original utype for enum */ + /* set back original utype for enum, date-time, JSON */ if (DOES_CLIENT_MATCH_THE_PROTOCOL (req_info->client_version, PROTOCOL_V2)) { ux_set_utype_for_enum (CCI_U_TYPE_ENUM); } -#endif /* !CAS_FOR_ORACLE && !CAS_FOR_MYSQL */ -#if !defined(CAS_FOR_ORACLE) && !defined(CAS_FOR_MYSQL) /* for driver less than 10.0 */ if (!DOES_CLIENT_UNDERSTAND_THE_PROTOCOL (req_info->client_version, PROTOCOL_V7)) { @@ -1975,6 +1975,12 @@ process_request (SOCKET sock_fd, T_NET_BUF * net_buf, T_REQ_INFO * req_info) ux_set_utype_for_datetimeltz (CCI_U_TYPE_DATETIMETZ); ux_set_utype_for_timestampltz (CCI_U_TYPE_TIMESTAMPTZ); } + + /* driver version < 10.2 */ + if (!DOES_CLIENT_UNDERSTAND_THE_PROTOCOL (req_info->client_version, PROTOCOL_V8)) + { + ux_set_utype_for_json (CCI_U_TYPE_JSON); + } #endif #ifndef LIBCAS_FOR_JSP diff --git a/src/broker/cas_execute.c b/src/broker/cas_execute.c index 80e4cebdf59..1b36dd8f9af 100644 --- a/src/broker/cas_execute.c +++ b/src/broker/cas_execute.c @@ -57,6 +57,7 @@ #include "broker_filename.h" #include "cas_sql_log2.h" +#include "tz_support.h" #include "release_string.h" #include "perf_monitor.h" #include "intl_support.h" @@ -68,6 +69,7 @@ #include "system_parameter.h" #include "schema_manager.h" #include "object_representation.h" +#include "connection_cl.h" #include "dbi.h" #include "dbtype.h" @@ -187,7 +189,8 @@ extern void set_query_timeout (T_SRV_HANDLE * srv_handle, int query_timeout); static int netval_to_dbval (void *type, void *value, DB_VALUE * db_val, T_NET_BUF * net_buf, char 
desired_type); static int cur_tuple (T_QUERY_RESULT * q_result, int max_col_size, char sensitive_flag, DB_OBJECT * obj, T_NET_BUF * net_buf); -static int dbval_to_net_buf (DB_VALUE * val, T_NET_BUF * net_buf, char flag, int max_col_size, char column_type_flag); +static int dbval_to_net_buf (DB_VALUE * val, T_NET_BUF * net_buf, char fetch_flag, int max_col_size, + char column_type_flag); static void dbobj_to_casobj (DB_OBJECT * obj, T_OBJECT * cas_obj); static void casobj_to_dbobj (T_OBJECT * cas_obj, DB_OBJECT ** obj); static void dblob_to_caslob (DB_VALUE * lob, T_LOB_HANDLE * cas_lob); @@ -352,7 +355,7 @@ static char cas_u_type[] = { 0, /* 0 */ CCI_U_TYPE_TIMESTAMPLTZ, /* 37 */ CCI_U_TYPE_DATETIMETZ, /* 38 */ CCI_U_TYPE_DATETIMELTZ, /* 39 */ - CCI_U_TYPE_STRING, /* 40 */ + CCI_U_TYPE_JSON, /* 40 */ }; static T_FETCH_FUNC fetch_func[] = { @@ -3868,6 +3871,10 @@ netval_to_dbval (void *net_type, void *net_value, DB_VALUE * out_val, T_NET_BUF { type = CCI_U_TYPE_NCHAR; } + else if (desired_type == DB_TYPE_JSON) + { + type = CCI_U_TYPE_JSON; + } } if (type == CCI_U_TYPE_DATETIME) @@ -4342,6 +4349,16 @@ netval_to_dbval (void *net_type, void *net_value, DB_VALUE * out_val, T_NET_BUF coercion_flag = FALSE; } break; + case CCI_U_TYPE_JSON: + { + char *value; + int val_size; + + net_arg_get_str (&value, &val_size, net_value); + + err_code = db_json_val_from_str (value, val_size, &db_val); + } + break; case CCI_U_TYPE_USHORT: case CCI_U_TYPE_UINT: @@ -5018,6 +5035,8 @@ dbval_to_net_buf (DB_VALUE * val, T_NET_BUF * net_buf, char fetch_flag, int max_ str = db_get_json_raw_body (val); bytes_size = strlen (str); + /* no matter which column type is returned to client (JSON or STRING, depending on client version), + * the data is always encoded as string */ add_res_data_string (net_buf, str, bytes_size, 0, CAS_SCHEMA_DEFAULT_CHARSET, &data_size); db_private_free (NULL, str); } @@ -5455,6 +5474,9 @@ fetch_result (T_SRV_HANDLE * srv_handle, int cursor_pos, int fetch_count, char f } 
} + /* Be sure that cursor is closed, if query executed with commit and not holdable. */ + assert (!tran_was_latest_query_committed () || srv_handle->is_holdable == true || err_code == DB_CURSOR_END); + if (DOES_CLIENT_UNDERSTAND_THE_PROTOCOL (client_version, PROTOCOL_V5)) { net_buf_cp_byte (net_buf, fetch_end_flag); @@ -9622,11 +9644,10 @@ has_stmt_result_set (char stmt_type) static bool check_auto_commit_after_fetch_done (T_SRV_HANDLE * srv_handle) { - // scrollable cursor can also be closed with help of holdable cursor // To close an updatable cursor is dangerous since it lose locks and updating cursor is allowed before closing it. if (srv_handle->auto_commit_mode == TRUE && srv_handle->cur_result_index == srv_handle->num_q_result - && srv_handle->is_updatable == FALSE) + && srv_handle->forward_only_cursor == TRUE && srv_handle->is_updatable == FALSE) { return true; } diff --git a/src/broker/cas_function.c b/src/broker/cas_function.c index 09716efd979..9528cb9fa87 100644 --- a/src/broker/cas_function.c +++ b/src/broker/cas_function.c @@ -144,6 +144,7 @@ static const char *type_str_tbl[] = { "DATETIMETZ", /* CCI_U_TYPE_DATETIMETZ */ "DATETIMELTZ", /* CCI_U_TYPE_DATETIMELTZ */ "TIMETZ", /* CCI_U_TYPE_TIMETZ */ + "JSON", /* CCI_U_TYPE_JSON */ }; FN_RETURN @@ -2208,6 +2209,7 @@ bind_value_print (char type, void *net_value, bool slow_log) case CCI_U_TYPE_VARBIT: case CCI_U_TYPE_NUMERIC: case CCI_U_TYPE_ENUM: + case CCI_U_TYPE_JSON: { char *str_val; int val_size; diff --git a/src/broker/cas_protocol.h b/src/broker/cas_protocol.h index 5625882e254..445fd593c2d 100644 --- a/src/broker/cas_protocol.h +++ b/src/broker/cas_protocol.h @@ -218,7 +218,8 @@ extern "C" PROTOCOL_V5 = 5, /* shard feature, fetch end flag */ PROTOCOL_V6 = 6, /* cci/cas4m support unsigned integer type */ PROTOCOL_V7 = 7, /* timezone types, to pin xasl entry for retry */ - CURRENT_PROTOCOL = PROTOCOL_V7 + PROTOCOL_V8 = 8, /* JSON type */ + CURRENT_PROTOCOL = PROTOCOL_V8 }; typedef enum t_cas_protocol 
T_CAS_PROTOCOL; diff --git a/src/broker/cas_runner.c b/src/broker/cas_runner.c index 2e65f9d4c19..e61939f6f22 100644 --- a/src/broker/cas_runner.c +++ b/src/broker/cas_runner.c @@ -1109,7 +1109,7 @@ process_bind (char *linebuf, int *num_bind_p, T_BIND_INFO * bind_info) if ((bind_info[num_bind].type == CCI_U_TYPE_CHAR) || (bind_info[num_bind].type == CCI_U_TYPE_STRING) || (bind_info[num_bind].type == CCI_U_TYPE_NCHAR) || (bind_info[num_bind].type == CCI_U_TYPE_VARNCHAR) || (bind_info[num_bind].type == CCI_U_TYPE_BIT) || (bind_info[num_bind].type == CCI_U_TYPE_VARBIT) - || (bind_info[num_bind].type == CCI_U_TYPE_ENUM)) + || (bind_info[num_bind].type == CCI_U_TYPE_ENUM) || (bind_info[num_bind].type == CCI_U_TYPE_JSON)) { bind_info[num_bind].len = atoi (p + 1); p = strchr (p + 1, ' '); diff --git a/src/broker/shard_proxy_io.c b/src/broker/shard_proxy_io.c index 9c9cb3c7d84..6b0d9e56874 100644 --- a/src/broker/shard_proxy_io.c +++ b/src/broker/shard_proxy_io.c @@ -44,6 +44,13 @@ #include "shard_shm.h" #include "broker_acl.h" +#ifndef min +#define min(a,b) ((a) < (b) ? (a) : (b)) +#endif +#ifndef max +#define max(a,b) ((a) > (b) ? (a) : (b)) +#endif + #if defined (SUPPRESS_STRLEN_WARNING) #define strlen(s1) ((int) strlen(s1)) #endif /* defined (SUPPRESS_STRLEN_WARNING) */ diff --git a/src/broker/shard_proxy_io.h b/src/broker/shard_proxy_io.h index 647b6524a83..73a5b7c1dfe 100644 --- a/src/broker/shard_proxy_io.h +++ b/src/broker/shard_proxy_io.h @@ -42,13 +42,6 @@ #include "shard_proxy_common.h" #include "shard_metadata.h" -#ifndef min -#define min(a,b) ((a) < (b) ? (a) : (b)) -#endif -#ifndef max -#define max(a,b) ((a) > (b) ? 
(a) : (b)) -#endif - #if defined(WINDOWS) #define CLOSESOCKET(fd) closesocket((SOCKET)fd) #define READSOCKET(fd, buf, len) recv((SOCKET)fd, buf, len, 0) diff --git a/src/cci/cas_cci.c b/src/cci/cas_cci.c index e303d97e12a..f7de81d7c27 100644 --- a/src/cci/cas_cci.c +++ b/src/cci/cas_cci.c @@ -5142,6 +5142,8 @@ dbg_u_type_str (T_CCI_U_TYPE utype) return "CCI_U_TYPE_UINT"; case CCI_U_TYPE_UBIGINT: return "CCI_U_TYPE_UBIGINT"; + case CCI_U_TYPE_JSON: + return "CCI_U_TYPE_JSON"; default: return "***"; } diff --git a/src/cci/cas_cci.h b/src/cci/cas_cci.h index 69fd7a1921c..e82ad74d4f8 100644 --- a/src/cci/cas_cci.h +++ b/src/cci/cas_cci.h @@ -354,7 +354,8 @@ typedef enum /* Disabled type */ CCI_U_TYPE_TIMETZ = 33, /* internal use only - RESERVED */ /* end of disabled types */ - CCI_U_TYPE_LAST = CCI_U_TYPE_DATETIMELTZ + CCI_U_TYPE_JSON = 34, + CCI_U_TYPE_LAST = CCI_U_TYPE_JSON } T_CCI_U_TYPE; typedef unsigned char T_CCI_U_EXT_TYPE; @@ -577,7 +578,6 @@ typedef struct DATASOURCE_T T_CCI_DATASOURCE; #endif #define CUBRID_STMT_CALL_SP 0x7e #define CUBRID_STMT_UNKNOWN 0x7f - /* for backward compatibility */ #define T_CCI_SQLX_CMD T_CCI_CUBRID_STMT diff --git a/src/cci/cci_query_execute.c b/src/cci/cci_query_execute.c index 824e9a5f7e4..df12b7c3d9e 100644 --- a/src/cci/cci_query_execute.c +++ b/src/cci/cci_query_execute.c @@ -3263,6 +3263,7 @@ qe_get_data_str (T_VALUE_BUF * conv_val_buf, T_CCI_U_TYPE u_type, char *col_valu case CCI_U_TYPE_VARNCHAR: case CCI_U_TYPE_NUMERIC: case CCI_U_TYPE_ENUM: + case CCI_U_TYPE_JSON: { *((char **) value) = col_value_p; *indicator = col_val_size - 1; @@ -5094,7 +5095,7 @@ fetch_info_decode (char *buf, int size, int num_cols, T_TUPLE_VALUE ** tuple_val if (charset != NULL && (u_type == CCI_U_TYPE_CHAR || u_type == CCI_U_TYPE_STRING || u_type == CCI_U_TYPE_NCHAR - || u_type == CCI_U_TYPE_VARNCHAR || u_type == CCI_U_TYPE_ENUM)) + || u_type == CCI_U_TYPE_VARNCHAR || u_type == CCI_U_TYPE_ENUM || u_type == CCI_U_TYPE_JSON)) { err_code = 
decode_result_col (col_p, data_size, &(tmp_tuple_value[i].column_ptr[j]), charset); @@ -5728,6 +5729,7 @@ bind_value_conversion (T_CCI_A_TYPE a_type, T_CCI_U_TYPE u_type, char flag, void case CCI_U_TYPE_VARNCHAR: case CCI_U_TYPE_NUMERIC: case CCI_U_TYPE_ENUM: + case CCI_U_TYPE_JSON: if (length == UNMEASURED_LENGTH) { bind_value->size = strlen ((const char *) value); @@ -6632,6 +6634,7 @@ bind_value_to_net_buf (T_NET_BUF * net_buf, T_CCI_U_TYPE u_type, void *value, in case CCI_U_TYPE_NCHAR: case CCI_U_TYPE_VARNCHAR: case CCI_U_TYPE_ENUM: + case CCI_U_TYPE_JSON: if (value == NULL) { ADD_ARG_BIND_STR (net_buf, "", 1, charset); diff --git a/src/cci/cci_t_set.c b/src/cci/cci_t_set.c index 69b87a920ec..be9334f4271 100644 --- a/src/cci/cci_t_set.c +++ b/src/cci/cci_t_set.c @@ -238,6 +238,7 @@ t_set_make (T_SET * set, char ele_type, int size, void *value, int *indicator) case CCI_U_TYPE_VARNCHAR: case CCI_U_TYPE_NUMERIC: case CCI_U_TYPE_ENUM: + case CCI_U_TYPE_JSON: { char *ele_value; ele_value = ((char **) value)[i]; diff --git a/src/communication/network_interface_cl.c b/src/communication/network_interface_cl.c index fc1d81d5fba..30a0e8ac8d5 100644 --- a/src/communication/network_interface_cl.c +++ b/src/communication/network_interface_cl.c @@ -63,6 +63,7 @@ #include "db.h" #include "db_query.h" #include "dbtype.h" +#include "compile_context.h" #if defined (SA_MODE) #include "thread_manager.hpp" #endif // SA_MODE @@ -5580,18 +5581,19 @@ btree_load_index (BTID * btid, const char *bt_name, TP_DOMAIN * key_type, OID * int *attr_ids, int *attrs_prefix_length, HFID * hfids, int unique_pk, int not_null_flag, OID * fk_refcls_oid, BTID * fk_refcls_pk_btid, const char *fk_name, char *pred_stream, int pred_stream_size, char *expr_stream, int expr_stream_size, int func_col_id, - int func_attr_index_start) + int func_attr_index_start, SM_INDEX_STATUS index_status) { #if defined(CS_MODE) int error = NO_ERROR, req_error, request_size, domain_size; char *ptr; char *request; - 
OR_ALIGNED_BUF (OR_INT_SIZE + OR_BTID_ALIGNED_SIZE) a_reply; + OR_ALIGNED_BUF (OR_INT_SIZE * 2 + OR_BTID_ALIGNED_SIZE) a_reply; char *reply; int i, total_attrs, bt_strlen, fk_strlen; int index_info_size = 0; char *stream = NULL; int stream_size = 0; + LOCK curr_cls_lock; reply = OR_ALIGNED_BUF_START (a_reply); @@ -5618,7 +5620,8 @@ btree_load_index (BTID * btid, const char *bt_name, TP_DOMAIN * key_type, OID * + OR_OID_SIZE /* fk_refcls_oid */ + OR_BTID_ALIGNED_SIZE /* fk_refcls_pk_btid */ + or_packed_string_length (fk_name, &fk_strlen) /* fk_name */ - + index_info_size /* filter predicate or function index stream size */ ); + + index_info_size /* filter predicate or function index stream size */ + + OR_INT_SIZE /* Index status */ ); request = (char *) malloc (request_size); if (request == NULL) @@ -5692,13 +5695,21 @@ btree_load_index (BTID * btid, const char *bt_name, TP_DOMAIN * key_type, OID * ptr = or_pack_int (ptr, -1); /* stream=NULL, stream_size=0 */ } + ptr = or_pack_int (ptr, index_status); /* Index status. */ + req_error = net_client_request (NET_SERVER_BTREE_LOADINDEX, request, request_size, reply, OR_ALIGNED_BUF_SIZE (a_reply), stream, stream_size, NULL, 0); if (!req_error) { + int t; + ptr = or_unpack_int (reply, &error); + + ptr = or_unpack_int (ptr, &t); + curr_cls_lock = (LOCK) t; + ptr = or_unpack_btid (ptr, btid); if (error != NO_ERROR) { @@ -5710,6 +5721,18 @@ btree_load_index (BTID * btid, const char *bt_name, TP_DOMAIN * key_type, OID * btid = NULL; } + if (index_status == SM_ONLINE_INDEX_BUILDING_IN_PROGRESS && curr_cls_lock != SCH_M_LOCK) + { + // hope it won't happen. server failed to restore the demoted lock. + // It just help things don't go worse. 
+ + MOP class_mop = ws_mop (&class_oids[0], sm_Root_class_mop); + if (class_mop != NULL) + { + ws_set_lock (class_mop, curr_cls_lock); + } + } + free_and_init (request); return error; diff --git a/src/communication/network_interface_cl.h b/src/communication/network_interface_cl.h index 38cd3296054..1ebb328091a 100644 --- a/src/communication/network_interface_cl.h +++ b/src/communication/network_interface_cl.h @@ -33,7 +33,7 @@ #include -#include "dbdef.h" +#include "dbtype_def.h" #include "replication.h" #include "server_interface.h" #include "perf_monitor.h" @@ -47,6 +47,10 @@ #include "log_impl.h" #include "parse_tree.h" #include "xasl.h" +#include "timezone_lib_common.h" + +// forward definitions +struct compile_context; /* killtran supporting structures and functions */ typedef struct one_tran_info ONE_TRAN_INFO; @@ -206,7 +210,7 @@ extern int btree_load_index (BTID * btid, const char *bt_name, TP_DOMAIN * key_t int n_attrs, int *attr_ids, int *attrs_prefix_length, HFID * hfids, int unique_pk, int not_null_flag, OID * fk_refcls_oid, BTID * fk_refcls_pk_btid, const char *fk_name, char *pred_stream, int pred_stream_size, char *expr_stream, int expr_stream_size, - int func_col_id, int func_attr_index_start); + int func_col_id, int func_attr_index_start, SM_INDEX_STATUS index_status); extern int btree_delete_index (BTID * btid); extern int locator_log_force_nologging (void); extern int locator_remove_class_from_index (OID * oid, BTID * btid, HFID * hfid); @@ -216,7 +220,7 @@ extern BTREE_SEARCH btree_find_multi_uniques (OID * class_oid, int pruning_type, int count, SCAN_OPERATION_TYPE op_type, OID ** oids, int *oids_count); extern int btree_class_test_unique (char *buf, int buf_size); extern int qfile_get_list_file_page (QUERY_ID query_id, VOLID volid, PAGEID pageid, char *buffer, int *buffer_size); -extern int qmgr_prepare_query (COMPILE_CONTEXT * context, XASL_STREAM * stream); +extern int qmgr_prepare_query (struct compile_context *context, XASL_STREAM * 
stream); extern QFILE_LIST_ID *qmgr_execute_query (const XASL_ID * xasl_id, QUERY_ID * query_idp, int dbval_cnt, const DB_VALUE * dbvals, QUERY_FLAG flag, CACHE_TIME * clt_cache_time, diff --git a/src/communication/network_interface_sr.c b/src/communication/network_interface_sr.c index 39137616282..30fefa24155 100644 --- a/src/communication/network_interface_sr.c +++ b/src/communication/network_interface_sr.c @@ -75,6 +75,7 @@ #include "tz_support.h" #include "dbtype.h" #include "thread_manager.hpp" // for thread_get_thread_entry_info +#include "compile_context.h" #if defined (SUPPRESS_STRLEN_WARNING) #define strlen(s1) ((int) strlen(s1)) @@ -88,6 +89,11 @@ #define NET_DEFER_END_QUERIES_MAX 10 +/* Query execution with commit. */ +#define QEWC_SAFE_GUARD_SIZE 1024 +// To have the safe area is just a safe guard to avoid potential issues of bad size calculation. +#define QEWC_MAX_DATA_SIZE (DB_PAGESIZE - QEWC_SAFE_GUARD_SIZE) + /* This file is only included in the server. So set the on_server flag on */ unsigned int db_on_server = 1; @@ -100,6 +106,8 @@ STATIC_INLINE void stran_server_auto_commit_or_abort (THREAD_ENTRY * thread_p, u bool has_updated, bool * end_query_allowed, TRAN_STATE * tran_state, bool * should_conn_reset) __attribute__ ((ALWAYS_INLINE)); +STATIC_INLINE int stran_can_end_after_query_execution (THREAD_ENTRY * thread_p, int query_flag, QFILE_LIST_ID * list_id, + bool * can_end_transaction) __attribute__ ((ALWAYS_INLINE)); static bool need_to_abort_tran (THREAD_ENTRY * thread_p, int *errid); static int server_capabilities (void); @@ -3806,7 +3814,7 @@ sbtree_load_index (THREAD_ENTRY * thread_p, unsigned int rid, char *request, int int *attr_prefix_lengths = NULL; TP_DOMAIN *key_type; char *ptr; - OR_ALIGNED_BUF (OR_INT_SIZE + OR_BTID_ALIGNED_SIZE) a_reply; + OR_ALIGNED_BUF (OR_INT_SIZE * 2 + OR_BTID_ALIGNED_SIZE) a_reply; char *reply = OR_ALIGNED_BUF_START (a_reply); char *pred_stream = NULL; int pred_stream_size = 0, size = 0; @@ -3814,6 +3822,7 @@ 
sbtree_load_index (THREAD_ENTRY * thread_p, unsigned int rid, char *request, int int index_info_type; char *expr_stream = NULL; int csserror; + int index_status = 0; ptr = or_unpack_btid (request, &btid); ptr = or_unpack_string_nocopy (ptr, &bt_name); @@ -3897,11 +3906,24 @@ sbtree_load_index (THREAD_ENTRY * thread_p, unsigned int rid, char *request, int break; } - return_btid = - xbtree_load_index (thread_p, &btid, bt_name, key_type, class_oids, n_classes, n_attrs, attr_ids, - attr_prefix_lengths, hfids, unique_pk, not_null_flag, &fk_refcls_oid, &fk_refcls_pk_btid, - fk_name, pred_stream, pred_stream_size, expr_stream, expr_stream_size, func_col_id, - func_attr_index_start); + ptr = or_unpack_int (ptr, &index_status); /* Get index status. */ + if (index_status == OR_ONLINE_INDEX_BUILDING_IN_PROGRESS) + { + return_btid = + xbtree_load_online_index (thread_p, &btid, bt_name, key_type, class_oids, n_classes, n_attrs, attr_ids, + attr_prefix_lengths, hfids, unique_pk, not_null_flag, &fk_refcls_oid, + &fk_refcls_pk_btid, fk_name, pred_stream, pred_stream_size, expr_stream, + expr_stream_size, func_col_id, func_attr_index_start); + } + else + { + return_btid = + xbtree_load_index (thread_p, &btid, bt_name, key_type, class_oids, n_classes, n_attrs, attr_ids, + attr_prefix_lengths, hfids, unique_pk, not_null_flag, &fk_refcls_oid, &fk_refcls_pk_btid, + fk_name, pred_stream, pred_stream_size, expr_stream, expr_stream_size, func_col_id, + func_attr_index_start); + } + if (return_btid == NULL) { (void) return_error_to_client (thread_p, rid); @@ -3911,12 +3933,30 @@ sbtree_load_index (THREAD_ENTRY * thread_p, unsigned int rid, char *request, int if (return_btid == NULL) { - ptr = or_pack_int (reply, er_errid ()); + int err; + + ASSERT_ERROR_AND_SET (err); + ptr = or_pack_int (reply, err); } else { ptr = or_pack_int (reply, NO_ERROR); } + + if (index_status == OR_ONLINE_INDEX_BUILDING_IN_PROGRESS) + { + // it may not be really necessary. 
it just help things don't go worse that client keep caching ex-lock. + int tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p); + LOCK cls_lock = lock_get_object_lock (&class_oids[0], oid_Root_class_oid, tran_index); + + assert (cls_lock == SCH_M_LOCK); // hope it never be IX_LOCK. + ptr = or_pack_int (ptr, (int) cls_lock); + } + else + { + ptr = or_pack_int (ptr, SCH_M_LOCK); // irrelevant + } + ptr = or_pack_btid (ptr, &btid); css_send_data_to_client (thread_p->conn_entry, rid, reply, OR_ALIGNED_BUF_SIZE (a_reply)); @@ -4622,6 +4662,135 @@ sqmgr_prepare_query (THREAD_ENTRY * thread_p, unsigned int rid, char *request, i } } +/* + * stran_can_end_after_query_execution - Check whether can end transaction after query execution. + * + * return:error code + * + * thread_p(in): thread entry + * query_flag(in): query flag + * list_id(in): list id + * can_end_transaction(out): true, if transaction can be safely ended + * + */ +STATIC_INLINE int +stran_can_end_after_query_execution (THREAD_ENTRY * thread_p, int query_flag, QFILE_LIST_ID * list_id, + bool * can_end_transaction) +{ + QFILE_LIST_SCAN_ID scan_id; + QFILE_TUPLE_RECORD tuple_record = { NULL, 0 }; + SCAN_CODE qp_scan; + OR_BUF buf; + TP_DOMAIN **domains; + PR_TYPE *pr_type; + int i, flag, compressed_size, decompressed_size, diff_size, val_length; + char *tuple_p; + bool found_compressible_string_domain, exceed_a_page; + + assert (list_id != NULL && list_id->type_list.domp != NULL && can_end_transaction != NULL); + + *can_end_transaction = false; + + if (list_id->page_cnt != 1) + { + /* Needs fetch request. Do not allow ending transaction. */ + return NO_ERROR; + } + + if (list_id->last_offset >= QEWC_MAX_DATA_SIZE) + { + /* Needs fetch request. Do not allow ending transaction. */ + return NO_ERROR; + } + + if (query_flag & RESULT_HOLDABLE) + { + /* Holdable result, do not check for compression. 
*/ + *can_end_transaction = true; + return NO_ERROR; + } + + domains = list_id->type_list.domp; + found_compressible_string_domain = false; + for (i = 0; i < list_id->type_list.type_cnt; i++) + { + pr_type = domains[i]->type; + assert (pr_type != NULL); + + if (pr_type->id == DB_TYPE_VARCHAR || pr_type->id == DB_TYPE_VARNCHAR) + { + found_compressible_string_domain = true; + break; + } + } + + if (!found_compressible_string_domain) + { + /* Not compressible domains, do not check for compression. */ + *can_end_transaction = true; + return NO_ERROR; + } + + if (qfile_open_list_scan (list_id, &scan_id) != NO_ERROR) + { + return ER_FAILED; + } + + /* Estimates the data and header information. */ + diff_size = 0; + exceed_a_page = false; + while (!exceed_a_page) + { + qp_scan = qfile_scan_list_next (thread_p, &scan_id, &tuple_record, PEEK); + if (qp_scan != S_SUCCESS) + { + break; + } + + tuple_p = tuple_record.tpl; + or_init (&buf, tuple_p, QFILE_GET_TUPLE_LENGTH (tuple_p)); + tuple_p += QFILE_TUPLE_LENGTH_SIZE; + for (i = 0; i < list_id->type_list.type_cnt; i++) + { + flag = QFILE_GET_TUPLE_VALUE_FLAG (tuple_p); + val_length = QFILE_GET_TUPLE_VALUE_LENGTH (tuple_p); + tuple_p += QFILE_TUPLE_VALUE_HEADER_SIZE; + + pr_type = domains[i]->type; + if (flag != V_UNBOUND && (pr_type->id == DB_TYPE_VARCHAR || pr_type->id == DB_TYPE_VARNCHAR)) + { + buf.ptr = tuple_p; + or_get_varchar_compression_lengths (&buf, &compressed_size, &decompressed_size); + if (compressed_size != 0) + { + /* Compression used. */ + diff_size += decompressed_size - compressed_size; + if (list_id->last_offset + diff_size >= QEWC_MAX_DATA_SIZE) + { + /* Needs fetch request. Do not allow ending transaction. 
*/ + exceed_a_page = true; + break; + } + } + } + + tuple_p += val_length; + } + } + + qfile_close_scan (thread_p, &scan_id); + + if (qp_scan == S_ERROR) + { + // might be interrupted + return ER_FAILED; + } + + *can_end_transaction = !exceed_a_page; + + return NO_ERROR; +} + /* * sqmgr_execute_query - Process a SERVER_QM_EXECUTE request * @@ -4869,12 +5038,13 @@ sqmgr_execute_query (THREAD_ENTRY * thread_p, unsigned int rid, char *request, i qmgr_free_old_page_and_init (thread_p, page_ptr, list_id->tfile_vfid); page_ptr = aligned_page_buf; - /* for now, allow end query if there is only one page */ - if (list_id->page_cnt != 1) + /* for now, allow end query if there is only one page and more ... */ + if (stran_can_end_after_query_execution (thread_p, query_flag, list_id, &end_query_allowed) != NO_ERROR) { - // This execution request is followed by fetch. - end_query_allowed = false; + (void) return_error_to_client (thread_p, rid); } + + // When !end_query_allowed, it means this execution request is followed by fetch request(s). } else { diff --git a/src/compat/cache_time.h b/src/compat/cache_time.h new file mode 100644 index 00000000000..cc516cf673d --- /dev/null +++ b/src/compat/cache_time.h @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +/* + * cache_time.h - CACHE TIME RELATED DEFINITIONS + */ + +#ifndef _CACHE_TIME_H_ +#define _CACHE_TIME_H_ + +#ident "$Id$" + +typedef struct cache_time CACHE_TIME; +struct cache_time +{ + int sec; + int usec; +}; + +#define CACHE_TIME_AS_ARGS(ct) (ct)->sec, (ct)->usec + +#define CACHE_TIME_EQ(T1, T2) \ + (((T1)->sec != 0) && ((T1)->sec == (T2)->sec) && ((T1)->usec == (T2)->usec)) + +#define CACHE_TIME_RESET(T) \ + do \ + { \ + (T)->sec = 0; \ + (T)->usec = 0; \ + } \ + while (0) + +#define CACHE_TIME_MAKE(CT, TV) \ + do \ + { \ + (CT)->sec = (TV)->tv_sec; \ + (CT)->usec = (TV)->tv_usec; \ + } \ + while (0) + +#define OR_CACHE_TIME_SIZE (OR_INT_SIZE * 2) + +#define OR_PACK_CACHE_TIME(PTR, T) \ + do \ + { \ + if ((CACHE_TIME *) (T) != NULL) \ + { \ + PTR = or_pack_int (PTR, (T)->sec); \ + PTR = or_pack_int (PTR, (T)->usec); \ + } \ + else \ + { \ + PTR = or_pack_int (PTR, 0); \ + PTR = or_pack_int (PTR, 0); \ + } \ + } \ + while (0) + +#define OR_UNPACK_CACHE_TIME(PTR, T) \ + do \ + { \ + if ((CACHE_TIME *) (T) != NULL) \ + { \ + PTR = or_unpack_int (PTR, &((T)->sec)); \ + PTR = or_unpack_int (PTR, &((T)->usec)); \ + } \ + } \ + while (0) + +#endif /* _CACHE_TIME_T_ */ diff --git a/src/compat/cnv.c b/src/compat/cnv.c index 61c8bd13897..f1fcf6cdb67 100644 --- a/src/compat/cnv.c +++ b/src/compat/cnv.c @@ -49,6 +49,7 @@ #if defined(SERVER_MODE) #include "critical_section.h" #endif +#include "tz_support.h" #include "db_date.h" #include "dbtype.h" #if defined (SERVER_MODE) diff --git a/src/compat/db.h b/src/compat/db.h index 8f8b339f03d..3e3c278dd97 100644 --- a/src/compat/db.h +++ b/src/compat/db.h @@ -31,7 +31,6 @@ #include #include "error_manager.h" -#include "dbdef.h" #include "intl_support.h" #include "db_date.h" #include 
"object_representation.h" diff --git a/src/compat/db_admin.c b/src/compat/db_admin.c index 629f5708cb5..f844f7e34d4 100644 --- a/src/compat/db_admin.c +++ b/src/compat/db_admin.c @@ -62,6 +62,7 @@ #if !defined(CS_MODE) #include "session.h" #endif +#include "connection_cl.h" #include "dbtype.h" #if !defined(WINDOWS) @@ -70,7 +71,22 @@ void (*prev_sigfpe_handler) (int) = SIG_DFL; #include "wintcp.h" #endif /* !WINDOWS */ -#include "db_admin.h" +/* host status for marking abnormal host status */ +typedef struct db_host_status DB_HOST_STATUS; +struct db_host_status +{ + char hostname[MAXHOSTNAMELEN]; + int status; +}; + +typedef struct db_host_status_list DB_HOST_STATUS_LIST; +struct db_host_status_list +{ + /* preferred_hosts + db-hosts */ + DB_HOST_STATUS hostlist[MAX_NUM_DB_HOSTS * 2]; + DB_HOST_STATUS *connected_host_status; + int last_host_idx; +}; /* Some like to assume that the db_ layer is able to recognize that a database has not been successfully restarted. For now, check every @@ -142,9 +158,9 @@ install_static_methods (void) int db_init (const char *program, int print_version, const char *dbname, const char *db_path, const char *vol_path, - const char *log_path, const char *lob_path, const char *host_name, const bool overwrite, - const char *comments, const char *addmore_vols_file, int npages, int desired_pagesize, int log_npages, - int desired_log_page_size, const char *lang_charset) + const char *log_path, const char *lob_path, const char *host_name, const bool overwrite, const char *comments, + const char *addmore_vols_file, int npages, int desired_pagesize, int log_npages, int desired_log_page_size, + const char *lang_charset) { #if defined (CUBRID_DEBUG) int value; diff --git a/src/compat/db_date.c b/src/compat/db_date.c index 96f2e3d5557..7a41bc3a5fd 100644 --- a/src/compat/db_date.c +++ b/src/compat/db_date.c @@ -29,6 +29,7 @@ #include #include "system.h" +#include "tz_support.h" #include "db_date.h" #include diff --git a/src/compat/db_date.h 
b/src/compat/db_date.h index 8cf14ab6fec..f52733cf758 100644 --- a/src/compat/db_date.h +++ b/src/compat/db_date.h @@ -29,23 +29,6 @@ #include -#include "tz_support.h" - -#define db_utime_to_string db_timestamp_to_string -#define db_string_to_utime db_string_to_timestamp -#define db_date_parse_utime db_date_parse_timestamp - -enum -{ - TIME_SPECIFIER = 1, - DATE_SPECIFIER = 2, - DATETIME_SPECIFIER = 3, - REMOVED_TIMETZ_SPECIFIER = 4, - DATETIMETZ_SPECIFIER = 5 -}; - -extern void db_date_locale_init (void); - /* DB_DATE functions */ extern int db_date_weekday (DB_DATE * date); extern int db_date_to_string (char *buf, int bufsize, DB_DATE * date); diff --git a/src/compat/db_elo.c b/src/compat/db_elo.c index 47f627cac07..8aa6fd53786 100644 --- a/src/compat/db_elo.c +++ b/src/compat/db_elo.c @@ -30,6 +30,7 @@ #include "storage_common.h" #include "object_primitive.h" #include "db.h" +#include "elo.h" #include "db_elo.h" #include "dbtype.h" diff --git a/src/compat/db_elo.h b/src/compat/db_elo.h index 0e157ea2557..b9a3efd59c3 100644 --- a/src/compat/db_elo.h +++ b/src/compat/db_elo.h @@ -26,8 +26,9 @@ #ident "$Id$" -#include "config.h" -#include "elo.h" +#include + +#include "dbtype_def.h" extern int db_create_fbo (DB_VALUE * value, DB_TYPE type); /* */ @@ -41,9 +42,4 @@ extern DB_BIGINT db_elo_size (DB_ELO * elo); extern int db_elo_read (const DB_ELO * elo, off_t pos, void *buf, size_t count, DB_BIGINT * read_bytes); extern int db_elo_write (DB_ELO * elo, off_t pos, void *buf, size_t count, DB_BIGINT * written_bytes); -#if defined (ENABLE_UNUSED_FUNCTION) -extern int db_elo_get_meta (const DB_ELO * elo, const char *key, char *buf, int bufsz); -extern int db_elo_set_meta (DB_ELO * elo, const char *key, const char *val); -#endif - #endif /* _DB_ELO_H_ */ diff --git a/src/compat/db_info.c b/src/compat/db_info.c index 952bc596ac9..b09817dce7e 100644 --- a/src/compat/db_info.c +++ b/src/compat/db_info.c @@ -30,7 +30,6 @@ #include "boot_cl.h" #include "class_object.h" #include 
"db.h" -#include "dbdef.h" #include "dbtype.h" #include "locator_cl.h" #include "mem_block.hpp" diff --git a/src/compat/db_json.cpp b/src/compat/db_json.cpp index b8e9c682c1e..68156b2d7b0 100644 --- a/src/compat/db_json.cpp +++ b/src/compat/db_json.cpp @@ -58,6 +58,7 @@ #include "dbtype.h" #include "memory_alloc.h" +#include "string_opfunc.h" #include "system_parameter.h" // we define COPY in storage_common.h, but so does rapidjson in its headers. We don't need the definition from storage @@ -115,6 +116,8 @@ typedef rapidjson::MemoryPoolAllocator JSON_PRIVATE_MEM typedef rapidjson::GenericValue JSON_VALUE; typedef rapidjson::GenericPointer JSON_POINTER; typedef rapidjson::GenericStringBuffer JSON_STRING_BUFFER; +typedef rapidjson::GenericMemberIterator::Iterator JSON_MEMBER_ITERATOR; +typedef rapidjson::GenericArray::ConstValueIterator JSON_VALUE_ITERATOR; class JSON_DOC: public rapidjson::GenericDocument { @@ -150,10 +153,189 @@ class JSON_DOC: public rapidjson::GenericDocument CopyFrom (*value, m_value_doc->GetAllocator ()); + + return m_value_doc; + } + + void reset () + { + m_input_doc = nullptr; // clear input + } + + bool is_empty () const + { + return m_input_doc == nullptr; // no input + } + + // delete only the content of the JSON_ITERATOR for reuse + void clear_content () + { + if (m_value_doc != nullptr) + { + db_json_delete_doc (m_value_doc); + } + } + + protected: + const JSON_DOC *m_input_doc; // document being iterated + JSON_DOC *m_value_doc; // document that can store iterator "value" +}; + +// JSON Object iterator - iterates through object members +// +class JSON_OBJECT_ITERATOR : public JSON_ITERATOR +{ + public: + JSON_OBJECT_ITERATOR () + : m_iterator () + { + // + } + + // advance to next member + void next () override; + // has more members + bool has_next () override; + + // get current member value + const JSON_VALUE *get () override + { + return &m_iterator->value; + } + + // set input document and initialize iterator on first position + 
void set (const JSON_DOC &new_doc) override + { + assert (new_doc.IsObject ()); + + m_input_doc = &new_doc; + m_iterator = new_doc.MemberBegin (); + } + + private: + JSON_MEMBER_ITERATOR m_iterator; +}; + +// JSON Array iterator - iterates through elements (values) +// +class JSON_ARRAY_ITERATOR : public JSON_ITERATOR +{ + public: + JSON_ARRAY_ITERATOR () + : m_iterator () + { + // + } + + // next element + void next () override; + // has more elements + bool has_next () override; + + const JSON_VALUE *get () override + { + return m_iterator; + } + + void set (const JSON_DOC &new_doc) override + { + assert (new_doc.IsArray ()); + + m_input_doc = &new_doc; + m_iterator = new_doc.GetArray ().Begin (); + } + + private: + JSON_VALUE_ITERATOR m_iterator; +}; + +void +JSON_ARRAY_ITERATOR::next () +{ + assert (has_next ()); + m_iterator++; +} + +bool +JSON_ARRAY_ITERATOR::has_next () +{ + if (m_input_doc == nullptr) + { + return false; + } + + JSON_VALUE_ITERATOR end = m_input_doc->GetArray ().End (); + + return (m_iterator + 1) != end; +} + +void +JSON_OBJECT_ITERATOR::next () +{ + assert (has_next ()); + m_iterator++; +} + +bool +JSON_OBJECT_ITERATOR::has_next () +{ + if (m_input_doc == nullptr) + { + return false; + } + + JSON_MEMBER_ITERATOR end = m_input_doc->MemberEnd (); + + return (m_iterator + 1) != end; +} + class JSON_VALIDATOR { public: - JSON_VALIDATOR (const char *schema_raw); + explicit JSON_VALIDATOR (const char *schema_raw); JSON_VALIDATOR (const JSON_VALIDATOR ©); JSON_VALIDATOR &operator= (const JSON_VALIDATOR ©); ~JSON_VALIDATOR (); @@ -173,73 +355,74 @@ class JSON_VALIDATOR }; /* -* JSON_BASE_HANDLER - This class acts like a rapidjson Handler -* -* The Handler is used by the json document to make checks on all of its nodes -* It is applied recursively by the Accept function and acts like a map functions -* You should inherit this class each time you want a specific function to apply to all the nodes in the json document -* and override only the 
methods that apply to the desired types of nodes -*/ + * JSON_BASE_HANDLER - This class acts like a rapidjson Handler + * + * The Handler is used by the json document to make checks on all of its nodes + * It is applied recursively by the Accept function and acts like a map functions + * You should inherit this class each time you want a specific function to apply to all the nodes in the json document + * and override only the methods that apply to the desired types of nodes + */ class JSON_BASE_HANDLER { public: - JSON_BASE_HANDLER () {}; + JSON_BASE_HANDLER () = default; + virtual ~JSON_BASE_HANDLER () = default; typedef typename JSON_DOC::Ch Ch; typedef unsigned SizeType; - bool Null () + virtual bool Null () { return true; } - bool Bool (bool b) + virtual bool Bool (bool b) { return true; } - bool Int (int i) + virtual bool Int (int i) { return true; } - bool Uint (unsigned i) + virtual bool Uint (unsigned i) { return true; } - bool Int64 (int64_t i) + virtual bool Int64 (std::int64_t i) { return true; } - bool Uint64 (uint64_t i) + virtual bool Uint64 (std::uint64_t i) { return true; } - bool Double (double d) + virtual bool Double (double d) { return true; } - bool RawNumber (const Ch *str, SizeType length, bool copy) + virtual bool RawNumber (const Ch *str, SizeType length, bool copy) { return true; } - bool String (const Ch *str, SizeType length, bool copy) + virtual bool String (const Ch *str, SizeType length, bool copy) { return true; } - bool StartObject () + virtual bool StartObject () { return true; } - bool Key (const Ch *str, SizeType length, bool copy) + virtual bool Key (const Ch *str, SizeType length, bool copy) { return true; } - bool EndObject (SizeType memberCount) + virtual bool EndObject (SizeType memberCount) { return true; } - bool StartArray () + virtual bool StartArray () { return true; } - bool EndArray (SizeType elementCount) + virtual bool EndArray (SizeType elementCount) { return true; } @@ -259,8 +442,8 @@ class JSON_WALKER 
protected: // we should not instantiate this class, but extend it - JSON_WALKER() {} - virtual ~JSON_WALKER() {} + JSON_WALKER () = default; + virtual ~JSON_WALKER () = default; virtual int CallBefore (JSON_VALUE &value) @@ -276,6 +459,20 @@ class JSON_WALKER return NO_ERROR; } + virtual int + CallOnArrayIterate () + { + // do nothing + return NO_ERROR; + } + + virtual int + CallOnKeyIterate (JSON_VALUE &key) + { + // do nothing + return NO_ERROR; + } + private: int WalkValue (JSON_VALUE &value); }; @@ -290,18 +487,57 @@ class JSON_WALKER class JSON_DUPLICATE_KEYS_CHECKER : public JSON_WALKER { public: - JSON_DUPLICATE_KEYS_CHECKER () {} - ~JSON_DUPLICATE_KEYS_CHECKER () {} + JSON_DUPLICATE_KEYS_CHECKER () = default; + ~JSON_DUPLICATE_KEYS_CHECKER () override = default; private: - int CallBefore (JSON_VALUE &value); + int CallBefore (JSON_VALUE &value) override; +}; + +class JSON_SEARCHER : public JSON_WALKER +{ + public: + JSON_SEARCHER (std::string starting_path, const DB_VALUE *pattern, const DB_VALUE *esc_char, bool find_all, + std::vector &paths) + : m_starting_path (std::move (starting_path)) + , m_found_paths (paths) + , m_find_all (find_all) + , m_skip_search (false) + , m_pattern (pattern) + , m_esc_char (esc_char) + { + // + } + + ~JSON_SEARCHER () override = default; + + private: + + int CallBefore (JSON_VALUE &value) override; + int CallAfter (JSON_VALUE &value) override; + int CallOnArrayIterate () override; + int CallOnKeyIterate (JSON_VALUE &key) override; + + std::stack m_index; + std::vector path_items; + std::string m_starting_path; + std::vector &m_found_paths; + bool m_find_all; + bool m_skip_search; + const DB_VALUE *m_pattern; + const DB_VALUE *m_esc_char; }; class JSON_SERIALIZER_LENGTH : public JSON_BASE_HANDLER { public: - JSON_SERIALIZER_LENGTH() : m_length (0) {} - ~JSON_SERIALIZER_LENGTH() {} + JSON_SERIALIZER_LENGTH () + : m_length (0) + { + // + } + + ~JSON_SERIALIZER_LENGTH () override = default; std::size_t GetLength () const { @@ 
-318,16 +554,17 @@ class JSON_SERIALIZER_LENGTH : public JSON_BASE_HANDLER return or_packed_string_length (str, NULL); } - bool Null (); - bool Bool (bool b); - bool Int (int i); - bool Double (double d); - bool String (const Ch *str, SizeType length, bool copy); - bool StartObject(); - bool Key (const Ch *str, SizeType length, bool copy); - bool StartArray (); - bool EndObject (SizeType memberCount); - bool EndArray (SizeType elementCount); + bool Null () override; + bool Bool (bool b) override; + bool Int (int i) override; + bool Int64 (std::int64_t i) override; + bool Double (double d) override; + bool String (const Ch *str, SizeType length, bool copy) override; + bool StartObject () override; + bool Key (const Ch *str, SizeType length, bool copy) override; + bool StartArray () override; + bool EndObject (SizeType memberCount) override; + bool EndArray (SizeType elementCount) override; private: std::size_t m_length; @@ -336,23 +573,27 @@ class JSON_SERIALIZER_LENGTH : public JSON_BASE_HANDLER class JSON_SERIALIZER : public JSON_BASE_HANDLER { public: - JSON_SERIALIZER (OR_BUF &buffer) - : m_buffer (&buffer) + explicit JSON_SERIALIZER (OR_BUF &buffer) + : m_error (NO_ERROR) + , m_buffer (&buffer) , m_size_pointers () { + // } - ~JSON_SERIALIZER() {} - bool Null (); - bool Bool (bool b); - bool Int (int i); - bool Double (double d); - bool String (const Ch *str, SizeType length, bool copy); - bool StartObject(); - bool Key (const Ch *str, SizeType length, bool copy); - bool StartArray (); - bool EndObject (SizeType memberCount); - bool EndArray (SizeType elementCount); + ~JSON_SERIALIZER () override = default; + + bool Null () override; + bool Bool (bool b) override; + bool Int (int i) override; + bool Int64 (std::int64_t i) override; + bool Double (double d) override; + bool String (const Ch *str, SizeType length, bool copy) override; + bool StartObject () override; + bool Key (const Ch *str, SizeType length, bool copy) override; + bool StartArray () override; + 
bool EndObject (SizeType memberCount) override; + bool EndArray (SizeType elementCount) override; private: bool SaveSizePointers (char *ptr); @@ -372,6 +613,77 @@ class JSON_SERIALIZER : public JSON_BASE_HANDLER // member/element count is saved at the end }; +/* + * JSON_PRETTY_WRITER - This class extends JSON_BASE_HANDLER + * + * The JSON document accepts the Handler and walks the document with respect to the DB_JSON_TYPE. + * The context is kept in the m_level_iterable stack which contains the value from the current level, which + * can be ARRAY, OBJECT or SCALAR. In case we are in an iterable (ARRAY/OBJECT) we need to keep track if of the first + * element because it's important for printing the delimiters. + * + * The formatting output respects the following rules: + * - Each array element or object member appears on a separate line, indented by one additional level as + * compared to its parent + * - Each level of indentation adds two leading spaces + * - A comma separating individual array elements or object members is printed before the newline that + * separates the two elements or members + * - The key and the value of an object member are separated by a colon followed by a space (': ') + * - An empty object or array is printed on a single line. 
No space is printed between the opening and closing brace + */ +class JSON_PRETTY_WRITER : public JSON_BASE_HANDLER +{ + public: + JSON_PRETTY_WRITER () + : m_buffer () + , m_current_indent (0) + { + // default ctor + } + + ~JSON_PRETTY_WRITER () override = default; + + bool Null () override; + bool Bool (bool b) override; + bool Int (int i) override; + bool Int64 (std::int64_t i) override; + bool Double (double d) override; + bool String (const Ch *str, SizeType length, bool copy) override; + bool StartObject () override; + bool Key (const Ch *str, SizeType length, bool copy) override; + bool StartArray () override; + bool EndObject (SizeType memberCount) override; + bool EndArray (SizeType elementCount) override; + + std::string &ToString () + { + return m_buffer; + } + + private: + void WriteDelimiters (bool is_key = false); + void PushLevel (const DB_JSON_TYPE &type); + void PopLevel (); + void SetIndentOnNewLine (); + + struct level_context + { + DB_JSON_TYPE type; + bool is_first; + + level_context (DB_JSON_TYPE type, bool is_first) + : type (type) + , is_first (is_first) + { + // + } + }; + + std::string m_buffer; // the buffer that stores the json + size_t m_current_indent; // number of white spaces for the current level + static const size_t LEVEL_INDENT_UNIT = 2; // number of white spaces of indent level + std::stack m_level_stack; // keep track of the current iterable (ARRAY/OBJECT) +}; + const bool JSON_PRIVATE_ALLOCATOR::kNeedFree = true; const int JSON_DOC::MAX_CHUNK_SIZE = 64 * 1024; /* TODO does 64K serve our needs? 
*/ @@ -388,18 +700,25 @@ static int db_json_value_is_contained_in_doc_helper (const JSON_VALUE *doc, cons static DB_JSON_TYPE db_json_get_type_of_value (const JSON_VALUE *val); static bool db_json_value_has_numeric_type (const JSON_VALUE *doc); static int db_json_get_int_from_value (const JSON_VALUE *val); +static std::int64_t db_json_get_bigint_from_value (const JSON_VALUE *val); static double db_json_get_double_from_value (const JSON_VALUE *doc); static const char *db_json_get_string_from_value (const JSON_VALUE *doc); static char *db_json_copy_string_from_value (const JSON_VALUE *doc); static char *db_json_get_bool_as_str_from_value (const JSON_VALUE *doc); +static bool db_json_get_bool_from_value (const JSON_VALUE *doc); static char *db_json_bool_to_string (bool b); -static void db_json_merge_two_json_objects (JSON_DOC &first, const JSON_DOC *second); -static void db_json_merge_two_json_arrays (JSON_DOC &array1, const JSON_DOC *array2); -static void db_json_merge_two_json_by_array_wrapping (JSON_DOC &j1, const JSON_DOC *j2); +static void db_json_merge_two_json_objects_preserve (const JSON_VALUE *source, JSON_VALUE &dest, + JSON_PRIVATE_MEMPOOL &allocator); +static void db_json_merge_two_json_objects_patch (const JSON_VALUE *source, JSON_VALUE &dest, + JSON_PRIVATE_MEMPOOL &allocator); +static void db_json_merge_two_json_arrays (JSON_DOC &dest, const JSON_DOC *source); +static void db_json_merge_two_json_by_array_wrapping (JSON_DOC &dest, const JSON_DOC *source); static void db_json_copy_doc (JSON_DOC &dest, const JSON_DOC *src); static void db_json_get_paths_helper (const JSON_VALUE &obj, const std::string &sql_path, std::vector &paths); +static int db_json_search_helper (JSON_DOC &obj, const DB_VALUE *pattern, const DB_VALUE *esc_char, + bool find_all, const std::string &sql_path, bool &found, std::vector &paths); static void db_json_normalize_path (std::string &path_string); static void db_json_remove_leading_zeros_index (std::string &index); static bool 
db_json_isspace (const unsigned char &ch); @@ -424,6 +743,8 @@ static const char *db_json_get_json_type_as_str (const DB_JSON_TYPE &json_type); static int db_json_er_set_expected_other_type (const char *file_name, const int line_no, const std::string &path, const DB_JSON_TYPE &found_type, const DB_JSON_TYPE &expected_type, const DB_JSON_TYPE &expected_type_optional = DB_JSON_NULL); +static int db_json_array_shift_values (const JSON_DOC *value, JSON_DOC &doc, const std::string &path); +static int db_json_resolve_json_parent (JSON_DOC &doc, const std::string &path, JSON_VALUE *&resulting_json_parent); static int db_json_insert_helper (const JSON_DOC *value, JSON_DOC &doc, JSON_POINTER &p, const std::string &path); static int db_json_contains_duplicate_keys (JSON_DOC &doc); static int db_json_keys_func (const JSON_DOC &doc, JSON_DOC &result_json, const char *raw_path); @@ -438,10 +759,14 @@ static int db_json_deserialize_doc_internal (OR_BUF *buf, JSON_VALUE &value, JSO static int db_json_or_buf_underflow (OR_BUF *buf, size_t length); static int db_json_unpack_string_to_value (OR_BUF *buf, JSON_VALUE &value, JSON_PRIVATE_MEMPOOL &doc_allocator); static int db_json_unpack_int_to_value (OR_BUF *buf, JSON_VALUE &value); +static int db_json_unpack_bigint_to_value (OR_BUF *buf, JSON_VALUE &value); static int db_json_unpack_bool_to_value (OR_BUF *buf, JSON_VALUE &value); static int db_json_unpack_object_to_value (OR_BUF *buf, JSON_VALUE &value, JSON_PRIVATE_MEMPOOL &doc_allocator); static int db_json_unpack_array_to_value (OR_BUF *buf, JSON_VALUE &value, JSON_PRIVATE_MEMPOOL &doc_allocator); +static void db_json_merge_func_preserve (const JSON_DOC *source, JSON_DOC &dest); +static void db_json_merge_func_patch (const JSON_VALUE *source, JSON_VALUE &dest, JSON_PRIVATE_MEMPOOL &allocator); + int JSON_DUPLICATE_KEYS_CHECKER::CallBefore (JSON_VALUE &value) { std::vector inserted_keys; @@ -468,9 +793,106 @@ int JSON_DUPLICATE_KEYS_CHECKER::CallBefore (JSON_VALUE &value) return 
NO_ERROR; } -JSON_VALIDATOR::JSON_VALIDATOR (const char *schema_raw) : m_schema (NULL), - m_validator (NULL), - m_is_loaded (false) +int JSON_SEARCHER::CallBefore (JSON_VALUE &value) +{ + if (m_skip_search) + { + return NO_ERROR; + } + + if (value.IsArray ()) + { + m_index.push (0); + } + + if (value.IsObject () || value.IsArray ()) + { + path_items.emplace_back (); + } + + return NO_ERROR; +} + +int JSON_SEARCHER::CallAfter (JSON_VALUE &value) +{ + if (m_skip_search) + { + return NO_ERROR; + } + + int error_code = NO_ERROR; + + if (value.IsString ()) + { + const char *json_str = value.GetString (); + DB_VALUE str_val; + + db_make_null (&str_val); + error_code = db_make_string (&str_val, (char *) json_str); + if (error_code) + { + return error_code; + } + + int match; + db_string_like (&str_val, m_pattern, m_esc_char, &match); + + if (match) + { + std::stringstream full_path; + + full_path << "\"" << m_starting_path; + for (const auto &item : path_items) + { + full_path << item; + } + full_path << "\""; + + m_found_paths.emplace_back (full_path.str ()); + + if (!m_find_all) + { + m_skip_search = true; + } + } + } + + if (value.IsArray ()) + { + m_index.pop (); + } + + if (value.IsArray () || value.IsObject ()) + { + path_items.pop_back (); + } + + return error_code; +} + +int JSON_SEARCHER::CallOnKeyIterate (JSON_VALUE &key) +{ + std::string path_item = "."; + path_item += key.GetString (); + + path_items.back () = path_item; + return NO_ERROR; +} + +int JSON_SEARCHER::CallOnArrayIterate () +{ + std::string path_item = "["; + path_item += std::to_string (m_index.top ()++); + path_item += "]"; + + path_items.back () = path_item; + return NO_ERROR; +} + +JSON_VALIDATOR::JSON_VALIDATOR (const char *schema_raw) + : m_schema (NULL), + m_validator (NULL), + m_is_loaded (false) { m_schema_raw = strdup (schema_raw); /* @@ -484,11 +906,13 @@ JSON_VALIDATOR::~JSON_VALIDATOR (void) if (m_schema != NULL) { delete m_schema; + m_schema = NULL; } if (m_validator != NULL) { delete 
m_validator; + m_validator = NULL; } if (m_schema_raw != NULL) @@ -664,6 +1088,76 @@ db_json_doc_to_value (const JSON_DOC &doc) return reinterpret_cast (doc); } +void +db_json_iterator_next (JSON_ITERATOR &json_itr) +{ + json_itr.next (); +} + +const JSON_DOC * +db_json_iterator_get_document (JSON_ITERATOR &json_itr) +{ + return json_itr.get_value_to_doc (); +} + +bool +db_json_iterator_has_next (JSON_ITERATOR &json_itr) +{ + return json_itr.has_next (); +} + +void +db_json_set_iterator (JSON_ITERATOR *&json_itr, const JSON_DOC &new_doc) +{ + json_itr->set (new_doc); +} + +void +db_json_reset_iterator (JSON_ITERATOR *&json_itr) +{ + if (json_itr != NULL) + { + json_itr->reset (); + } +} + +bool +db_json_iterator_is_empty (const JSON_ITERATOR &json_itr) +{ + return json_itr.is_empty (); +} + +JSON_ITERATOR * +db_json_create_iterator (const DB_JSON_TYPE &type) +{ + if (type == DB_JSON_TYPE::DB_JSON_OBJECT) + { + return new JSON_OBJECT_ITERATOR (); + } + else if (type == DB_JSON_TYPE::DB_JSON_ARRAY) + { + return new JSON_ARRAY_ITERATOR (); + } + + return NULL; +} + +void +db_json_delete_json_iterator (JSON_ITERATOR *&json_itr) +{ + delete json_itr; + json_itr = NULL; +} + +void +db_json_clear_json_iterator (JSON_ITERATOR *&json_itr) +{ + if (json_itr != NULL) + { + json_itr->clear_content (); + } +} + bool db_json_is_valid (const char *json_str) { @@ -690,6 +1184,10 @@ db_json_get_type_as_str (const JSON_DOC *document) { return "INTEGER"; } + else if (document->IsInt64 ()) + { + return "BIGINT"; + } else if (document->IsDouble ()) { return "DOUBLE"; @@ -725,6 +1223,8 @@ db_json_get_json_type_as_str (const DB_JSON_TYPE &json_type) return "JSON_OBJECT"; case DB_JSON_INT: return "INTEGER"; + case DB_JSON_BIGINT: + return "BIGINT"; case DB_JSON_DOUBLE: return "DOUBLE"; case DB_JSON_STRING: @@ -761,7 +1261,7 @@ db_json_get_length (const JSON_DOC *document) if (document->IsObject ()) { - int length = 0; + unsigned int length = 0; for (JSON_VALUE::ConstMemberIterator itr = 
document->MemberBegin (); itr != document->MemberEnd (); ++itr) { @@ -775,7 +1275,7 @@ db_json_get_length (const JSON_DOC *document) } /* - * json_depth() + * json_depth () * one array or one object increases the depth by 1 */ @@ -785,6 +1285,32 @@ db_json_get_depth (const JSON_DOC *doc) return db_json_value_get_depth (doc); } +/* + * db_json_unquote () + * skip escaping for JSON_DOC strings + */ + +int +db_json_unquote (const JSON_DOC &doc, char *&result_str) +{ + assert (result_str == nullptr); + + if (!doc.IsString ()) + { + result_str = db_json_get_raw_json_body_from_document (&doc); + } + else + { + result_str = db_private_strdup (NULL, doc.GetString ()); + + if (result_str == nullptr) + { + return ER_OUT_OF_VIRTUAL_MEMORY; + } + } + return NO_ERROR; +} + static unsigned int db_json_value_get_depth (const JSON_VALUE *doc) { @@ -796,57 +1322,128 @@ db_json_value_get_depth (const JSON_VALUE *doc) { unsigned int depth = db_json_value_get_depth (itr); - if (depth > max) - { - max = depth; - } - } + if (depth > max) + { + max = depth; + } + } + + return max + 1; + } + else if (doc->IsObject ()) + { + unsigned int max = 0; + + for (JSON_VALUE::ConstMemberIterator itr = doc->MemberBegin (); itr != doc->MemberEnd (); ++itr) + { + unsigned int depth = db_json_value_get_depth (&itr->value); + + if (depth > max) + { + max = depth; + } + } + + return max + 1; + } + else + { + /* no depth */ + return 1; + } +} + +/* + * db_json_extract_document_from_path () - Extracts from within the json a value based on the given path + * + * return : error code + * doc_to_be_inserted (in) : document to be inserted + * doc_destination (in) : destination document + * raw_path (in) : insertion path + * example : json_extract('{"a":["b", 123]}', '/a/1') yields 123 + */ + +int +db_json_extract_document_from_path (const JSON_DOC *document, const char *raw_path, JSON_DOC *&result) +{ + int error_code = NO_ERROR; + std::string json_pointer_string; + + if (document == NULL) + { + if (result != 
NULL) + { + result->SetNull (); + } + return NO_ERROR; + } + + // path must be JSON pointer + error_code = db_json_convert_sql_path_to_pointer (raw_path, json_pointer_string); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + JSON_POINTER p (json_pointer_string.c_str ()); + const JSON_VALUE *resulting_json = NULL; + + if (!p.IsValid ()) + { + result = NULL; + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_JSON_INVALID_PATH, 0); + return ER_JSON_INVALID_PATH; + } + + // the json from the specified path + resulting_json = p.Get (*document); + + DB_JSON_TYPE type = db_json_get_type (document); - return max + 1; - } - else if (doc->IsObject ()) + if (resulting_json != NULL) { - unsigned int max = 0; - - for (JSON_VALUE::ConstMemberIterator itr = doc->MemberBegin (); itr != doc->MemberEnd (); ++itr) + if (result == NULL) { - unsigned int depth = db_json_value_get_depth (&itr->value); - - if (depth > max) - { - max = depth; - } + result = db_json_allocate_doc (); } - return max + 1; + result->CopyFrom (*resulting_json, result->GetAllocator ()); } else { - /* no depth */ - return 1; + if (result != NULL) + { + result->SetNull (); + } } + + return NO_ERROR; } /* - * db_json_extract_document_from_path () - Extracts from within the json a value based on the given path + * db_json_contains_path () - Checks if the document contains data at given path * * return : error code - * doc_to_be_inserted (in) : document to be inserted - * doc_destination (in) : destination document - * raw_path (in) : insertion path - * example : json_extract('{"a":["b", 123]}', '/a/1') yields 123 + * document (in) : document where to search + * raw_path (in) : check path + * result (out) : true/false */ - int -db_json_extract_document_from_path (const JSON_DOC *document, const char *raw_path, JSON_DOC *&result) +db_json_contains_path (const JSON_DOC *document, const char *raw_path, bool &result) { int error_code = NO_ERROR; std::string json_pointer_string; - result = 
NULL; + + result = false; + + if (document == NULL) + { + return false; + } // path must be JSON pointer error_code = db_json_convert_sql_path_to_pointer (raw_path, json_pointer_string); - if (error_code != NO_ERROR) { ASSERT_ERROR (); @@ -858,22 +1455,14 @@ db_json_extract_document_from_path (const JSON_DOC *document, const char *raw_pa if (!p.IsValid ()) { - result = NULL; er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_JSON_INVALID_PATH, 0); return ER_JSON_INVALID_PATH; } - // the json from the specified path resulting_json = p.Get (*document); - if (resulting_json != NULL) { - result = db_json_allocate_doc (); - result->CopyFrom (*resulting_json, result->GetAllocator ()); - } - else - { - result = NULL; + result = true; } return NO_ERROR; @@ -883,15 +1472,13 @@ char * db_json_get_raw_json_body_from_document (const JSON_DOC *doc) { JSON_STRING_BUFFER buffer; - rapidjson::Writer writer (buffer); - char *json_body; + rapidjson::Writer json_default_writer (buffer); buffer.Clear (); - doc->Accept (writer); - json_body = db_private_strdup (NULL, buffer.GetString ()); + doc->Accept (json_default_writer); - return json_body; + return db_private_strdup (NULL, buffer.GetString ()); } char * @@ -900,10 +1487,10 @@ db_json_get_json_body_from_document (const JSON_DOC &doc) #if TODO_OPTIMIZE_JSON_BODY_STRING /* TODO std::string json_body (std::unique_ptr - (db_json_get_raw_json_body_from_document (&doc), JSON_RAW_STRING_DELETER()).get()); + (db_json_get_raw_json_body_from_document (&doc), JSON_RAW_STRING_DELETER ()).get ()); doc.SetJsonBody (json_body); - return doc.GetJsonBody().c_str(); + return doc.GetJsonBody ().c_str (); */ #endif // TODO_OPTIMIZE_JSON_BODY_STRING @@ -928,6 +1515,7 @@ db_json_add_json_value_to_object (JSON_DOC &doc, const char *name, JSON_VALUE &v key.SetString (name, (rapidjson::SizeType) strlen (name), doc.GetAllocator ()); doc.AddMember (key, value, doc.GetAllocator ()); + return NO_ERROR; } @@ -935,6 +1523,7 @@ int db_json_add_member_to_object 
(JSON_DOC *doc, const char *name, const char *value) { JSON_VALUE val; + val.SetString (value, (rapidjson::SizeType) strlen (value), doc->GetAllocator ()); return db_json_add_json_value_to_object (*doc, name, val); @@ -944,11 +1533,22 @@ int db_json_add_member_to_object (JSON_DOC *doc, const char *name, int value) { JSON_VALUE val; + val.SetInt (value); return db_json_add_json_value_to_object (*doc, name, val); } +int +db_json_add_member_to_object (JSON_DOC *doc, const char *name, std::int64_t value) +{ + JSON_VALUE val; + + val.SetInt64 (value); + + return db_json_add_json_value_to_object (*doc, name, val); +} + int db_json_add_member_to_object (JSON_DOC *doc, const char *name, const JSON_DOC *value) { @@ -970,6 +1570,7 @@ int db_json_add_member_to_object (JSON_DOC *doc, const char *name, double value) { JSON_VALUE val; + val.SetDouble (value); return db_json_add_json_value_to_object (*doc, name, val); @@ -1005,6 +1606,17 @@ db_json_add_element_to_array (JSON_DOC *doc, int value) doc->PushBack (JSON_VALUE ().SetInt (value), doc->GetAllocator ()); } +void +db_json_add_element_to_array (JSON_DOC *doc, std::int64_t value) +{ + if (!doc->IsArray ()) + { + doc->SetArray (); + } + + doc->PushBack (JSON_VALUE ().SetInt64 (value), doc->GetAllocator ()); +} + void db_json_add_element_to_array (JSON_DOC *doc, double value) { @@ -1053,7 +1665,7 @@ db_json_contains_duplicate_keys (JSON_DOC &doc) error_code = dup_keys_checker.WalkDocument (doc); if (error_code != NO_ERROR) { - ASSERT_ERROR(); + ASSERT_ERROR (); } return error_code; @@ -1063,6 +1675,7 @@ static int db_json_get_json_from_str (const char *json_raw, JSON_DOC &doc, size_t json_raw_length) { int error_code = NO_ERROR; + if (json_raw == NULL) { return NO_ERROR; @@ -1078,7 +1691,7 @@ db_json_get_json_from_str (const char *json_raw, JSON_DOC &doc, size_t json_raw_ error_code = db_json_contains_duplicate_keys (doc); if (error_code != NO_ERROR) { - ASSERT_ERROR(); + ASSERT_ERROR (); return error_code; } @@ -1134,9 
+1747,9 @@ db_json_copy_doc (JSON_DOC &dest, const JSON_DOC *src) } static int -db_json_insert_helper (const JSON_DOC *value, JSON_DOC &doc, JSON_POINTER &p, const std::string &path) +db_json_resolve_json_parent (JSON_DOC &doc, const std::string &path, JSON_VALUE *&resulting_json_parent) { - std::size_t found = path.find_last_of ("/"); + std::size_t found = path.find_last_of ('/'); if (found == std::string::npos) { assert (false); @@ -1144,7 +1757,7 @@ db_json_insert_helper (const JSON_DOC *value, JSON_DOC &doc, JSON_POINTER &p, co } // parent pointer - const JSON_POINTER pointer_parent (path.substr (0, found).c_str ()); + JSON_POINTER pointer_parent (path.substr (0, found).c_str ()); if (!pointer_parent.IsValid ()) { /* this shouldn't happen */ @@ -1152,7 +1765,7 @@ db_json_insert_helper (const JSON_DOC *value, JSON_DOC &doc, JSON_POINTER &p, co return ER_FAILED; } - JSON_VALUE *resulting_json_parent = pointer_parent.Get (doc); + resulting_json_parent = pointer_parent.Get (doc); // the parent does not exist if (resulting_json_parent == NULL) { @@ -1181,9 +1794,27 @@ db_json_insert_helper (const JSON_DOC *value, JSON_DOC &doc, JSON_POINTER &p, co return db_json_er_set_expected_other_type (ARG_FILE_LINE, path, parent_json_type, DB_JSON_ARRAY); } + return NO_ERROR; +} + +static int +db_json_insert_helper (const JSON_DOC *value, JSON_DOC &doc, JSON_POINTER &p, const std::string &path) +{ + int error_code = NO_ERROR; + JSON_VALUE *resulting_json_parent = NULL; + + // we don't need result_json_parent after this statement + error_code = db_json_resolve_json_parent (doc, path, resulting_json_parent); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + // put the value at the specified path p.Set (doc, *value, doc.GetAllocator ()); - return NO_ERROR; + + return error_code; } /* @@ -1256,7 +1887,6 @@ db_json_replace_func (const JSON_DOC *new_value, JSON_DOC &doc, const char *raw_ // path must be JSON pointer error_code = 
db_json_convert_sql_path_to_pointer (raw_path, json_pointer_string); - if (error_code != NO_ERROR) { ASSERT_ERROR (); @@ -1307,7 +1937,6 @@ db_json_set_func (const JSON_DOC *value, JSON_DOC &doc, const char *raw_path) // path must be JSON pointer error_code = db_json_convert_sql_path_to_pointer (raw_path, json_pointer_string); - if (error_code != NO_ERROR) { ASSERT_ERROR (); @@ -1324,7 +1953,6 @@ db_json_set_func (const JSON_DOC *value, JSON_DOC &doc, const char *raw_path) } resulting_json = p.Get (doc); - if (resulting_json != NULL) { // replace the old value with the new one if the path exists @@ -1351,7 +1979,6 @@ db_json_remove_func (JSON_DOC &doc, const char *raw_path) // path must be JSON pointer error_code = db_json_convert_sql_path_to_pointer (raw_path, json_pointer_string); - if (error_code != NO_ERROR) { ASSERT_ERROR (); @@ -1378,6 +2005,51 @@ db_json_remove_func (JSON_DOC &doc, const char *raw_path) return NO_ERROR; } +/* + * db_json_search_func () - Find json values that match the pattern and gather their paths + * + * return : error code + * doc (in) : json document + * pattern (in) : pattern to match against + * esc_char (in) : escape sequence used to match the pattern + * starting_paths (in) : prefixes used in the search + * paths (out) : full paths found + */ +int +db_json_search_func (JSON_DOC &doc, const DB_VALUE *pattern, const DB_VALUE *esc_char, bool find_all, + std::vector &starting_paths, std::vector &paths) +{ + for (auto &starting_path : starting_paths) + { + JSON_DOC *resolved = nullptr; + int error_code = db_json_extract_document_from_path (&doc, starting_path.c_str (), resolved); + + if (error_code != NO_ERROR) + { + return error_code; + } + + if (resolved == nullptr) + { + continue; + } + + bool found = false; + error_code = db_json_search_helper (*resolved, pattern, esc_char, find_all, starting_path, found, paths); + if (error_code != NO_ERROR) + { + return error_code; + } + + if (found && !find_all) + { + break; + } + } + + return 
NO_ERROR; +} + /* * db_json_array_append_func () - Append the value to the end of the indicated array within a JSON document * @@ -1401,7 +2073,6 @@ db_json_array_append_func (const JSON_DOC *value, JSON_DOC &doc, const char *raw // path must be JSON pointer error_code = db_json_convert_sql_path_to_pointer (raw_path, json_pointer_string); - if (error_code != NO_ERROR) { ASSERT_ERROR (); @@ -1418,7 +2089,6 @@ db_json_array_append_func (const JSON_DOC *value, JSON_DOC &doc, const char *raw } resulting_json = p.Get (doc); - if (resulting_json == NULL) { return db_json_er_set_path_does_not_exist (ARG_FILE_LINE, json_pointer_string, &doc); @@ -1440,6 +2110,85 @@ db_json_array_append_func (const JSON_DOC *value, JSON_DOC &doc, const char *raw return NO_ERROR; } +static int +db_json_array_shift_values (const JSON_DOC *value, JSON_DOC &doc, const std::string &path) +{ + int error_code = NO_ERROR; + JSON_VALUE *resulting_json_parent = NULL; + + error_code = db_json_resolve_json_parent (doc, path, resulting_json_parent); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + assert (resulting_json_parent != NULL && resulting_json_parent->IsArray ()); + + int last_token_index = std::stoi (path.substr (path.find_last_of ('/') + 1)); + + // add the value at the end of the array + JSON_VALUE value_copy (*value, doc.GetAllocator ()); + resulting_json_parent->GetArray ().PushBack (value_copy, doc.GetAllocator ()); + + // move the value to its correct index by swapping adjacent values + for (int i = resulting_json_parent->GetArray ().Size () - 1; i > last_token_index; --i) + { + resulting_json_parent->GetArray ()[i].Swap (resulting_json_parent->GetArray ()[i - 1]); + } + + return NO_ERROR; +} + +/* + * db_json_array_insert_func () - Insert the value to the path from the indicated array within a JSON document + * + * return : error code + * value (in) : the value to be added in the array + * doc (in) : json document + * raw_path (in) : specified path + */ 
+int +db_json_array_insert_func (const JSON_DOC *value, JSON_DOC &doc, const char *raw_path) +{ + int error_code = NO_ERROR; + std::string json_pointer_string; + + if (value == NULL) + { + // unexpected + assert (false); + return ER_FAILED; + } + + // path must be JSON pointer + error_code = db_json_convert_sql_path_to_pointer (raw_path, json_pointer_string); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + JSON_POINTER p (json_pointer_string.c_str ()); + JSON_VALUE *resulting_json = NULL; + + if (!p.IsValid ()) + { + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_JSON_INVALID_PATH, 0); + return ER_JSON_INVALID_PATH; + } + + resulting_json = p.Get (doc); + if (resulting_json != NULL) + { + // need to shift any following values to the right + return db_json_array_shift_values (value, doc, json_pointer_string); + } + + // here starts the INSERTION part + return db_json_insert_helper (value, doc, p, json_pointer_string); +} + DB_JSON_TYPE db_json_get_type (const JSON_DOC *doc) { @@ -1462,6 +2211,10 @@ db_json_get_type_of_value (const JSON_VALUE *val) { return DB_JSON_INT; } + else if (val->IsInt64 ()) + { + return DB_JSON_BIGINT; + } else if (val->IsFloat () || val->IsDouble ()) { return DB_JSON_DOUBLE; @@ -1487,25 +2240,72 @@ db_json_get_type_of_value (const JSON_VALUE *val) } /* - * db_json_merge_two_json_objects () - Merge the source object into the destination object + * db_json_merge_two_json_objects_patch () - Merge the source object into the destination object handling duplicate keys + * + * return : error code + * dest (in) : json where to merge + * source (in) : json to merge + * example : let dest = '{"a" : "b"}' + * let source = '{"a" : 3}' + * after JSON_MERGE_PATCH (dest, source), dest = {"a" : 3} + */ +void +db_json_merge_two_json_objects_patch (const JSON_VALUE *source, JSON_VALUE &dest, JSON_PRIVATE_MEMPOOL &allocator) +{ + JSON_VALUE source_copy; + + assert (dest.IsObject () && source->IsObject ()); + + // create a copy 
for the source json + source_copy.CopyFrom (*source, allocator); + + // iterate through each member from the source json and insert it into the dest + for (JSON_VALUE::MemberIterator itr = source_copy.MemberBegin (); itr != source_copy.MemberEnd (); ++itr) + { + const char *name = itr->name.GetString (); + + // if the key is in both jsons + if (dest.HasMember (name)) + { + // if the second argument value is DB_JSON_NULL, remove that member + if (itr->value.IsNull ()) + { + dest.RemoveMember (name); + } + else + { + // recursively merge_patch with the current values from both JSON_OBJECTs + db_json_merge_func_patch (&itr->value, dest[name], allocator); + } + } + else + { + dest.AddMember (itr->name, itr->value, allocator); + } + } +} + +/* + * db_json_merge_two_json_objects_preserve () - Merge the source object into the destination object, + * preserving duplicate keys (adding their values in a JSON_ARRAY) * * return : error code * dest (in) : json where to merge * source (in) : json to merge + * patch (in) : (true/false) preserve or not the duplicate keys * example : let dest = '{"a" : "b"}' * let source = '{"c" : "d"}' - * after JSON_MERGE(dest, source), dest = {"a" : "b", "c" : "d"} + * after JSON_MERGE (dest, source), dest = {"a" : "b", "c" : "d"} */ void -db_json_merge_two_json_objects (JSON_DOC &dest, const JSON_DOC *source) +db_json_merge_two_json_objects_preserve (const JSON_VALUE *source, JSON_VALUE &dest, JSON_PRIVATE_MEMPOOL &allocator) { JSON_VALUE source_copy; - assert (db_json_get_type (&dest) == DB_JSON_OBJECT); - assert (db_json_get_type (source) == DB_JSON_OBJECT); + assert (dest.IsObject () && source->IsObject ()); // create a copy for the source json - source_copy.CopyFrom (*source, dest.GetAllocator ()); + source_copy.CopyFrom (*source, allocator); // iterate through each member from the source json and insert it into the dest for (JSON_VALUE::MemberIterator itr = source_copy.MemberBegin (); itr != source_copy.MemberEnd (); ++itr) @@ -1515,19 
+2315,19 @@ db_json_merge_two_json_objects (JSON_DOC &dest, const JSON_DOC *source) // if the key is in both jsons if (dest.HasMember (name)) { - if (dest [name].IsArray ()) + if (dest[name].IsArray ()) { - dest [name].GetArray ().PushBack (itr->value, dest.GetAllocator ()); + dest[name].GetArray ().PushBack (itr->value, allocator); } else { - db_json_value_wrap_as_array (dest[name], dest.GetAllocator ()); - dest [name].PushBack (itr->value, dest.GetAllocator ()); + db_json_value_wrap_as_array (dest[name], allocator); + dest[name].PushBack (itr->value, allocator); } } else { - dest.AddMember (itr->name, itr->value, dest.GetAllocator ()); + dest.AddMember (itr->name, itr->value, allocator); } } } @@ -1632,7 +2432,7 @@ db_json_validate_json (const char *json_body) JSON_DOC *db_json_allocate_doc () { - JSON_DOC *doc = new JSON_DOC(); + JSON_DOC *doc = new JSON_DOC (); return doc; } @@ -1674,44 +2474,86 @@ db_json_validate_doc (JSON_VALIDATOR *validator, JSON_DOC *doc) return validator->validate (doc); } -void -db_json_delete_validator (JSON_VALIDATOR *&validator) +void +db_json_delete_validator (JSON_VALIDATOR *&validator) +{ + delete validator; + validator = NULL; +} + +bool +db_json_are_validators_equal (JSON_VALIDATOR *val1, JSON_VALIDATOR *val2) +{ + if (val1 != NULL && val2 != NULL) + { + return (strcmp (val1->get_schema_raw (), val2->get_schema_raw ()) == 0); + } + else + { + return val1 == NULL && val2 == NULL; + } +} + +static void +db_json_merge_func_preserve (const JSON_DOC *source, JSON_DOC &dest) +{ + DB_JSON_TYPE dest_type = db_json_get_type (&dest); + DB_JSON_TYPE source_type = db_json_get_type (source); + + if (dest_type == source_type) + { + if (dest_type == DB_JSON_OBJECT) + { + db_json_merge_two_json_objects_preserve (source, dest, dest.GetAllocator ()); + } + else if (dest_type == DB_JSON_ARRAY) + { + db_json_merge_two_json_arrays (dest, source); + } + else + { + db_json_merge_two_json_by_array_wrapping (dest, source); + } + } + else + { + 
db_json_merge_two_json_by_array_wrapping (dest, source); + } +} + +static void +db_json_merge_func_patch (const JSON_VALUE *source, JSON_VALUE &dest, JSON_PRIVATE_MEMPOOL &allocator) { - delete validator; - validator = NULL; -} + DB_JSON_TYPE dest_type = db_json_get_type_of_value (&dest); + DB_JSON_TYPE source_type = db_json_get_type_of_value (source); -bool -db_json_are_validators_equal (JSON_VALIDATOR *val1, JSON_VALIDATOR *val2) -{ - if (val1 != NULL && val2 != NULL) - { - return (strcmp (val1->get_schema_raw (), val2->get_schema_raw ()) == 0); - } - else if (val1 == NULL && val2 == NULL) + if (dest_type != DB_JSON_OBJECT || source_type != DB_JSON_OBJECT) { - return true; + dest.CopyFrom (*source, allocator); } else { - return false; + db_json_merge_two_json_objects_patch (source, dest, allocator); } } /* - * db_json_merge_func () - * j1 (in) - * j2 (in) - * doc (out): the result - * Json objects are merged like this: - * {"a":"b", "x":"y"} M {"a":"c"} -> {"a":["b","c"], "x":"y"} - * Json arrays as such: - * ["a", "b"] M ["x", "y"] -> ["a", "b", "x", "y"] - * Json scalars are transformed into arrays and merged normally + * db_json_merge_func () - Merge the source json into destination json + * + * return : error code + * dest (in) : json where to merge + * source (in) : json to merge + * patch (in) : how to handle duplicate keys + * + * example : let x = { "a": 1, "b": 2 } + * y = { "a": 3, "c": 4 } + * z = { "a": 5, "d": 6 } + * + * result PATCH : {"a": 5, "b": 2, "c": 4, "d": 6} + * result PRESERVE : {"a": [1, 3, 5], "b": 2, "c": 4, "d": 6} */ - int -db_json_merge_func (const JSON_DOC *source, JSON_DOC *&dest) +db_json_merge_func (const JSON_DOC *source, JSON_DOC *&dest, bool patch) { if (dest == NULL) { @@ -1720,24 +2562,13 @@ db_json_merge_func (const JSON_DOC *source, JSON_DOC *&dest) return NO_ERROR; } - if (db_json_get_type (dest) == db_json_get_type (source)) + if (patch) { - if (db_json_get_type (dest) == DB_JSON_OBJECT) - { - 
db_json_merge_two_json_objects (*dest, source); - } - else if (db_json_get_type (dest) == DB_JSON_ARRAY) - { - db_json_merge_two_json_arrays (*dest, source); - } - else - { - db_json_merge_two_json_by_array_wrapping (*dest, source); - } + db_json_merge_func_patch (source, *dest, dest->GetAllocator ()); } else { - db_json_merge_two_json_by_array_wrapping (*dest, source); + db_json_merge_func_preserve (source, *dest); } return NO_ERROR; @@ -1749,6 +2580,12 @@ db_json_get_int_from_document (const JSON_DOC *doc) return db_json_get_int_from_value (doc); } +std::int64_t +db_json_get_bigint_from_document (const JSON_DOC *doc) +{ + return db_json_get_bigint_from_value (doc); +} + double db_json_get_double_from_document (const JSON_DOC *doc) { @@ -1767,6 +2604,12 @@ db_json_get_bool_as_str_from_document (const JSON_DOC *doc) return db_json_get_bool_as_str_from_value (doc); } +bool +db_json_get_bool_from_document (const JSON_DOC *doc) +{ + return db_json_get_bool_from_value (doc); +} + char * db_json_copy_string_from_document (const JSON_DOC *doc) { @@ -1787,6 +2630,20 @@ db_json_get_int_from_value (const JSON_VALUE *val) return val->GetInt (); } +std::int64_t +db_json_get_bigint_from_value (const JSON_VALUE *val) +{ + if (val == NULL) + { + assert (false); + return 0; + } + + assert (db_json_get_type_of_value (val) == DB_JSON_BIGINT); + + return val->GetInt64 (); +} + double db_json_get_double_from_value (const JSON_VALUE *doc) { @@ -1837,6 +2694,12 @@ db_json_bool_to_string (bool b) char * db_json_get_bool_as_str_from_value (const JSON_VALUE *doc) +{ + return db_json_bool_to_string (db_json_get_bool_from_value (doc)); +} + +bool +db_json_get_bool_from_value (const JSON_VALUE *doc) { if (doc == NULL) { @@ -1845,7 +2708,7 @@ db_json_get_bool_as_str_from_value (const JSON_VALUE *doc) } assert (db_json_get_type_of_value (doc) == DB_JSON_BOOL); - return db_json_bool_to_string (doc->GetBool ()); + return doc->GetBool (); } static JSON_PATH_TYPE @@ -1910,7 +2773,7 @@ 
db_json_split_path_by_delimiters (const std::string &path, const std::string &de { if (path[end] == '"') { - std::size_t index_of_closing_quote = path.find_first_of ("\"", end+1); + std::size_t index_of_closing_quote = path.find_first_of ('"', end + 1); if (index_of_closing_quote == std::string::npos) { assert (false); @@ -1967,6 +2830,8 @@ db_json_split_path_by_delimiters (const std::string &path, const std::string &de static bool db_json_sql_path_is_valid (std::string &sql_path) { + std::size_t end_bracket_offset; + // skip leading white spaces db_json_normalize_path (sql_path); if (sql_path.empty ()) @@ -1989,9 +2854,8 @@ db_json_sql_path_is_valid (std::string &sql_path) switch (sql_path[i]) { case '[': - { - std::size_t end_bracket_offset = sql_path.find_first_of (']', ++i); - if (end_bracket_offset == sql_path.npos) + end_bracket_offset = sql_path.find_first_of (']', ++i); + if (end_bracket_offset == std::string::npos) { // unacceptable assert (false); @@ -2004,8 +2868,7 @@ db_json_sql_path_is_valid (std::string &sql_path) } // move to ']'. i will be incremented. 
i = end_bracket_offset; - } - break; + break; case '.': i++; @@ -2086,7 +2949,8 @@ db_json_er_set_path_does_not_exist (const char *file_name, const int line_no, co static int db_json_er_set_expected_other_type (const char *file_name, const int line_no, const std::string &path, - const DB_JSON_TYPE &found_type, const DB_JSON_TYPE &expected_type, const DB_JSON_TYPE &expected_type_optional) + const DB_JSON_TYPE &found_type, const DB_JSON_TYPE &expected_type, + const DB_JSON_TYPE &expected_type_optional) { std::string sql_path_string; int error_code = NO_ERROR; @@ -2095,7 +2959,7 @@ db_json_er_set_expected_other_type (const char *file_name, const int line_no, co error_code = db_json_convert_pointer_to_sql_path (path.c_str (), sql_path_string); if (error_code != NO_ERROR) { - ASSERT_ERROR(); + ASSERT_ERROR (); return error_code; } @@ -2140,7 +3004,7 @@ db_json_replace_token_special_chars (std::string &token, // compare special characters with sequence following token_it if (token_idx + special_it->first.length () <= token.length ()) { - if (token.compare (token_idx, special_it->first.length (), special_it->first.c_str ()) == 0) + if (token.compare (token_idx, special_it->first.length (), special_it->first) == 0) { // replace token.replace (token_idx, special_it->first.length (), special_it->second); @@ -2194,10 +3058,8 @@ db_json_convert_pointer_to_sql_path (const char *pointer_path, std::string &sql_ // first we need to split into tokens std::vector tokens = db_json_split_path_by_delimiters (pointer_path_string, db_Json_pointer_delimiters); - for (std::size_t i = 0; i < tokens.size (); ++i) + for (std::string &token : tokens) { - std::string &token = tokens[i]; - if (db_json_path_is_token_valid_array_index (token)) { sql_path_out += "["; @@ -2237,7 +3099,8 @@ db_json_iszero (const unsigned char &ch) return ch == '0'; } -/* db_json_remove_leading_zeros_index () - Erase leading zeros from sql path index +/* + * db_json_remove_leading_zeros_index () - Erase leading zeros 
from sql path index * * index (in) : current object * example: $[000123] -> $[123] @@ -2257,6 +3120,7 @@ db_json_remove_leading_zeros_index (std::string &index) /* * db_json_convert_sql_path_to_pointer () + * * sql_path (in) * json_pointer_out (out): the result * An sql_path is converted to rapidjson standard path @@ -2300,7 +3164,27 @@ db_json_convert_sql_path_to_pointer (const char *sql_path, std::string &json_poi return NO_ERROR; } -/* db_json_get_paths_helper () - Recursive function to get the paths from a json object +/* + * db_json_get_paths_helper () - Recursive function to get the paths from a json object matching a pattern + * + * obj (in) : current object + * pattern (in) : the pattern to search for + * esc_char (in) : esc_char used to find values that match the pattern + * find_all (in) : whether to continue search after finding a match + * sql_path (in) : current path in the json search tree + * paths (out) : the paths found, whose pointed values match the pattern + */ +static int +db_json_search_helper (JSON_DOC &obj, const DB_VALUE *pattern, const DB_VALUE *esc_char, bool find_all, + const std::string &sql_path, bool &found, std::vector &paths) +{ + JSON_SEARCHER json_search_walker (sql_path, pattern, esc_char, find_all, paths); + + return json_search_walker.WalkDocument (obj); +} + +/* + * db_json_get_paths_helper () - Recursive function to get the paths from a json object * * obj (in) : current object * sql_path (in) : the path for the current object @@ -2335,7 +3219,8 @@ db_json_get_paths_helper (const JSON_VALUE &obj, const std::string &sql_path, st paths.push_back (sql_path); } -/* db_json_get_all_paths_func () - Returns the paths from a JSON document as a JSON array +/* + * db_json_get_all_paths_func () - Returns the paths from a JSON document as a JSON array * * doc (in) : json document * result_json (in) : a json array that contains all the paths @@ -2352,17 +3237,86 @@ db_json_get_all_paths_func (const JSON_DOC &doc, JSON_DOC *&result_json) 
result_json->SetArray (); - for (auto it = paths.begin (); it != paths.end (); ++it) + for (auto &path : paths) { JSON_VALUE val; - val.SetString (it->c_str (), result_json->GetAllocator ()); + val.SetString (path.c_str (), result_json->GetAllocator ()); result_json->PushBack (val, result_json->GetAllocator ()); } return NO_ERROR; } -/* db_json_keys_func () - Returns the keys from the top-level value of a JSON object as a JSON array +/* + * db_json_pretty_func () - Returns the stringified version of a JSON document + * + * doc (in) : json document + * result_str (in) : a string that contains the json in a pretty format + * NOTE: Memory for the result_str is obtained with db_private_strdup and needs to be freed + */ +void +db_json_pretty_func (const JSON_DOC &doc, char *&result_str) +{ + assert (result_str == nullptr); + + JSON_PRETTY_WRITER json_pretty_writer; + + doc.Accept (json_pretty_writer); + + result_str = db_private_strdup (NULL, json_pretty_writer.ToString ().c_str ()); +} + +/* + * db_json_arrayagg_func_accumulate () - Appends the value to the result_json + * + * return : void + * value (in) : value to append + * result_json (in) : the document where we want to append + * expand (in) : expand will be true only when aggregate 2 accumulators + */ +void +db_json_arrayagg_func_accumulate (const JSON_DOC *value, JSON_DOC &result_json) +{ + DB_JSON_TYPE result_json_type = db_json_get_type (&result_json); + + // only the first time the result_json will have DB_JSON_NULL type + if (result_json_type == DB_JSON_TYPE::DB_JSON_NULL) + { + result_json.SetArray (); + } + + assert (result_json.IsArray ()); + + JSON_VALUE value_copy (*value, result_json.GetAllocator ()); + result_json.PushBack (value_copy, result_json.GetAllocator ()); +} + +/* + * db_json_objectagg_func_accumulate () - Inserts a (key, value) pair in the result_json + * + * return : void + * key_str (in) : the key string + * val_doc (in) : the value document + * result_json (in) : the document where we 
want to insert + */ +void +db_json_objectagg_func_accumulate (const char *key_str, const JSON_DOC *val_doc, JSON_DOC &result_json) +{ + DB_JSON_TYPE result_json_type = db_json_get_type (&result_json); + + // only the first time the result_json will have DB_JSON_NULL type + if (result_json_type == DB_JSON_TYPE::DB_JSON_NULL) + { + result_json.SetObject (); + } + + assert (result_json.IsObject ()); + + db_json_add_member_to_object (&result_json, key_str, val_doc); +} + +/* + * db_json_keys_func () - Returns the keys from the top-level value of a JSON object as a JSON array * * return : error code * doc (in) : json document @@ -2433,7 +3387,7 @@ db_json_keys_func (const char *json_raw, JSON_DOC *&result_json, const char *raw error_code = db_json_get_json_from_str (json_raw, doc, json_raw_length); if (error_code != NO_ERROR) { - ASSERT_ERROR(); + ASSERT_ERROR (); return error_code; } @@ -2446,13 +3400,14 @@ db_json_keys_func (const char *json_raw, JSON_DOC *&result_json, const char *raw bool db_json_value_has_numeric_type (const JSON_VALUE *doc) { - return db_json_get_type_of_value (doc) == DB_JSON_INT || db_json_get_type_of_value (doc) == DB_JSON_DOUBLE; + return db_json_get_type_of_value (doc) == DB_JSON_INT || db_json_get_type_of_value (doc) == DB_JSON_BIGINT + || db_json_get_type_of_value (doc) == DB_JSON_DOUBLE; } /* * The following rules define containment: * A candidate scalar is contained in a target scalar if and only if they are comparable and are equal. - * Two scalar values are comparable if they have the same JSON_TYPE() types, + * Two scalar values are comparable if they have the same JSON_TYPE () types, * with the exception that values of types INTEGER and DOUBLE are also comparable to each other. 
* * A candidate array is contained in a target array if and only if @@ -2490,6 +3445,10 @@ db_json_value_is_contained_in_doc_helper (const JSON_VALUE *doc, const JSON_VALU { result = (db_json_get_int_from_value (doc) == db_json_get_int_from_value (value)); } + else if (doc_type == DB_JSON_BIGINT) + { + result = (db_json_get_bigint_from_value (doc) == db_json_get_bigint_from_value (value)); + } else if (doc_type == DB_JSON_DOUBLE) { result = (db_json_get_double_from_value (doc) == db_json_get_double_from_value (value)); @@ -2598,6 +3557,11 @@ void db_json_set_int_to_doc (JSON_DOC *doc, int i) doc->SetInt (i); } +void db_json_set_bigint_to_doc (JSON_DOC *doc, std::int64_t i) +{ + doc->SetInt64 (i); +} + bool db_json_are_docs_equal (const JSON_DOC *doc1, const JSON_DOC *doc2) { if (doc1 == NULL || doc2 == NULL) @@ -2643,7 +3607,7 @@ db_json_path_is_token_valid_array_index (const std::string &str, std::size_t sta // json pointer will corespond the symbol '-' to JSON_ARRAY length // so if we have the json {"A":[1,2,3]} and the path /A/- // this will point to the 4th element of the array (zero indexed) - if (str.compare ("-") == 0) + if (str == "-") { return true; } @@ -2712,6 +3676,7 @@ JSON_WALKER::WalkValue (JSON_VALUE &value) { for (auto it = value.MemberBegin (); it != value.MemberEnd (); ++it) { + CallOnKeyIterate (it->name); error_code = WalkValue (it->value); if (error_code != NO_ERROR) { @@ -2724,6 +3689,7 @@ JSON_WALKER::WalkValue (JSON_VALUE &value) { for (JSON_VALUE *it = value.Begin (); it != value.End (); ++it) { + CallOnArrayIterate (); error_code = WalkValue (*it); if (error_code != NO_ERROR) { @@ -2748,16 +3714,18 @@ JSON_SERIALIZER::SaveSizePointers (char *ptr) { // save the current pointer m_size_pointers.push (ptr); + // skip the size m_error = or_put_int (m_buffer, 0); - return !HasError(); + + return !HasError (); } void JSON_SERIALIZER::SetSizePointers (SizeType size) { - char *buf = m_size_pointers.top(); - m_size_pointers.pop(); + char *buf = 
m_size_pointers.top (); + m_size_pointers.pop (); assert (buf >= m_buffer->buffer && buf < m_buffer->ptr); @@ -2768,13 +3736,13 @@ JSON_SERIALIZER::SetSizePointers (SizeType size) bool JSON_SERIALIZER::PackType (const DB_JSON_TYPE &type) { m_error = or_put_int (m_buffer, static_cast (type)); - return !HasError(); + return !HasError (); } bool JSON_SERIALIZER::PackString (const char *str) { m_error = or_put_string_aligned_with_length (m_buffer, str); - return !HasError(); + return !HasError (); } bool JSON_SERIALIZER_LENGTH::Null () @@ -2803,7 +3771,7 @@ bool JSON_SERIALIZER::Bool (bool b) } m_error = or_put_int (m_buffer, b ? 1 : 0); - return !HasError(); + return !HasError (); } bool JSON_SERIALIZER_LENGTH::Int (int i) @@ -2821,7 +3789,25 @@ bool JSON_SERIALIZER::Int (int i) } m_error = or_put_int (m_buffer, i); - return !HasError(); + return !HasError (); +} + +bool JSON_SERIALIZER_LENGTH::Int64 (std::int64_t i) +{ + // the encode will be TYPE|VALUE, where TYPE is int and value is int64 + m_length += GetTypePackedSize () + OR_BIGINT_SIZE; + return true; +} + +bool JSON_SERIALIZER::Int64 (std::int64_t i) +{ + if (!PackType (DB_JSON_BIGINT)) + { + return false; + } + + m_error = or_put_bigint (m_buffer, i); + return !HasError (); } bool JSON_SERIALIZER_LENGTH::Double (double d) @@ -2839,12 +3825,12 @@ bool JSON_SERIALIZER::Double (double d) } m_error = or_put_double (m_buffer, d); - return !HasError(); + return !HasError (); } bool JSON_SERIALIZER_LENGTH::String (const Ch *str, SizeType length, bool copy) { - m_length += GetTypePackedSize() + GetStringPackedSize (str); + m_length += GetTypePackedSize () + GetStringPackedSize (str); return true; } @@ -2865,14 +3851,14 @@ bool JSON_SERIALIZER::Key (const Ch *str, SizeType length, bool copy) return PackString (str); } -bool JSON_SERIALIZER_LENGTH::StartObject() +bool JSON_SERIALIZER_LENGTH::StartObject () { m_length += GetTypePackedSize (); m_length += OR_INT_SIZE; return true; } -bool JSON_SERIALIZER::StartObject() 
+bool JSON_SERIALIZER::StartObject () { if (!PackType (DB_JSON_OBJECT)) { @@ -2884,14 +3870,14 @@ bool JSON_SERIALIZER::StartObject() return SaveSizePointers (m_buffer->ptr); } -bool JSON_SERIALIZER_LENGTH::StartArray() +bool JSON_SERIALIZER_LENGTH::StartArray () { m_length += GetTypePackedSize (); m_length += OR_INT_SIZE; return true; } -bool JSON_SERIALIZER::StartArray() +bool JSON_SERIALIZER::StartArray () { if (!PackType (DB_JSON_ARRAY)) { @@ -2927,6 +3913,179 @@ bool JSON_SERIALIZER::EndArray (SizeType elementCount) return true; } +void JSON_PRETTY_WRITER::WriteDelimiters (bool is_key) +{ + // just a scalar, no indentation needed + if (m_level_stack.empty ()) + { + return; + } + + // there are 3 cases the current element can be + // 1) an element from an ARRAY + // 2) a key from an OBJECT + // 3) a value from an OBJECT + // when dealing with array elements, all elements except the first need to write a comma before writing his value + // when dealing with objects, all keys except the first need to write a comma before writing the current key value + if (is_key || m_level_stack.top ().type == DB_JSON_TYPE::DB_JSON_ARRAY) + { + // not the first key or the first element from ARRAY, so we need to separate elements + if (!m_level_stack.top ().is_first) + { + m_buffer.append (","); + } + else + { + // for the first key or element skip the comma + m_level_stack.top ().is_first = false; + } + + SetIndentOnNewLine (); + } + else + { + // the case we are in an OBJECT and print a value + assert (m_level_stack.top ().type == DB_JSON_TYPE::DB_JSON_OBJECT); + m_buffer.append (" "); + } +} + +void JSON_PRETTY_WRITER::PushLevel (const DB_JSON_TYPE &type) +{ + // advance one level + m_current_indent += LEVEL_INDENT_UNIT; + + // push the new context + m_level_stack.push (level_context (type, true)); +} + +void JSON_PRETTY_WRITER::PopLevel () +{ + // reestablish the old context + m_current_indent -= LEVEL_INDENT_UNIT; + m_level_stack.pop (); +} + +void 
JSON_PRETTY_WRITER::SetIndentOnNewLine () +{ + m_buffer.append ("\n").append (m_current_indent, ' '); +} + +bool JSON_PRETTY_WRITER::Null () +{ + WriteDelimiters (); + + m_buffer.append ("NULL"); + + return true; +} + +bool JSON_PRETTY_WRITER::Bool (bool b) +{ + WriteDelimiters (); + + m_buffer.append (b ? "true" : "false"); + + return true; +} + +bool JSON_PRETTY_WRITER::Int (int i) +{ + WriteDelimiters (); + + m_buffer.append (std::to_string (i)); + + return true; +} + +bool JSON_PRETTY_WRITER::Int64 (std::int64_t i) +{ + WriteDelimiters (); + + m_buffer.append (std::to_string (i)); + + return true; +} + +bool JSON_PRETTY_WRITER::Double (double d) +{ + WriteDelimiters (); + + m_buffer.append (std::to_string (d)); + + return true; +} + +bool JSON_PRETTY_WRITER::String (const Ch *str, SizeType length, bool copy) +{ + WriteDelimiters (); + + m_buffer.append ("\"").append (str).append ("\""); + + return true; +} + +bool JSON_PRETTY_WRITER::StartObject () +{ + WriteDelimiters (); + + m_buffer.append ("{"); + + PushLevel (DB_JSON_TYPE::DB_JSON_OBJECT); + + return true; +} + +bool JSON_PRETTY_WRITER::Key (const Ch *str, SizeType length, bool copy) +{ + WriteDelimiters (true); + + m_buffer.append ("\"").append (str).append ("\"").append (":"); + + return true; +} + +bool JSON_PRETTY_WRITER::StartArray () +{ + WriteDelimiters (); + + m_buffer.append ("["); + + PushLevel (DB_JSON_TYPE::DB_JSON_ARRAY); + + return true; +} + +bool JSON_PRETTY_WRITER::EndObject (SizeType memberCount) +{ + PopLevel (); + + if (memberCount != 0) + { + // go the next line and set the correct indentation + SetIndentOnNewLine (); + } + + m_buffer.append ("}"); + + return true; +} + +bool JSON_PRETTY_WRITER::EndArray (SizeType elementCount) +{ + PopLevel (); + + if (elementCount != 0) + { + // go the next line and set the correct indentation + SetIndentOnNewLine (); + } + + m_buffer.append ("]"); + + return true; +} + /* * db_json_serialize () - serialize a json document * @@ -2955,21 +4114,22 @@ 
std::size_t db_json_serialize_length (const JSON_DOC &doc) { JSON_SERIALIZER_LENGTH jsl; + doc.Accept (jsl); - return jsl.GetLength(); + return jsl.GetLength (); } /* -* db_json_or_buf_underflow () - Check if the buffer return underflow -* -* return : error_code -* buf (in) : the buffer which contains the data -* length (in) : the length of the string that we want to retrieve -* -* We do this check separately because we want to avoid an additional memory copy when getting the data from the buffer -* for storing it in the json document -*/ + * db_json_or_buf_underflow () - Check if the buffer return underflow + * + * return : error_code + * buf (in) : the buffer which contains the data + * length (in) : the length of the string that we want to retrieve + * + * We do this check separately because we want to avoid an additional memory copy when getting the data from the buffer + * for storing it in the json document + */ static int db_json_or_buf_underflow (OR_BUF *buf, size_t length) { @@ -2977,6 +4137,7 @@ db_json_or_buf_underflow (OR_BUF *buf, size_t length) { return or_underflow (buf); } + return NO_ERROR; } @@ -2999,12 +4160,12 @@ db_json_unpack_string_to_value (OR_BUF *buf, JSON_VALUE &value, JSON_PRIVATE_MEM { // we need to assert error here because or_underflow sets the error unlike or_overflow // which only returns the error code - ASSERT_ERROR(); + ASSERT_ERROR (); return rc; } // set the string directly from the buffer to avoid additional copy - value.SetString (buf->ptr, str_length - 1, doc_allocator); + value.SetString (buf->ptr, static_cast (str_length - 1), doc_allocator); // update the buffer pointer buf->ptr += str_length; @@ -3038,6 +4199,25 @@ db_json_unpack_int_to_value (OR_BUF *buf, JSON_VALUE &value) return NO_ERROR; } +static int +db_json_unpack_bigint_to_value (OR_BUF *buf, JSON_VALUE &value) +{ + int rc = NO_ERROR; + DB_BIGINT bigint_value; + + // unpack bigint + bigint_value = or_get_bigint (buf, &rc); + if (rc != NO_ERROR) + { + er_set 
(ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_TF_BUFFER_OVERFLOW, 0); + return rc; + } + + value.SetInt64 (bigint_value); + + return NO_ERROR; +} + static int db_json_unpack_double_to_value (OR_BUF *buf, JSON_VALUE &value) { @@ -3072,7 +4252,7 @@ db_json_unpack_bool_to_value (OR_BUF *buf, JSON_VALUE &value) assert (int_value == 0 || int_value == 1); - value.SetBool (int_value == 1 ? true : false); + value.SetBool (int_value == 1); return NO_ERROR; } @@ -3083,7 +4263,7 @@ db_json_unpack_object_to_value (OR_BUF *buf, JSON_VALUE &value, JSON_PRIVATE_MEM int rc = NO_ERROR; int size; - value.SetObject(); + value.SetObject (); // get the member count of the object size = or_get_int (buf, &rc); @@ -3101,7 +4281,7 @@ db_json_unpack_object_to_value (OR_BUF *buf, JSON_VALUE &value, JSON_PRIVATE_MEM rc = db_json_unpack_string_to_value (buf, key, doc_allocator); if (rc != NO_ERROR) { - ASSERT_ERROR(); + ASSERT_ERROR (); return rc; } @@ -3126,7 +4306,7 @@ db_json_unpack_array_to_value (OR_BUF *buf, JSON_VALUE &value, JSON_PRIVATE_MEMP int rc = NO_ERROR; int size; - value.SetArray(); + value.SetArray (); // get the member count of the array size = or_get_int (buf, &rc); @@ -3171,7 +4351,7 @@ db_json_deserialize_doc_internal (OR_BUF *buf, JSON_VALUE &value, JSON_PRIVATE_M json_type = static_cast (or_get_int (buf, &rc)); if (rc != NO_ERROR) { - ASSERT_ERROR(); + ASSERT_ERROR (); return rc; } @@ -3181,6 +4361,10 @@ db_json_deserialize_doc_internal (OR_BUF *buf, JSON_VALUE &value, JSON_PRIVATE_M rc = db_json_unpack_int_to_value (buf, value); break; + case DB_JSON_BIGINT: + rc = db_json_unpack_bigint_to_value (buf, value); + break; + case DB_JSON_DOUBLE: rc = db_json_unpack_double_to_value (buf, value); break; @@ -3194,7 +4378,7 @@ db_json_deserialize_doc_internal (OR_BUF *buf, JSON_VALUE &value, JSON_PRIVATE_M break; case DB_JSON_NULL: - value.SetNull(); + value.SetNull (); break; case DB_JSON_OBJECT: @@ -3213,7 +4397,7 @@ db_json_deserialize_doc_internal (OR_BUF *buf, JSON_VALUE &value, 
JSON_PRIVATE_M if (rc != NO_ERROR) { - ASSERT_ERROR(); + ASSERT_ERROR (); } return rc; @@ -3234,12 +4418,12 @@ db_json_deserialize (OR_BUF *buf, JSON_DOC *&doc) // create the document that we want to reconstruct doc = db_json_allocate_doc (); - // the conversion from JSON_DOC to JSON_VALUE is needed because we want a refference to current node + // the conversion from JSON_DOC to JSON_VALUE is needed because we want a reference to current node // from json "tree" while iterating - error_code = db_json_deserialize_doc_internal (buf, db_json_doc_to_value (*doc), doc->GetAllocator()); + error_code = db_json_deserialize_doc_internal (buf, db_json_doc_to_value (*doc), doc->GetAllocator ()); if (error_code != NO_ERROR) { - ASSERT_ERROR(); + ASSERT_ERROR (); db_json_delete_doc (doc); } diff --git a/src/compat/db_json.hpp b/src/compat/db_json.hpp index 3874d3c70ae..1676e064b2b 100644 --- a/src/compat/db_json.hpp +++ b/src/compat/db_json.hpp @@ -21,8 +21,8 @@ * db_json.hpp - functions related to json */ -#ifndef _DB_JSON_H_ -#define _DB_JSON_H_ +#ifndef _DB_JSON_HPP_ +#define _DB_JSON_HPP_ #include "error_manager.h" #include "object_representation.h" @@ -30,15 +30,17 @@ #if defined (__cplusplus) class JSON_DOC; class JSON_VALIDATOR; +class JSON_ITERATOR; #else typedef void JSON_DOC; typedef void JSON_VALIDATOR; +typedef void JSON_ITERATOR; #endif #if defined (__cplusplus) #include -#include "thread_compat.hpp" +#include /* * these also double as type precedence @@ -49,6 +51,7 @@ enum DB_JSON_TYPE DB_JSON_NULL = 0, DB_JSON_UNKNOWN, DB_JSON_INT, + DB_JSON_BIGINT, DB_JSON_DOUBLE, DB_JSON_STRING, DB_JSON_OBJECT, @@ -68,19 +71,21 @@ bool db_json_is_valid (const char *json_str); const char *db_json_get_type_as_str (const JSON_DOC *document); unsigned int db_json_get_length (const JSON_DOC *document); unsigned int db_json_get_depth (const JSON_DOC *doc); -int db_json_extract_document_from_path (const JSON_DOC *document, const char *raw_path, - JSON_DOC *&result); +int 
db_json_extract_document_from_path (const JSON_DOC *document, const char *raw_path, JSON_DOC *&result); +int db_json_contains_path (const JSON_DOC *document, const char *raw_path, bool &result); char *db_json_get_raw_json_body_from_document (const JSON_DOC *doc); char *db_json_get_json_body_from_document (const JSON_DOC &doc); int db_json_add_member_to_object (JSON_DOC *doc, const char *name, const char *value); int db_json_add_member_to_object (JSON_DOC *doc, const char *name, int value); +int db_json_add_member_to_object (JSON_DOC *doc, const char *name, std::int64_t value); int db_json_add_member_to_object (JSON_DOC *doc, const char *name, double value); int db_json_add_member_to_object (JSON_DOC *doc, const char *name, const JSON_DOC *value); void db_json_add_element_to_array (JSON_DOC *doc, char *value); void db_json_add_element_to_array (JSON_DOC *doc, int value); +void db_json_add_element_to_array (JSON_DOC *doc, std::int64_t value); void db_json_add_element_to_array (JSON_DOC *doc, double value); void db_json_add_element_to_array (JSON_DOC *doc, const JSON_DOC *value); @@ -97,9 +102,16 @@ int db_json_set_func (const JSON_DOC *value, JSON_DOC &doc, const char *raw_path int db_json_keys_func (const JSON_DOC &doc, JSON_DOC *&result_json, const char *raw_path); int db_json_keys_func (const char *json_raw, JSON_DOC *&result_json, const char *raw_path, size_t json_raw_length); int db_json_array_append_func (const JSON_DOC *value, JSON_DOC &doc, const char *raw_path); +int db_json_array_insert_func (const JSON_DOC *value, JSON_DOC &doc, const char *raw_path); int db_json_remove_func (JSON_DOC &doc, const char *raw_path); -int db_json_merge_func (const JSON_DOC *source, JSON_DOC *&dest); +int db_json_search_func (JSON_DOC &doc, const DB_VALUE *pattern, const DB_VALUE *esc_char, bool find_all, + std::vector &starting_paths, std::vector &paths); +int db_json_merge_func (const JSON_DOC *source, JSON_DOC *&dest, bool patch); int db_json_get_all_paths_func (const 
JSON_DOC &doc, JSON_DOC *&result_json); +void db_json_pretty_func (const JSON_DOC &doc, char *&result_str); +int db_json_unquote (const JSON_DOC &doc, char *&result_str); +void db_json_arrayagg_func_accumulate (const JSON_DOC *value, JSON_DOC &result_json); +void db_json_objectagg_func_accumulate (const char *key_str, const JSON_DOC *val_doc, JSON_DOC &result_json); int db_json_object_contains_key (JSON_DOC *obj, const char *key, int &result); const char *db_json_get_schema_raw_from_validator (JSON_VALIDATOR *val); @@ -113,17 +125,30 @@ void db_json_delete_validator (JSON_VALIDATOR *&validator); int db_json_validate_doc (JSON_VALIDATOR *validator, JSON_DOC *doc); bool db_json_are_validators_equal (JSON_VALIDATOR *val1, JSON_VALIDATOR *val2); +void db_json_iterator_next (JSON_ITERATOR &json_itr); +const JSON_DOC *db_json_iterator_get_document (JSON_ITERATOR &json_itr); +bool db_json_iterator_has_next (JSON_ITERATOR &json_itr); +void db_json_set_iterator (JSON_ITERATOR *&json_itr, const JSON_DOC &new_doc); +void db_json_reset_iterator (JSON_ITERATOR *&json_itr); +bool db_json_iterator_is_empty (const JSON_ITERATOR &json_itr); +JSON_ITERATOR *db_json_create_iterator (const DB_JSON_TYPE &type); +void db_json_delete_json_iterator (JSON_ITERATOR *&json_itr); +void db_json_clear_json_iterator (JSON_ITERATOR *&json_itr); + DB_JSON_TYPE db_json_get_type (const JSON_DOC *doc); int db_json_get_int_from_document (const JSON_DOC *doc); +std::int64_t db_json_get_bigint_from_document (const JSON_DOC *doc); double db_json_get_double_from_document (const JSON_DOC *doc); const char *db_json_get_string_from_document (const JSON_DOC *doc); char *db_json_get_bool_as_str_from_document (const JSON_DOC *doc); +bool db_json_get_bool_from_document (const JSON_DOC *doc); char *db_json_copy_string_from_document (const JSON_DOC *doc); void db_json_set_string_to_doc (JSON_DOC *doc, const char *str); void db_json_set_double_to_doc (JSON_DOC *doc, double d); void db_json_set_int_to_doc (JSON_DOC 
*doc, int i); +void db_json_set_bigint_to_doc (JSON_DOC *doc, std::int64_t i); int db_json_value_is_contained_in_doc (const JSON_DOC *doc, const JSON_DOC *value, bool &result); bool db_json_are_docs_equal (const JSON_DOC *doc1, const JSON_DOC *doc2); @@ -152,4 +177,4 @@ db_json_convert_string_and_call (const char *json_raw, size_t json_raw_length, F #endif /* defined (__cplusplus) */ -#endif /* _DB_JSON_H_ */ +#endif /* _DB_JSON_HPP_ */ diff --git a/src/compat/db_macro.c b/src/compat/db_macro.c index 5bbbdc38e19..5c0bc6252f5 100644 --- a/src/compat/db_macro.c +++ b/src/compat/db_macro.c @@ -44,6 +44,7 @@ #if !defined(SERVER_MODE) #include "object_accessor.h" #endif +#include "elo.h" #include "db_elo.h" #include "numeric_opfunc.h" #include "object_primitive.h" @@ -1899,7 +1900,7 @@ transfer_string (char *dst, int *xflen, int *outlen, const int dstlen, if (dstlen > srclen) { - /* + /* * No truncation; copy the data and blank pad if necessary. */ memcpy (dst, src, srclen); @@ -1923,7 +1924,7 @@ transfer_string (char *dst, int *xflen, int *outlen, const int dstlen, } else { - /* + /* * Truncation is necessary; put as many bytes as possible into * the receiving buffer and null-terminate it (i.e., it receives * at most dstlen-1 bytes). If there is not outlen indicator by @@ -2092,7 +2093,7 @@ db_value_get (DB_VALUE * value, const DB_TYPE_C c_type, void *buf, const int buf goto invalid_args; } - /* + /* * *outlen will be non-zero only when converting to a character * output and truncation is necessary. All other cases should set * *outlen to 0 unless a NULL is encountered (which case we've @@ -2103,7 +2104,7 @@ db_value_get (DB_VALUE * value, const DB_TYPE_C c_type, void *buf, const int buf *outlen = 0; } - /* + /* * The numeric conversions below probably ought to be checking for * overflow and complaining when it happens. 
For example, trying to * get a double out into a DB_C_SHORT is likely to overflow; the @@ -2434,7 +2435,7 @@ db_value_get (DB_VALUE * value, const DB_TYPE_C c_type, void *buf, const int buf break; case DB_TYPE_C_MONETARY: { - /* + /* * WARNING: this works only so long as DB_C_MONETARY * is typedef'ed as a DB_MONETARY. If that changes, * so must this. @@ -3052,7 +3053,7 @@ coerce_char_to_dbvalue (DB_VALUE * value, char *buf, const int buflen) } else { - /* + /* * If the precision is not specified, fix it to * the input precision otherwise db_bit_string_coerce() * will fail. @@ -3356,7 +3357,7 @@ coerce_numeric_to_dbvalue (DB_VALUE * value, char *buf, const DB_TYPE_C c_type) switch (db_type) { case DB_TYPE_NUMERIC: - /* + /* * We need a better way to convert a numerical C type * into a NUMERIC. This will have to suffice for now. */ @@ -3471,7 +3472,7 @@ coerce_binary_to_dbvalue (DB_VALUE * value, char *buf, const int buflen) db_make_varchar (&tmp_value, DB_DEFAULT_PRECISION, buf, QSTR_NUM_BYTES (buflen), LANG_SYS_CODESET, LANG_SYS_COLLATION); - /* + /* * If the precision is not specified, fix it to * the input precision otherwise db_char_string_coerce() * will fail. @@ -3508,7 +3509,7 @@ coerce_binary_to_dbvalue (DB_VALUE * value, char *buf, const int buflen) db_make_varnchar (&tmp_value, DB_DEFAULT_PRECISION, buf, QSTR_NUM_BYTES (buflen), LANG_SYS_CODESET, LANG_SYS_COLLATION); - /* + /* * If the precision is not specified, fix it to * the input precision otherwise db_char_string_coerce() * will fail. @@ -3620,7 +3621,7 @@ coerce_date_to_dbvalue (DB_VALUE * value, char *buf) db_make_null (&tmp_value); qstr_make_typed_string (db_type, &tmp_value, length, tmp, length, LANG_SYS_CODESET, LANG_SYS_COLLATION); - /* + /* * If the precision is not specified, fix it to * the input precision otherwise db_char_string_coerce() * will fail. 
@@ -3701,7 +3702,7 @@ coerce_time_to_dbvalue (DB_VALUE * value, char *buf) db_make_null (&tmp_value); qstr_make_typed_string (db_type, &tmp_value, length, tmp, length, LANG_SYS_CODESET, LANG_SYS_COLLATION); - /* + /* * If the precision is not specified, fix it to * the input precision otherwise db_char_string_coerce() * will fail. @@ -3780,7 +3781,7 @@ coerce_timestamp_to_dbvalue (DB_VALUE * value, char *buf) db_make_null (&tmp_value); qstr_make_typed_string (db_type, &tmp_value, length, tmp, length, LANG_SYS_CODESET, LANG_SYS_COLLATION); - /* + /* * If the precision is not specified, fix it to * the input precision otherwise db_char_string_coerce() * will fail. @@ -3885,7 +3886,7 @@ coerce_datetime_to_dbvalue (DB_VALUE * value, char *buf) db_make_null (&tmp_value); qstr_make_typed_string (db_type, &tmp_value, length, tmp, length, LANG_SYS_CODESET, LANG_SYS_COLLATION); - /* + /* * If the precision is not specified, fix it to * the input precision otherwise db_char_string_coerce() * will fail. 
@@ -4933,7 +4934,7 @@ db_set_connect_status (int status) } /* - * db_default_expression_string() - + * db_default_expression_string() - * return : string opcode of default expression * default_expr_type(in): */ @@ -4998,6 +4999,12 @@ db_convert_json_into_scalar (const DB_VALUE * src, DB_VALUE * dest) db_make_int (dest, val); break; } + case DB_JSON_BIGINT: + { + int64_t val = db_json_get_bigint_from_document (doc); + db_make_bigint (dest, val); + break; + } case DB_JSON_DOUBLE: { double val = db_json_get_double_from_document (doc); @@ -5041,6 +5048,7 @@ db_is_json_value_type (DB_TYPE type) case DB_TYPE_DOUBLE: case DB_TYPE_JSON: case DB_TYPE_NUMERIC: + case DB_TYPE_BIGINT: return true; default: return false; @@ -5140,4 +5148,3 @@ db_value_is_corrupted (const DB_VALUE * value) return false; } - diff --git a/src/compat/db_query.h b/src/compat/db_query.h index c2b6dee0cf6..58b21b313fb 100644 --- a/src/compat/db_query.h +++ b/src/compat/db_query.h @@ -34,7 +34,6 @@ #include "config.h" #include "error_manager.h" -#include "dbdef.h" #include "object_primitive.h" #include "class_object.h" #include "cursor.h" diff --git a/src/compat/db_value_printer.cpp b/src/compat/db_value_printer.cpp index 784c7d92fe4..bca78eef82c 100644 --- a/src/compat/db_value_printer.cpp +++ b/src/compat/db_value_printer.cpp @@ -22,7 +22,7 @@ */ #include "db_value_printer.hpp" - +#include "tz_support.h" #include "db_date.h" #include "dbtype.h" #include "intl_support.h" diff --git a/src/compat/db_vdb.c b/src/compat/db_vdb.c index 4f4fb9d6992..28403a356e7 100644 --- a/src/compat/db_vdb.c +++ b/src/compat/db_vdb.c @@ -68,7 +68,6 @@ enum static struct timeb base_server_timeb = { 0, 0, 0, 0 }; static struct timeb base_client_timeb = { 0, 0, 0, 0 }; - static int get_dimension_of (PT_NODE ** array); static DB_SESSION *db_open_local (void); static DB_SESSION *initialize_session (DB_SESSION * session); @@ -94,6 +93,7 @@ static bool db_check_limit_need_recompile (PARSER_CONTEXT * parser, PT_NODE * st static 
DB_CLASS_MODIFICATION_STATUS pt_has_modified_class (PARSER_CONTEXT * parser, PT_NODE * statement); static PT_NODE *pt_has_modified_class_helper (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *continue_walk); +static bool db_can_execute_statement_with_autocommit (PARSER_CONTEXT * parser, PT_NODE * statement); /* * get_dimemsion_of() - returns the number of elements of a null-terminated @@ -3968,8 +3968,6 @@ db_set_statement_auto_commit (DB_SESSION * session, bool auto_commit) { PT_NODE *statement; int stmt_ndx; - bool has_name_oid = false; - int info_hints; int error_code; bool has_user_trigger; @@ -4008,7 +4006,20 @@ db_set_statement_auto_commit (DB_SESSION * session, bool auto_commit) return NO_ERROR; } - /* Check whether statement can uses auto commit. */ + if (session->dimension > 1 && !session->parser->is_holdable) + { + /* Search for select. */ + for (int i = 0; i < session->dimension; i++) + { + if (session->statements[i] != NULL && PT_IS_QUERY_NODE_TYPE (session->statements[i]->node_type)) + { + /* Avoid situation when the driver requests data after closing cursors. */ + return NO_ERROR; + } + } + } + + /* Check whether statement can use auto commit. */ error_code = tr_has_user_trigger (&has_user_trigger); if (error_code != NO_ERROR) { @@ -4017,29 +4028,56 @@ db_set_statement_auto_commit (DB_SESSION * session, bool auto_commit) if (has_user_trigger) { - /* Triggers must be excuted before commit. Disable optimization. */ + /* Triggers must be executed before commit. Disable optimization. */ return NO_ERROR; } + if (db_can_execute_statement_with_autocommit (session->parser, statement)) + { + statement->use_auto_commit = 1; + } + + return NO_ERROR; +} + +/* + * db_can_execute_statement_with_autocommit () - Check whether the statement can be executed with commit. + * + * return : true, if the statement can be executed with commit. 
+ * parser(in): the parser + * statement(in): the statement + * + */ +static bool +db_can_execute_statement_with_autocommit (PARSER_CONTEXT * parser, PT_NODE * statement) +{ + bool has_name_oid = false; + int info_hints; + PT_NODE *arg1, *arg2; + bool can_execute_statement_with_commit; + + assert (parser != NULL && statement != NULL); + /* Here you can add more statements, if you think that is safe to execute them with commit. * For now, we care about optimizing most common queries. */ + can_execute_statement_with_commit = false; + switch (statement->node_type) { case PT_SELECT: /* Check whether the optimization can be used. Disable it, if several broker/server requests are needed. */ - if (!statement->info.query.oids_included && !statement->info.query.is_view_spec - && !statement->info.query.has_system_class && statement->info.query.into_list == NULL) + if (!statement->info.query.oids_included && statement->info.query.into_list == NULL) { info_hints = (PT_HINT_SELECT_KEY_INFO | PT_HINT_SELECT_PAGE_INFO | PT_HINT_SELECT_KEY_INFO | PT_HINT_SELECT_BTREE_NODE_INFO); if ((statement->info.query.q.select.hint & info_hints) == 0) { - (void) parser_walk_tree (session->parser, statement->info.query.q.select.list, pt_has_name_oid, + (void) parser_walk_tree (parser, statement->info.query.q.select.list, pt_has_name_oid, &has_name_oid, NULL, NULL); if (!has_name_oid) { - statement->use_auto_commit = 1; + can_execute_statement_with_commit = true; } } } @@ -4049,7 +4087,7 @@ db_set_statement_auto_commit (DB_SESSION * session, bool auto_commit) /* Do not use optimization in case of insert execution on broker side */ if (statement->info.insert.execute_with_commit_allowed) { - statement->use_auto_commit = 1; + can_execute_statement_with_commit = true; } break; @@ -4057,7 +4095,7 @@ db_set_statement_auto_commit (DB_SESSION * session, bool auto_commit) /* Do not use optimization in case of update execution on broker side */ if (statement->info.update.execute_with_commit_allowed) { - 
statement->use_auto_commit = 1; + can_execute_statement_with_commit = true; } break; @@ -4068,7 +4106,7 @@ db_set_statement_auto_commit (DB_SESSION * session, bool auto_commit) /* If del_stmt_list is not null, we may need several broker/server requests */ if (statement->info.delete_.del_stmt_list == NULL) { - statement->use_auto_commit = 1; + can_execute_statement_with_commit = true; } } break; @@ -4076,14 +4114,45 @@ db_set_statement_auto_commit (DB_SESSION * session, bool auto_commit) case PT_MERGE: if (statement->info.merge.flags & PT_MERGE_INFO_SERVER_OP) { - statement->use_auto_commit = 1; + can_execute_statement_with_commit = true; } break; - // TODO - what else? for instance, merge, other dmls, ddls. + case PT_UNION: + case PT_INTERSECTION: + case PT_DIFFERENCE: + arg1 = statement->info.query.q.union_.arg1; + arg2 = statement->info.query.q.union_.arg2; + + /* At least one argument must be not null to enable the optimization. */ + if (arg1 != NULL) + { + if (arg2 != NULL) + { + if (db_can_execute_statement_with_autocommit (parser, arg1) + && db_can_execute_statement_with_autocommit (parser, arg2)) + { + can_execute_statement_with_commit = true; + } + } + else if (db_can_execute_statement_with_autocommit (parser, arg1)) + { + can_execute_statement_with_commit = true; + } + } + else if (arg2 != NULL) + { + if (db_can_execute_statement_with_autocommit (parser, arg2)) + { + can_execute_statement_with_commit = true; + } + } + break; + + // TODO - what else? for instance, other dmls, ddls. default: break; } - return NO_ERROR; + return can_execute_statement_with_commit; } diff --git a/src/compat/dbdef.h b/src/compat/dbdef.h deleted file mode 100644 index 0130e851ce9..00000000000 --- a/src/compat/dbdef.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - * - */ - - -/* - * dbdef.h -Supporting definitions for the CUBRID API functions. - * - */ - -#ifndef _DBDEF_H_ -#define _DBDEF_H_ - -#ident "$Id$" - -#include "cubrid_api.h" - -/* TODO: find a better place for these stuff. This file has no clear purpose */ - -#define SERVER_SESSION_KEY_SIZE 8 - -typedef struct dbdef_vol_ext_info DBDEF_VOL_EXT_INFO; -struct dbdef_vol_ext_info -{ - const char *path; /* Directory where the volume extension is created. If NULL, is given, it defaults to - * the system parameter. */ - const char *name; /* Name of the volume extension If NULL, system generates one like "db".ext"volid" - * where "db" is the database name and "volid" is the volume identifier to be assigned - * to the volume extension. */ - const char *comments; /* Comments which are included in the volume extension header. 
*/ - int max_npages; /* Maximum pages of this volume */ - int extend_npages; /* Number of pages to extend - used for generic volume only */ - INT32 nsect_total; /* DKNSECTS type, number of sectors for volume extension */ - INT32 nsect_max; /* DKNSECTS type, maximum number of sectors for volume extension */ - int max_writesize_in_sec; /* the amount of volume written per second */ - DB_VOLPURPOSE purpose; /* The purpose of the volume extension. One of the following: - - * DB_PERMANENT_DATA_PURPOSE, DB_TEMPORARY_DATA_PURPOSE */ - DB_VOLTYPE voltype; /* Permanent of temporary volume type */ - bool overwrite; -}; - -typedef enum -{ - DB_PARTITION_HASH = 0, - DB_PARTITION_RANGE, - DB_PARTITION_LIST -} DB_PARTITION_TYPE; - -typedef enum -{ - DB_NOT_PARTITIONED_CLASS = 0, - DB_PARTITIONED_CLASS = 1, - DB_PARTITION_CLASS = 2 -} DB_CLASS_PARTITION_TYPE; - -#endif /* _DBDEF_H_ */ diff --git a/src/compat/dbi.h b/src/compat/dbi.h index 9aed30b11cb..9f66776b296 100644 --- a/src/compat/dbi.h +++ b/src/compat/dbi.h @@ -35,7 +35,6 @@ #include #include #include "dbtype_def.h" -#include "dbdef.h" #include "db_date.h" #include "db_elo.h" #include "db_query.h" @@ -50,38 +49,7 @@ extern "C" #define db_utime_to_string db_timestamp_to_string #define db_string_to_utime db_string_to_timestamp -/* the order to connect to db-hosts in databases.txt */ -#define DB_CONNECT_ORDER_SEQ 0 -#define DB_CONNECT_ORDER_RANDOM 1 - -/* abnormal DB host status */ -#define DB_HS_NORMAL 0x00000000 -#define DB_HS_CONN_TIMEOUT 0x00000001 -#define DB_HS_CONN_FAILURE 0x00000002 -#define DB_HS_MISMATCHED_RW_MODE 0x00000004 -#define DB_HS_HA_DELAYED 0x00000008 -#define DB_HS_NON_PREFFERED_HOSTS 0x00000010 -#define DB_HS_UNUSABLE_DATABASES 0x00000020 - -#define DB_HS_RECONNECT_INDICATOR \ - (DB_HS_MISMATCHED_RW_MODE | DB_HS_HA_DELAYED | DB_HS_NON_PREFFERED_HOSTS) - -/* host status for marking abnormal host status */ - typedef struct db_host_status DB_HOST_STATUS; - struct db_host_status - { - char 
hostname[MAXHOSTNAMELEN]; - int status; - }; - - typedef struct db_host_status_list DB_HOST_STATUS_LIST; - struct db_host_status_list - { - /* preferred_hosts + db-hosts */ - DB_HOST_STATUS hostlist[MAX_NUM_DB_HOSTS * 2]; - DB_HOST_STATUS *connected_host_status; - int last_host_idx; - }; + /* todo: These functions are duplicated in dbi_compat.h. Find a workaround. */ /* constants for db_include_oid */ enum diff --git a/src/compat/dbi_compat.h b/src/compat/dbi_compat.h index 09162a2605d..8b4257c91b9 100644 --- a/src/compat/dbi_compat.h +++ b/src/compat/dbi_compat.h @@ -43,13 +43,15 @@ #include "dbtype_def.h" #include "error_code.h" #include "dbtype_function.h" +#include "db_date.h" +#include "db_elo.h" +#include "cache_time.h" #ifdef __cplusplus extern "C" { #endif - #define DB_TRUE 1 #define DB_FALSE 0 @@ -117,170 +119,11 @@ extern "C" #define SQLX_CMD_SET_NAMES CUBRID_STMT_SET_NAMES #define SQLX_CMD_ALTER_STORED_PROCEDURE CUBRID_STMT_ALTER_STORED_PROCEDURE #define SQLX_CMD_ALTER_STORED_PROCEDURE_OWNER CUBRID_STMT_ALTER_STORED_PROCEDURE -#define SQLX_MAX_CMD_TYPE CUBRID_MAX_STMT_TYPE - - enum tz_region_type - { - TZ_REGION_OFFSET = 0, - TZ_REGION_ZONE = 1 - }; - typedef enum tz_region_type TZ_REGION_TYPE; - - typedef struct tz_region TZ_REGION; - struct tz_region - { - TZ_REGION_TYPE type; /* 0 : offset ; 1 : zone */ - union - { - int offset; /* in seconds */ - unsigned int zone_id; /* geographical zone id */ - }; - }; - - extern int db_date_weekday (DB_DATE * date); - extern int db_date_to_string (char *buf, int bufsize, DB_DATE * date); - extern bool db_string_check_explicit_date (const char *str, int str_len); - extern int db_string_to_date (const char *buf, DB_DATE * date); - extern int db_string_to_date_ex (const char *buf, int str_len, DB_DATE * date); - extern int db_date_parse_date (char const *str, int str_len, DB_DATE * date); - -/* DB_DATETIME functions */ - extern int db_datetime_encode (DB_DATETIME * datetime, int month, int day, int year, int hour, 
int minute, int second, - int millisecond); - extern int db_datetime_decode (const DB_DATETIME * datetime, int *month, int *day, int *year, int *hour, int *minute, - int *second, int *millisecond); - extern int db_datetime_to_string (char *buf, int bufsize, DB_DATETIME * datetime); - extern int db_datetimetz_to_string (char *buf, int bufsize, DB_DATETIME * dt, const TZ_ID * tz_id); - extern int db_datetimeltz_to_string (char *buf, int bufsize, DB_DATETIME * dt); - extern int db_datetime_to_string2 (char *buf, int bufsize, DB_DATETIME * datetime); - extern int db_string_to_datetime (const char *str, DB_DATETIME * datetime); - extern int db_string_to_datetime_ex (const char *str, int str_len, DB_DATETIME * datetime); - extern int db_string_to_datetimetz (const char *str, DB_DATETIMETZ * dt_tz, bool * has_zone); - extern int db_string_to_datetimetz_ex (const char *str, int str_len, DB_DATETIMETZ * dt_tz, bool * has_zone); - extern int db_string_to_datetimeltz (const char *str, DB_DATETIME * datetime); - extern int db_string_to_datetimeltz_ex (const char *str, int str_len, DB_DATETIME * datetime); - extern int db_date_parse_datetime_parts (char const *str, int str_len, DB_DATETIME * date, bool * is_explicit_time, - bool * has_explicit_msec, bool * fits_as_timestamp, char const **endp); - extern int db_date_parse_datetime (char const *str, int str_len, DB_DATETIME * datetime); - extern int db_subtract_int_from_datetime (DB_DATETIME * dt1, DB_BIGINT i2, DB_DATETIME * result_datetime); - extern int db_add_int_to_datetime (DB_DATETIME * datetime, DB_BIGINT i2, DB_DATETIME * result_datetime); -/* DB_TIMESTAMP functions */ - extern int db_timestamp_encode (DB_TIMESTAMP * utime, DB_DATE * date, DB_TIME * timeval); - extern int db_timestamp_encode_ses (const DB_DATE * date, const DB_TIME * timeval, DB_TIMESTAMP * utime, - TZ_ID * dest_tz_id); - extern int db_timestamp_encode_utc (const DB_DATE * date, const DB_TIME * timeval, DB_TIMESTAMP * utime); - extern int 
db_timestamp_decode_ses (const DB_TIMESTAMP * utime, DB_DATE * date, DB_TIME * timeval); - extern void db_timestamp_decode_utc (const DB_TIMESTAMP * utime, DB_DATE * date, DB_TIME * timeval); - extern int db_timestamp_decode_w_reg (const DB_TIMESTAMP * utime, const TZ_REGION * tz_region, DB_DATE * date, - DB_TIME * timeval); - extern int db_timestamp_decode_w_tz_id (const DB_TIMESTAMP * utime, const TZ_ID * tz_id, DB_DATE * date, - DB_TIME * timeval); - extern int db_timestamp_to_string (char *buf, int bufsize, DB_TIMESTAMP * utime); - extern int db_timestamptz_to_string (char *buf, int bufsize, DB_TIMESTAMP * utime, const TZ_ID * tz_id); - extern int db_timestampltz_to_string (char *buf, int bufsize, DB_TIMESTAMP * utime); - extern int db_string_to_timestamp (const char *buf, DB_TIMESTAMP * utime); - extern int db_string_to_timestamp_ex (const char *buf, int buf_len, DB_TIMESTAMP * utime); - extern int db_date_parse_timestamp (char const *str, int str_len, DB_TIMESTAMP * utime); - extern int db_string_to_timestamptz (const char *str, DB_TIMESTAMPTZ * ts_tz, bool * has_zone); - extern int db_string_to_timestamptz_ex (const char *str, int str_len, DB_TIMESTAMPTZ * ts_tz, bool * has_zone); - extern int db_string_to_timestampltz (const char *str, DB_TIMESTAMP * ts); - extern int db_string_to_timestampltz_ex (const char *str, int str_len, DB_TIMESTAMP * ts); - -/* DB_TIME functions */ - extern int db_time_to_string (char *buf, int bufsize, DB_TIME * dbtime); - extern bool db_string_check_explicit_time (const char *str, int str_len); - extern int db_string_to_time (const char *buf, DB_TIME * dbtime); - extern int db_string_to_time_ex (const char *buf, int buf_len, DB_TIME * dbtime); - extern int db_date_parse_time (char const *str, int str_len, DB_TIME * time, int *milisec); - -/* Unix-like functions */ - extern time_t db_mktime (DB_DATE * date, DB_TIME * timeval); - extern int db_strftime (char *s, int smax, const char *fmt, DB_DATE * date, DB_TIME * timeval); - extern 
void db_localtime (time_t * epoch_time, DB_DATE * date, DB_TIME * timeval); - extern void db_localdatetime (time_t * epoch_time, DB_DATETIME * datetime); - - -/* generic calculation functions */ - extern int julian_encode (int m, int d, int y); - extern void julian_decode (int jul, int *monthp, int *dayp, int *yearp, int *weekp); - extern int day_of_week (int jul_day); - extern bool is_leap_year (int year); - extern int db_tm_encode (struct tm *c_time_struct, DB_DATE * date, DB_TIME * timeval); - extern int db_get_day_of_year (int year, int month, int day); - extern int db_get_day_of_week (int year, int month, int day); - extern int db_get_week_of_year (int year, int month, int day, int mode); - extern int db_check_time_date_format (const char *format_s); - extern int db_add_weeks_and_days_to_date (int *day, int *month, int *year, int weeks, int day_week); - -/* DB_ELO function */ - extern int db_create_fbo (DB_VALUE * value, DB_TYPE type); - extern int db_elo_copy_structure (const DB_ELO * src, DB_ELO * dest); - extern void db_elo_free_structure (DB_ELO * elo); - - extern int db_elo_copy (const DB_ELO * src, DB_ELO * dest); - extern int db_elo_delete (DB_ELO * elo); - - extern int64_t db_elo_size (DB_ELO * elo); - extern int db_elo_read (const DB_ELO * elo, int64_t pos, void *buf, size_t count); - extern int db_elo_write (DB_ELO * elo, int64_t pos, void *buf, size_t count); - -/* Unix-like functions */ - extern time_t db_mktime (DB_DATE * date, DB_TIME * timeval); - extern int db_strftime (char *s, int smax, const char *fmt, DB_DATE * date, DB_TIME * timeval); - extern void db_localtime (time_t * epoch_time, DB_DATE * date, DB_TIME * timeval); - -/* generic calculation functions */ - extern int db_tm_encode (struct tm *c_time_struct, DB_DATE * date, DB_TIME * timeval); - - - - typedef struct cache_time CACHE_TIME; - struct cache_time - { - int sec; - int usec; - }; - -#define CACHE_TIME_EQ(T1, T2) \ - (((T1)->sec != 0) && \ - ((T1)->sec == (T2)->sec) && \ - 
((T1)->usec == (T2)->usec)) - -#define CACHE_TIME_RESET(T) \ - do { \ - (T)->sec = 0; \ - (T)->usec = 0; \ - } while (0) - -#define CACHE_TIME_MAKE(CT, TV) \ - do { \ - (CT)->sec = (TV)->tv_sec; \ - (CT)->usec = (TV)->tv_usec; \ - } while (0) - -#define OR_CACHE_TIME_SIZE (OR_INT_SIZE * 2) - -#define OR_PACK_CACHE_TIME(PTR, T) \ - do { \ - if ((CACHE_TIME *) (T) != NULL) { \ - PTR = or_pack_int(PTR, (T)->sec); \ - PTR = or_pack_int(PTR, (T)->usec); \ - } \ - else { \ - PTR = or_pack_int(PTR, 0); \ - PTR = or_pack_int(PTR, 0); \ - } \ - } while (0) - -#define OR_UNPACK_CACHE_TIME(PTR, T) \ - do { \ - if ((CACHE_TIME *) (T) != NULL) { \ - PTR = or_unpack_int(PTR, &((T)->sec)); \ - PTR = or_unpack_int(PTR, &((T)->usec)); \ - } \ - } while (0) +#define SQLX_MAX_CMD_TYPE CUBRID_MAX_STMT_TYPE +#define SQLX_CMD_CALL_SP CUBRID_STMT_CALL_SP +#define SQLX_CMD_UNKNOWN CUBRID_STMT_UNKNOWN extern bool db_is_client_cache_reusable (DB_QUERY_RESULT * result); extern int db_query_seek_tuple (DB_QUERY_RESULT * result, int offset, int seek_mode); diff --git a/src/compat/dbtype.h b/src/compat/dbtype.h index 23b157e3f77..cf930424337 100644 --- a/src/compat/dbtype.h +++ b/src/compat/dbtype.h @@ -32,7 +32,6 @@ #include "config.h" #include "system_parameter.h" -#include "dbdef.h" #include "error_manager.h" #include "system.h" #include "dbtype_def.h" @@ -43,7 +42,6 @@ #include "object_primitive.h" #include "memory_alloc.h" - #define DB_CURRENCY_DEFAULT db_get_currency_default() #define db_set db_collection @@ -150,11 +148,6 @@ extern "C" { #endif - //extern DB_TYPE setobj_type (COL * set); - /********************************************************/ - /* From elo.h */ - - /********************************************************/ /* From db_date.h */ @@ -286,6 +279,8 @@ extern "C" extern bool db_value_is_corrupted (const DB_VALUE * value); + extern int db_json_val_from_str (const char *raw_str, const int str_size, DB_VALUE * json_val); + /* Use the inline version of the functions. 
*/ #include "dbtype_function.i" @@ -293,4 +288,4 @@ extern "C" } #endif -#endif /* _DBTYPE_H_ */ +#endif /* _DBTYPE_H_ */ diff --git a/src/compat/dbtype_def.h b/src/compat/dbtype_def.h index bd3e55be637..cefeadc7493 100644 --- a/src/compat/dbtype_def.h +++ b/src/compat/dbtype_def.h @@ -42,6 +42,12 @@ extern "C" typedef char need_clear_type; #endif +#define IS_VALID_ISOLATION_LEVEL(isolation_level) \ + (TRAN_MINVALUE_ISOLATION <= (isolation_level) \ + && (isolation_level) <= TRAN_MAXVALUE_ISOLATION) + +#define TRAN_DEFAULT_ISOLATION_LEVEL() (TRAN_DEFAULT_ISOLATION) + #if defined (__GNUC__) && defined (NDEBUG) #define ALWAYS_INLINE always_inline #else @@ -1245,6 +1251,27 @@ extern "C" V_ERROR = -1 } DB_LOGICAL; +/********************************************************/ + /* From tz_support.h */ + enum tz_region_type + { + TZ_REGION_OFFSET = 0, + TZ_REGION_ZONE = 1 + }; + typedef enum tz_region_type TZ_REGION_TYPE; + + typedef struct tz_region TZ_REGION; + struct tz_region + { + TZ_REGION_TYPE type; /* 0 : offset ; 1 : zone */ + union + { + int offset; /* in seconds */ + unsigned int zone_id; /* geographical zone id */ + }; + }; +/********************************************************/ + #ifdef __cplusplus } #endif /* __cplusplus */ diff --git a/src/compat/json_table_def.h b/src/compat/json_table_def.h new file mode 100644 index 00000000000..c4fa1229e57 --- /dev/null +++ b/src/compat/json_table_def.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +// +// json_table_def.h - json table common definitions (cross modules) +// + +#ifndef _JSON_TABLE_DEF_H_ +#define _JSON_TABLE_DEF_H_ + +// note - this is included in C compiled files + +// forward definitions +struct db_value; + +enum json_table_column_behavior_type +{ + JSON_TABLE_RETURN_NULL, + JSON_TABLE_THROW_ERROR, + JSON_TABLE_DEFAULT_VALUE +}; + +enum json_table_column_function +{ + JSON_TABLE_EXTRACT, + JSON_TABLE_EXISTS, + JSON_TABLE_ORDINALITY +}; + +struct json_table_column_behavior +{ + enum json_table_column_behavior_type m_behavior; + struct db_value *m_default_value; +}; + +enum json_table_expand_type +{ + JSON_TABLE_ARRAY_EXPAND, + JSON_TABLE_OBJECT_EXPAND, + JSON_TABLE_NO_EXPAND +}; + +#endif // _JSON_TABLE_DEF_H_ diff --git a/src/connection/connection_cl.h b/src/connection/connection_cl.h index 2eb3d1b7318..48ec6f66af9 100644 --- a/src/connection/connection_cl.h +++ b/src/connection/connection_cl.h @@ -30,6 +30,22 @@ #include "connection_defs.h" #include "connection_support.h" +/* the order to connect to db-hosts in databases.txt */ +#define DB_CONNECT_ORDER_SEQ 0 +#define DB_CONNECT_ORDER_RANDOM 1 + +/* abnormal DB host status */ +#define DB_HS_NORMAL 0x00000000 +#define DB_HS_CONN_TIMEOUT 0x00000001 +#define DB_HS_CONN_FAILURE 0x00000002 +#define DB_HS_MISMATCHED_RW_MODE 0x00000004 +#define DB_HS_HA_DELAYED 0x00000008 +#define DB_HS_NON_PREFFERED_HOSTS 0x00000010 +#define DB_HS_UNUSABLE_DATABASES 0x00000020 + +#define DB_HS_RECONNECT_INDICATOR \ + (DB_HS_MISMATCHED_RW_MODE | DB_HS_HA_DELAYED | DB_HS_NON_PREFFERED_HOSTS) + extern void css_shutdown_conn (CSS_CONN_ENTRY * conn); extern CSS_CONN_ENTRY *css_make_conn (SOCKET fd); extern void css_free_conn 
(CSS_CONN_ENTRY * conn); diff --git a/src/connection/connection_support.c b/src/connection/connection_support.c index 442198f236c..3e5a307c3bb 100644 --- a/src/connection/connection_support.c +++ b/src/connection/connection_support.c @@ -83,6 +83,7 @@ #include "heap_file.h" #endif /* defined (SERVER_MODE) || defined (SA_MODE) */ #include "dbtype.h" +#include "tz_support.h" #include "db_date.h" #include "show_scan.h" diff --git a/src/executables/compactdb.c b/src/executables/compactdb.c index 9a42ba312ed..90180131af9 100644 --- a/src/executables/compactdb.c +++ b/src/executables/compactdb.c @@ -32,7 +32,6 @@ #include "porting.h" #include "dbtype.h" -#include "dbdef.h" #include "load_object.h" #include "db.h" #include "locator_cl.h" diff --git a/src/executables/compactdb_cl.c b/src/executables/compactdb_cl.c index e1f53c9b1cf..5deec075849 100644 --- a/src/executables/compactdb_cl.c +++ b/src/executables/compactdb_cl.c @@ -27,7 +27,6 @@ #include "porting.h" #include "dbtype.h" -#include "dbdef.h" #include "load_object.h" #include "db.h" #include "locator_cl.h" diff --git a/src/executables/loader.h b/src/executables/loader.h index 0f5070fe927..7c1e15fe732 100644 --- a/src/executables/loader.h +++ b/src/executables/loader.h @@ -30,7 +30,7 @@ //#include #include "porting.h" -#include "dbdef.h" +#include "dbtype_def.h" typedef struct LDR_CONTEXT LDR_CONTEXT; diff --git a/src/executables/loader_grammar.y b/src/executables/loader_grammar.y index 1078769002c..22941d319e8 100644 --- a/src/executables/loader_grammar.y +++ b/src/executables/loader_grammar.y @@ -29,9 +29,9 @@ #include #include +#include "dbtype.h" #include "dbi.h" #include "utility.h" -#include "dbtype.h" #include "language_support.h" #include "message_catalog.h" #include "memory_alloc.h" diff --git a/src/executables/loadjava.c b/src/executables/loadjava.c index 46cab944281..02b4224afc2 100644 --- a/src/executables/loadjava.c +++ b/src/executables/loadjava.c @@ -116,7 +116,8 @@ filecopy (const char *fn_src, 
const char *fn_dst) c = getchar (); if (c != 'Y' && c != 'y') { - fprintf (stdout, "loadjava is cancled\n"); + fclose (fh_src); + fprintf (stdout, "loadjava is canceled\n"); return 0; } } diff --git a/src/executables/master.c b/src/executables/master.c index 184764563e8..8178cacb818 100644 --- a/src/executables/master.c +++ b/src/executables/master.c @@ -1127,7 +1127,7 @@ main (int argc, char **argv) { int port_id; CSS_CONN_ENTRY *conn; - static const char *suffix = "_master.err"; + static const char suffix[] = "_master.err"; char hostname[MAXHOSTNAMELEN + sizeof (suffix)]; char *errlog = NULL; int status = EXIT_SUCCESS; diff --git a/src/executables/util_cs.c b/src/executables/util_cs.c index f3d40a26c87..0773a029285 100644 --- a/src/executables/util_cs.c +++ b/src/executables/util_cs.c @@ -2685,6 +2685,7 @@ copylogdb (UTIL_FUNCTION_ARG * arg) return EXIT_FAILURE; } + er_set_ignore_uninit (true); } #endif @@ -2891,6 +2892,7 @@ applylogdb (UTIL_FUNCTION_ARG * arg) } return EXIT_FAILURE; } + er_set_ignore_uninit (true); } if (HA_GET_MODE () == HA_MODE_REPLICA) diff --git a/src/jdbc/com/cubrid/jsp/ExecuteThread.java b/src/jdbc/com/cubrid/jsp/ExecuteThread.java old mode 100755 new mode 100644 index 8361f14ed8d..e5bbe2f7d57 --- a/src/jdbc/com/cubrid/jsp/ExecuteThread.java +++ b/src/jdbc/com/cubrid/jsp/ExecuteThread.java @@ -171,8 +171,8 @@ public void run() { sp = null; result = null; - // toClient.close(); - // client.close(); + // toClient.close(); + // client.close(); } catch (Throwable e) { if (e instanceof IOException) { break; @@ -329,8 +329,8 @@ private void sendError(Throwable e, Socket socket) throws IOException { toClient.writeInt(0x4); toClient.flush(); - // toClient.close(); - // client.close(); + // toClient.close(); + // client.close(); } private StoredProcedure makeStoredProcedure() throws Exception { @@ -338,30 +338,30 @@ private StoredProcedure makeStoredProcedure() throws Exception { this.client.getInputStream())); int startCode = dis.readInt(); - // 
System.out.println("startCode= " + startCode); + // System.out.println("startCode= " + startCode); if (startCode != 0x1) return null; int methodSigLength = dis.readInt(); - // System.out.println("methodSigLength= " + methodSigLength); + // System.out.println("methodSigLength= " + methodSigLength); byte[] methodSig = new byte[methodSigLength]; dis.readFully(methodSig); - // System.out.println("methodSig= " + new String(methodSig)); + // System.out.println("methodSig= " + new String(methodSig)); int paramCount = dis.readInt(); - // System.out.println("paramCount= " + paramCount); + // System.out.println("paramCount= " + paramCount); Value[] args = readArguments(dis, paramCount); - // for (int i = 0; i < args.length; i++) { - // System.out.println("arg[" + i + "]= " + args[i]); - // } + // for (int i = 0; i < args.length; i++) { + // System.out.println("arg[" + i + "]= " + args[i]); + // } int returnType = dis.readInt(); - // System.out.println("returnType= " + returnType); + // System.out.println("returnType= " + returnType); int endCode = dis.readInt(); - // System.out.println("endcode= " + endCode); + // System.out.println("endcode= " + endCode); if (startCode != endCode) return null; @@ -374,16 +374,16 @@ private Value[] readArguments(DataInputStream dis, int paramCount) for (int i = 0; i < paramCount; i++) { int mode = dis.readInt(); - // System.out.println("mode= " + mode); + // System.out.println("mode= " + mode); int dbType = dis.readInt(); - // System.out.println("dbType= " + dbType); + // System.out.println("dbType= " + dbType); int paramType = dis.readInt(); - // System.out.println("paramType= " + paramType); + // System.out.println("paramType= " + paramType); int paramSize = dis.readInt(); - // System.out.println("paramSize= " + paramSize); + // System.out.println("paramSize= " + paramSize); Value arg = readArgument(dis, paramSize, paramType, mode, dbType); args[i] = (arg); @@ -398,10 +398,10 @@ private Value[] readArgumentsForSet(DataInputStream dis, int 
paramCount) for (int i = 0; i < paramCount; i++) { int paramType = dis.readInt(); - // System.out.println("paramType= " + paramType); + // System.out.println("paramType= " + paramType); int paramSize = dis.readInt(); - // System.out.println("paramSize= " + paramSize); + // System.out.println("paramSize= " + paramSize); Value arg = readArgument(dis, paramSize, paramType, Value.IN, 0); args[i] = (arg); @@ -416,11 +416,11 @@ private Value readArgument(DataInputStream dis, int paramSize, Value arg = null; switch (paramType) { case DB_SHORT: - // assert paramSize == 4 + // assert paramSize == 4 arg = new ShortValue((short) dis.readInt(), mode, dbType); break; case DB_INT: - // assert paramSize == 4 + // assert paramSize == 4 arg = new IntValue(dis.readInt(), mode, dbType); break; case DB_BIGINT: @@ -428,12 +428,12 @@ private Value readArgument(DataInputStream dis, int paramSize, arg = new LongValue(dis.readLong(), mode, dbType); break; case DB_FLOAT: - // assert paramSize == 4 + // assert paramSize == 4 arg = new FloatValue(dis.readFloat(), mode, dbType); break; case DB_DOUBLE: case DB_MONETARY: - // assert paramSize == 8 + // assert paramSize == 8 arg = new DoubleValue(dis.readDouble(), mode, dbType); break; case DB_NUMERIC: { @@ -454,7 +454,7 @@ private Value readArgument(DataInputStream dis, int paramSize, break; case DB_CHAR: case DB_STRING: - // assert paramSize == n + // assert paramSize == n { byte[] paramValue = new byte[paramSize]; dis.readFully(paramValue); @@ -471,7 +471,7 @@ private Value readArgument(DataInputStream dis, int paramSize, } break; case DB_DATE: - // assert paramSize == 3 + // assert paramSize == 3 { int year = dis.readInt(); int month = dis.readInt(); @@ -481,7 +481,7 @@ private Value readArgument(DataInputStream dis, int paramSize, } break; case DB_TIME: - // assert paramSize == 3 + // assert paramSize == 3 { int hour = dis.readInt(); int min = dis.readInt(); @@ -493,7 +493,7 @@ private Value readArgument(DataInputStream dis, int paramSize, } 
break; case DB_TIMESTAMP: - // assert paramSize == 6 + // assert paramSize == 6 { int year = dis.readInt(); int month = dis.readInt(); @@ -529,7 +529,7 @@ private Value readArgument(DataInputStream dis, int paramSize, case DB_MULTISET: case DB_SEQUENCE: { int nCol = dis.readInt(); - // System.out.println(nCol); + // System.out.println(nCol); arg = new SetValue(readArgumentsForSet(dis, nCol), mode, dbType); } break; @@ -560,7 +560,7 @@ private Value readArgument(DataInputStream dis, int paramSize, arg = new NullValue(mode, dbType); break; default: - // unknown type + // unknown type break; } return arg; diff --git a/src/jdbc/com/cubrid/jsp/Server.java b/src/jdbc/com/cubrid/jsp/Server.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/StoredProcedure.java b/src/jdbc/com/cubrid/jsp/StoredProcedure.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/StoredProcedureClassLoader.java b/src/jdbc/com/cubrid/jsp/StoredProcedureClassLoader.java old mode 100755 new mode 100644 index 74481645a3d..8afb4d5c69f --- a/src/jdbc/com/cubrid/jsp/StoredProcedureClassLoader.java +++ b/src/jdbc/com/cubrid/jsp/StoredProcedureClassLoader.java @@ -1,30 +1,30 @@ /* - * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. + * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. * - * Redistribution and use in source and binary forms, with or without modification, - * are permitted provided that the following conditions are met: + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: * - * - Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
* - * - Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. * - * - Neither the name of the nor the names of its contributors - * may be used to endorse or promote products derived from this software without - * specific prior written permission. + * - Neither the name of the nor the names of its contributors + * may be used to endorse or promote products derived from this software without + * specific prior written permission. * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, - * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY - * OF SUCH DAMAGE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, + * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. * */ @@ -38,7 +38,7 @@ import java.util.HashMap; public class StoredProcedureClassLoader extends URLClassLoader { - private static StoredProcedureClassLoader instance = null; + private static volatile StoredProcedureClassLoader instance = null; private HashMap files = new HashMap(); @@ -51,32 +51,28 @@ private StoredProcedureClassLoader() { private void init() { root = new File(Server.getSpPath() + "/java"); + initJars(); + initClasses(); + } + + private void initJars() { File[] jars = root.listFiles(new FileFilter() { public boolean accept(File f) { - if (f.getName().lastIndexOf(".jar") > 0) - return true; - return false; + return isJarFile(f); + } }); - for (int i = 0; i < jars.length; i++) { - files.put(jars[i].getName(), new Long(jars[i].lastModified())); + if (jars == null) { + return; } - File[] classes = root.listFiles(new FileFilter() { - public boolean accept(File f) { - if (f.getName().lastIndexOf(".class") > 0) - return true; - return false; - } - }); - - for (int i = 0; i < classes.length; i++) { - files.put(classes[i].getName(), new Long(classes[i].lastModified())); + for (int i = 0; i < jars.length; i++) { + files.put(jars[i].getName(), jars[i].lastModified()); } try { - addURL(root.toURI().toURL()); + addURL(root.toURI().toURL()); for (int i = 0; i < jars.length; i++) { addURL(jars[i].toURI().toURL()); } @@ -85,39 +81,55 @@ public boolean accept(File f) { } } + private void initClasses() { + File[] classes = 
root.listFiles(new FileFilter() { + public boolean accept(File f) { + return isClassFile(f); + } + }); + + if (classes == null) { + return; + } + + for (int i = 0; i < classes.length; i++) { + files.put(classes[i].getName(), classes[i].lastModified()); + } + } + public Class loadClass(String name) throws ClassNotFoundException { - try { - if (!modified()) - return super.loadClass(name); - } catch (MalformedURLException e) { - Server.log(e); + if (!modified()) { + return super.loadClass(name); } instance = new StoredProcedureClassLoader(); return instance.loadClass(name); } - private boolean modified() throws MalformedURLException { + private boolean modified() { File[] files = root.listFiles(new FileFilter() { public boolean accept(File f) { - if (f.getName().lastIndexOf(".jar") > 0) - return true; - if (f.getName().lastIndexOf(".class") > 0) - return true; - return false; + return isJarFile(f) || isClassFile(f); } }); - if (this.files.size() != files.length) + if (files == null) { + return !this.files.isEmpty(); + } + + if (this.files.size() != files.length) { return true; + } for (int i = 0; i < files.length; i++) { - if (!this.files.containsKey(files[i].getName())) + if (!this.files.containsKey(files[i].getName())) { return true; + } - long l = ((Long) this.files.get(files[i].getName())).longValue(); - if (files[i].lastModified() != l) + long l = this.files.get(files[i].getName()); + if (files[i].lastModified() != l) { return true; + } } return false; @@ -125,9 +137,21 @@ public boolean accept(File f) { public static StoredProcedureClassLoader getInstance() { if (instance == null) { - instance = new StoredProcedureClassLoader(); + synchronized (StoredProcedureClassLoader.class) { + if (instance == null) { + instance = new StoredProcedureClassLoader(); + } + } } return instance; } + + private boolean isJarFile(File f) { + return f.getName().lastIndexOf(".jar") > 0; + } + + private boolean isClassFile(File f) { + return f.getName().lastIndexOf(".class") > 0; + } } 
diff --git a/src/jdbc/com/cubrid/jsp/TargetMethod.java b/src/jdbc/com/cubrid/jsp/TargetMethod.java old mode 100755 new mode 100644 index 15e86b42098..d435843b9d4 --- a/src/jdbc/com/cubrid/jsp/TargetMethod.java +++ b/src/jdbc/com/cubrid/jsp/TargetMethod.java @@ -44,7 +44,7 @@ public class TargetMethod { private String className; - // private Class targetClass; + // private Class targetClass; private String methodName; @@ -75,7 +75,7 @@ public TargetMethod(String signature) throws Exception { } else { className = signature.substring(0, nameStart - 1); } - // targetClass = getClass(className); + // targetClass = getClass(className); methodName = signature.substring(nameStart, argStart - 1); String args = signature.substring(argStart, argEnd); argsTypes = classesFor(args); @@ -92,7 +92,7 @@ private Class[] classesFor(String args) throws ClassNotFoundException, if (args.length() == 0) { return new Class[0]; } - // Count semicolons occurences. + // Count semicolons occurences. int semiColons = 0; for (int i = 0; i < args.length(); i++) { if (args.charAt(i) == ',') { @@ -214,7 +214,7 @@ private static void initdescriptorMap() { public Method getMethod() throws SecurityException, NoSuchMethodException, ClassNotFoundException { return getClass(className).getMethod(methodName, argsTypes); - // return targetClass.getMethod(methodName, argsTypes); + // return targetClass.getMethod(methodName, argsTypes); } public Class[] getArgsTypes() { diff --git a/src/jdbc/com/cubrid/jsp/TargetMethodCache.java b/src/jdbc/com/cubrid/jsp/TargetMethodCache.java index b7994bea4a3..88bb494212c 100644 --- a/src/jdbc/com/cubrid/jsp/TargetMethodCache.java +++ b/src/jdbc/com/cubrid/jsp/TargetMethodCache.java @@ -1,30 +1,30 @@ /* - * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. + * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. 
* - * Redistribution and use in source and binary forms, with or without modification, - * are permitted provided that the following conditions are met: + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: * - * - Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. * - * - Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. * - * - Neither the name of the nor the names of its contributors - * may be used to endorse or promote products derived from this software without - * specific prior written permission. + * - Neither the name of the nor the names of its contributors + * may be used to endorse or promote products derived from this software without + * specific prior written permission. * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, - * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY - * OF SUCH DAMAGE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, + * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
* */ @@ -35,7 +35,7 @@ public class TargetMethodCache { private HashMap methods; - private static TargetMethodCache instance = null; + private static volatile TargetMethodCache instance = null; private TargetMethodCache() { methods = new HashMap(); @@ -55,7 +55,11 @@ public TargetMethod get(String signature) throws Exception { public static TargetMethodCache getInstance() { if (instance == null) { - instance = new TargetMethodCache(); + synchronized (TargetMethodCache.class) { + if (instance == null) { + instance = new TargetMethodCache(); + } + } } return instance; diff --git a/src/jdbc/com/cubrid/jsp/exception/ExecuteException.java b/src/jdbc/com/cubrid/jsp/exception/ExecuteException.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/exception/TypeMismatchException.java b/src/jdbc/com/cubrid/jsp/exception/TypeMismatchException.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/BooleanValue.java b/src/jdbc/com/cubrid/jsp/value/BooleanValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/ByteValue.java b/src/jdbc/com/cubrid/jsp/value/ByteValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/DateValue.java b/src/jdbc/com/cubrid/jsp/value/DateValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/DatetimeValue.java b/src/jdbc/com/cubrid/jsp/value/DatetimeValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/DoubleValue.java b/src/jdbc/com/cubrid/jsp/value/DoubleValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/FloatValue.java b/src/jdbc/com/cubrid/jsp/value/FloatValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/IntValue.java b/src/jdbc/com/cubrid/jsp/value/IntValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/LongValue.java b/src/jdbc/com/cubrid/jsp/value/LongValue.java old mode 100755 
new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/NullValue.java b/src/jdbc/com/cubrid/jsp/value/NullValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/NumericValue.java b/src/jdbc/com/cubrid/jsp/value/NumericValue.java old mode 100755 new mode 100644 index 3b415a557c9..fd0cde2704f --- a/src/jdbc/com/cubrid/jsp/value/NumericValue.java +++ b/src/jdbc/com/cubrid/jsp/value/NumericValue.java @@ -105,7 +105,7 @@ public Object toObject() throws TypeMismatchException { } public String toString() { - return value.toString(); // TODO: using NumberFormat class + return value.toString(); // TODO: using NumberFormat class } public byte[] toByteArray() throws TypeMismatchException { diff --git a/src/jdbc/com/cubrid/jsp/value/ResultSetValue.java b/src/jdbc/com/cubrid/jsp/value/ResultSetValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/SetValue.java b/src/jdbc/com/cubrid/jsp/value/SetValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/ShortValue.java b/src/jdbc/com/cubrid/jsp/value/ShortValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/StringValue.java b/src/jdbc/com/cubrid/jsp/value/StringValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/TimeValue.java b/src/jdbc/com/cubrid/jsp/value/TimeValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/TimestampValue.java b/src/jdbc/com/cubrid/jsp/value/TimestampValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/com/cubrid/jsp/value/Value.java b/src/jdbc/com/cubrid/jsp/value/Value.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/driver/CUBRIDBinaryString.java b/src/jdbc/cubrid/jdbc/driver/CUBRIDBinaryString.java index dffa00b4183..481b66f7d99 100644 --- a/src/jdbc/cubrid/jdbc/driver/CUBRIDBinaryString.java +++ b/src/jdbc/cubrid/jdbc/driver/CUBRIDBinaryString.java @@ -1,58 +1,58 @@ -/* - * 
Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. - * - * Redistribution and use in source and binary forms, with or without modification, - * are permitted provided that the following conditions are met: - * - * - Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * - Neither the name of the nor the names of its contributors - * may be used to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, - * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY - * OF SUCH DAMAGE. 
- * - */ - -package cubrid.jdbc.driver; - -public class CUBRIDBinaryString { - private byte[] bytes; - - public int length; - - public CUBRIDBinaryString(byte[] b) { - bytes = b; - length = b.length; - } - - - public byte[] getBytes() { - return bytes; - } - - - public String toString() { - String result = ""; - - for (int i = 0; i < length; i++) { - result += String.format("%02X ", (byte) bytes[i]); - } - - return result; - } -} +/* + * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * - Neither the name of the nor the names of its contributors + * may be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, + * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + */ + +package cubrid.jdbc.driver; + +public class CUBRIDBinaryString { + private byte[] bytes; + + public int length; + + public CUBRIDBinaryString(byte[] b) { + bytes = b; + length = b.length; + } + + + public byte[] getBytes() { + return bytes; + } + + + public String toString() { + String result = ""; + + for (int i = 0; i < length; i++) { + result += String.format("%02X ", (byte) bytes[i]); + } + + return result; + } +} diff --git a/src/jdbc/cubrid/jdbc/driver/CUBRIDDatabaseMetaData.java b/src/jdbc/cubrid/jdbc/driver/CUBRIDDatabaseMetaData.java index a163ed66c78..5f24efd2454 100644 --- a/src/jdbc/cubrid/jdbc/driver/CUBRIDDatabaseMetaData.java +++ b/src/jdbc/cubrid/jdbc/driver/CUBRIDDatabaseMetaData.java @@ -1215,6 +1215,9 @@ public synchronized ResultSet getColumns(String catalog, } else if (type == UUType.U_TYPE_DATETIMELTZ) { value[4] = new Short((short) java.sql.Types.TIMESTAMP); value[5] = "DATETIMELTZ"; + } else if (type == UUType.U_TYPE_JSON) { + value[4] = new Short((short) java.sql.Types.VARCHAR); + value[5] = "JSON"; } rs.addTuple(value); @@ -1545,6 +1548,11 @@ public synchronized ResultSet getBestRowIdentifier(String catalog, value[3] = "DATETIMELTZ"; value[4] = new Integer(0); break; + case UUType.U_TYPE_JSON: + value[2] = new Integer(java.sql.Types.VARCHAR); + value[3] = "JSON"; + value[4] = new Integer(0); + break; } rs.addTuple(value); diff --git a/src/jdbc/cubrid/jdbc/driver/CUBRIDException.java 
b/src/jdbc/cubrid/jdbc/driver/CUBRIDException.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/driver/CUBRIDOutResultSet.java b/src/jdbc/cubrid/jdbc/driver/CUBRIDOutResultSet.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/driver/CUBRIDResultSetMetaData.java b/src/jdbc/cubrid/jdbc/driver/CUBRIDResultSetMetaData.java index 1b88f9cbe73..b1879e91053 100644 --- a/src/jdbc/cubrid/jdbc/driver/CUBRIDResultSetMetaData.java +++ b/src/jdbc/cubrid/jdbc/driver/CUBRIDResultSetMetaData.java @@ -394,6 +394,10 @@ protected CUBRIDResultSetMetaData(UColumnInfo[] col_info) { ele_type[i] = java.sql.Types.VARCHAR; ele_type_name[i] = "NCHAR VARYING"; break; + case UUType.U_TYPE_JSON: + ele_type[i] = java.sql.Types.VARCHAR; + ele_type_name[i] = "JSON"; + break; default: break; } @@ -430,6 +434,15 @@ protected CUBRIDResultSetMetaData(UColumnInfo[] col_info) { ele_type[i] = -1; break; + case UUType.U_TYPE_JSON: + col_type_name[i] = "JSON"; + col_type[i] = java.sql.Types.VARCHAR; + ele_type[i] = -1; + if (col_prec[i] > col_disp_size[i]) { + col_disp_size[i] = col_prec[i]; + } + break; + default: break; } @@ -488,6 +501,15 @@ protected CUBRIDResultSetMetaData(UColumnInfo[] col_info) { } col_class_name[i] = "java.lang.String"; } + if (r.type[i] == UUType.U_TYPE_JSON) { + col_type[i] = java.sql.Types.VARCHAR; + col_type_name[i] = "JSON"; + col_prec[i] = r.precision[i]; + if (col_prec[i] > col_disp_size[i]) { + col_disp_size[i] = col_prec[i]; + } + col_class_name[i] = "java.lang.String"; + } if (r.type[i] == UUType.U_TYPE_NULL) { col_type[i] = java.sql.Types.NULL; col_type_name[i] = ""; @@ -517,6 +539,7 @@ private int getDefaultColumnDisplaySize(byte type) { break; case UUType.U_TYPE_VARCHAR: case UUType.U_TYPE_ENUM: + case UUType.U_TYPE_JSON: ret_size = 1; break; case UUType.U_TYPE_BIT: diff --git a/src/jdbc/cubrid/jdbc/jci/CUBRIDArray.java b/src/jdbc/cubrid/jdbc/jci/CUBRIDArray.java old mode 100755 new mode 100644 index c56d167d210..20df969094e --- 
a/src/jdbc/cubrid/jdbc/jci/CUBRIDArray.java +++ b/src/jdbc/cubrid/jdbc/jci/CUBRIDArray.java @@ -102,6 +102,7 @@ class CUBRIDArray { case UUType.U_TYPE_STRING: case UUType.U_TYPE_VARNCHAR: case UUType.U_TYPE_ENUM: + case UUType.U_TYPE_JSON: internalArray = (Object[]) (new String[length]); break; case UUType.U_TYPE_OBJECT: diff --git a/src/jdbc/cubrid/jdbc/jci/CUBRIDCommandType.java b/src/jdbc/cubrid/jdbc/jci/CUBRIDCommandType.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/CUBRIDIsolationLevel.java b/src/jdbc/cubrid/jdbc/jci/CUBRIDIsolationLevel.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UAParameter.java b/src/jdbc/cubrid/jdbc/jci/UAParameter.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UBatchResult.java b/src/jdbc/cubrid/jdbc/jci/UBatchResult.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UBindKey.java b/src/jdbc/cubrid/jdbc/jci/UBindKey.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UBindParameter.java b/src/jdbc/cubrid/jdbc/jci/UBindParameter.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UColumnInfo.java b/src/jdbc/cubrid/jdbc/jci/UColumnInfo.java old mode 100755 new mode 100644 index 64fc5e2f98d..0a2aac88869 --- a/src/jdbc/cubrid/jdbc/jci/UColumnInfo.java +++ b/src/jdbc/cubrid/jdbc/jci/UColumnInfo.java @@ -224,6 +224,7 @@ private String findFQDN(byte cType, int cPrecision, byte cBaseType) { case UUType.U_TYPE_VARCHAR: case UUType.U_TYPE_VARNCHAR: case UUType.U_TYPE_ENUM: + case UUType.U_TYPE_JSON: return "java.lang.String"; case UUType.U_TYPE_NUMERIC: return "java.math.BigDecimal"; @@ -277,6 +278,7 @@ private String findFQDN(byte cType, int cPrecision, byte cBaseType) { case UUType.U_TYPE_VARCHAR: case UUType.U_TYPE_VARNCHAR: case UUType.U_TYPE_ENUM: + case UUType.U_TYPE_JSON: return "java.lang.String[]"; case UUType.U_TYPE_NUMERIC: return "java.lang.Double[]"; diff --git 
a/src/jdbc/cubrid/jdbc/jci/UConnection.java b/src/jdbc/cubrid/jdbc/jci/UConnection.java index a3c12784053..90ac463f10a 100644 --- a/src/jdbc/cubrid/jdbc/jci/UConnection.java +++ b/src/jdbc/cubrid/jdbc/jci/UConnection.java @@ -95,9 +95,10 @@ public class UConnection { public static final int PROTOCOL_V5 = 5; public static final int PROTOCOL_V6 = 6; public static final int PROTOCOL_V7 = 7; + public static final int PROTOCOL_V8 = 8; /* Current protocol version */ - private final static byte CAS_PROTOCOL_VERSION = PROTOCOL_V7; + private final static byte CAS_PROTOCOL_VERSION = PROTOCOL_V8; private final static byte CAS_PROTO_INDICATOR = 0x40; private final static byte CAS_PROTO_VER_MASK = 0x3F; private final static byte CAS_RENEWED_ERROR_CODE = (byte) 0x80; diff --git a/src/jdbc/cubrid/jdbc/jci/UError.java b/src/jdbc/cubrid/jdbc/jci/UError.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UErrorCode.java b/src/jdbc/cubrid/jdbc/jci/UErrorCode.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UFunctionCode.java b/src/jdbc/cubrid/jdbc/jci/UFunctionCode.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UGetTypeConvertedValue.java b/src/jdbc/cubrid/jdbc/jci/UGetTypeConvertedValue.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UManageStringInCType.java b/src/jdbc/cubrid/jdbc/jci/UManageStringInCType.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UOutputBuffer.java b/src/jdbc/cubrid/jdbc/jci/UOutputBuffer.java old mode 100755 new mode 100644 index 39e989c333b..16b6e376ea4 --- a/src/jdbc/cubrid/jdbc/jci/UOutputBuffer.java +++ b/src/jdbc/cubrid/jdbc/jci/UOutputBuffer.java @@ -329,6 +329,7 @@ int writeParameter(byte type, Object value, switch (type) { case UUType.U_TYPE_NULL: return addNull(); + case UUType.U_TYPE_JSON: case UUType.U_TYPE_CHAR: case UUType.U_TYPE_NCHAR: case UUType.U_TYPE_STRING: @@ -571,6 +572,7 @@ private int 
writeCollection(CUBRIDArray data, case UUType.U_TYPE_STRING: case UUType.U_TYPE_VARNCHAR: case UUType.U_TYPE_ENUM: + case UUType.U_TYPE_JSON: for (int i = 0; i < values.length; i++) { if (values[i] == null) { collection_size += addNull(); diff --git a/src/jdbc/cubrid/jdbc/jci/UParameter.java b/src/jdbc/cubrid/jdbc/jci/UParameter.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UPutByOIDParameter.java b/src/jdbc/cubrid/jdbc/jci/UPutByOIDParameter.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UResCache.java b/src/jdbc/cubrid/jdbc/jci/UResCache.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UResultInfo.java b/src/jdbc/cubrid/jdbc/jci/UResultInfo.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UResultTuple.java b/src/jdbc/cubrid/jdbc/jci/UResultTuple.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/USchType.java b/src/jdbc/cubrid/jdbc/jci/USchType.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UShardInfo.java b/src/jdbc/cubrid/jdbc/jci/UShardInfo.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UStatement.java b/src/jdbc/cubrid/jdbc/jci/UStatement.java old mode 100755 new mode 100644 index 55af0a6f3d7..dba62a52401 --- a/src/jdbc/cubrid/jdbc/jci/UStatement.java +++ b/src/jdbc/cubrid/jdbc/jci/UStatement.java @@ -2142,6 +2142,7 @@ private Object readData(UInputBuffer inBuffer, int dataType, int dataSize, Strin case UUType.U_TYPE_STRING: case UUType.U_TYPE_VARNCHAR: case UUType.U_TYPE_ENUM: + case UUType.U_TYPE_JSON: if (charsetName != null && charsetName.equals("BINARY")) { return inBuffer.readBinaryString (dataSize); } else { diff --git a/src/jdbc/cubrid/jdbc/jci/UStatementCacheData.java b/src/jdbc/cubrid/jdbc/jci/UStatementCacheData.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UStmtCache.java b/src/jdbc/cubrid/jdbc/jci/UStmtCache.java old mode 100755 new 
mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UTimedDataInputStream.java b/src/jdbc/cubrid/jdbc/jci/UTimedDataInputStream.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UUType.java b/src/jdbc/cubrid/jdbc/jci/UUType.java old mode 100755 new mode 100644 index fc09cb88f70..a409c626263 --- a/src/jdbc/cubrid/jdbc/jci/UUType.java +++ b/src/jdbc/cubrid/jdbc/jci/UUType.java @@ -94,6 +94,7 @@ abstract public class UUType { public static final byte U_TYPE_DATETIMETZ = 31; public static final byte U_TYPE_DATETIMELTZ = 32; public static final byte U_TYPE_TIMETZ = 33; + public static final byte U_TYPE_JSON = 34; static boolean isCollectionType(byte type) { if (type == UUType.U_TYPE_SET || type == UUType.U_TYPE_MULTISET diff --git a/src/jdbc/cubrid/jdbc/jci/UUpdateParameter.java b/src/jdbc/cubrid/jdbc/jci/UUpdateParameter.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UUrlCache.java b/src/jdbc/cubrid/jdbc/jci/UUrlCache.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/jdbc/jci/UUrlHostKey.java b/src/jdbc/cubrid/jdbc/jci/UUrlHostKey.java old mode 100755 new mode 100644 diff --git a/src/jdbc/cubrid/sql/CUBRIDOID.java b/src/jdbc/cubrid/sql/CUBRIDOID.java old mode 100755 new mode 100644 diff --git a/src/jsp/jsp_cl.c b/src/jsp/jsp_cl.c index f805bb111a8..c9d6cf28662 100644 --- a/src/jsp/jsp_cl.c +++ b/src/jsp/jsp_cl.c @@ -44,7 +44,6 @@ #include "error_manager.h" #include "memory_alloc.h" #include "dbtype.h" -#include "dbdef.h" #include "parser.h" #include "object_domain.h" #include "db.h" diff --git a/src/jsp/jsp_sr.c b/src/jsp/jsp_sr.c index a50d05c320e..dfab2dd243e 100644 --- a/src/jsp/jsp_sr.c +++ b/src/jsp/jsp_sr.c @@ -359,7 +359,8 @@ jsp_start_server (const char *db_name, const char *path) jstring jstr_dbname, jstr_path, jstr_version, jstr_envroot; jobjectArray args; JavaVMInitArgs vm_arguments; - JavaVMOption options[3]; + const int vm_n_options = 3; + JavaVMOption options[vm_n_options]; char 
classpath[PATH_MAX + 32], logging_prop[PATH_MAX + 32]; char *loc_p, *locale; const char *envroot; @@ -367,7 +368,7 @@ jsp_start_server (const char *db_name, const char *path) char optionString2[] = "-Xrs"; CREATE_VM_FUNC create_vm_func = NULL; - if (prm_get_bool_value (PRM_ID_JAVA_STORED_PROCEDURE) == false) + if (!prm_get_bool_value (PRM_ID_JAVA_STORED_PROCEDURE)) { return NO_ERROR; } @@ -395,7 +396,7 @@ jsp_start_server (const char *db_name, const char *path) options[2].optionString = optionString2; vm_arguments.version = JNI_VERSION_1_4; vm_arguments.options = options; - vm_arguments.nOptions = 3; + vm_arguments.nOptions = vm_n_options; vm_arguments.ignoreUnrecognized = JNI_TRUE; locale = NULL; diff --git a/src/object/authenticate.c b/src/object/authenticate.c index bf3132e04a0..58744bb780c 100644 --- a/src/object/authenticate.c +++ b/src/object/authenticate.c @@ -43,7 +43,6 @@ #include "misc_string.h" #include "memory_alloc.h" #include "dbtype.h" -#include "dbdef.h" #include "error_manager.h" #include "boot_cl.h" #include "work_space.h" diff --git a/src/object/authenticate.h b/src/object/authenticate.h index c7d81429e2e..a393425a792 100644 --- a/src/object/authenticate.h +++ b/src/object/authenticate.h @@ -36,8 +36,8 @@ #include #include "error_manager.h" -#include "dbdef.h" #include "class_object.h" +#include "databases_file.h" /* * Authorization Class Names diff --git a/src/object/class_object.c b/src/object/class_object.c index a586dc0692c..13036324022 100644 --- a/src/object/class_object.c +++ b/src/object/class_object.c @@ -1214,6 +1214,13 @@ classobj_put_index (DB_SEQ ** properties, SM_CONSTRAINT_TYPE type, const char *c } /* add index status. */ + /* If the index_status is set to SM_ONLINE_INDEX_BUILDING_DONE, we must change it to NORMAL_INDEX since + * the index has finished loading and the temporary status was set to avoid some previous checks. 
+ */ + if (index_status == SM_ONLINE_INDEX_BUILDING_DONE) + { + index_status = SM_NORMAL_INDEX; + } db_make_int (&value, index_status); classobj_put_value_and_iterate (constraint, constraint_seq_index, value); diff --git a/src/object/class_object.h b/src/object/class_object.h index d66e9f7ded1..4de2046b321 100644 --- a/src/object/class_object.h +++ b/src/object/class_object.h @@ -953,8 +953,7 @@ extern int classobj_rename_constraint (DB_SEQ * properties, const char *prop_nam const char *new_name); #endif -extern int classobj_change_constraint_comment (DB_SEQ * properties, SM_CLASS_CONSTRAINT *cons, - const char *comment); +extern int classobj_change_constraint_comment (DB_SEQ * properties, SM_CLASS_CONSTRAINT * cons, const char *comment); extern int classobj_get_cached_constraint (SM_CONSTRAINT * constraints, SM_CONSTRAINT_TYPE type, BTID * id); extern bool classobj_has_class_unique_constraint (SM_CLASS_CONSTRAINT * constraints); @@ -1109,7 +1108,7 @@ extern SM_PARTITION *classobj_make_partition_info (void); extern void classobj_free_partition_info (SM_PARTITION * partition_info); extern SM_PARTITION *classobj_copy_partition_info (SM_PARTITION * partition_info); -extern int classobj_change_constraint_status (DB_SEQ * properties, SM_CLASS_CONSTRAINT *cons, +extern int classobj_change_constraint_status (DB_SEQ * properties, SM_CLASS_CONSTRAINT * cons, SM_INDEX_STATUS index_status); #endif /* _CLASS_OBJECT_H_ */ diff --git a/src/object/elo.h b/src/object/elo.h index b537b58f605..a65cda59033 100644 --- a/src/object/elo.h +++ b/src/object/elo.h @@ -54,9 +54,4 @@ extern off_t elo_size (DB_ELO * elo); extern ssize_t elo_read (const DB_ELO * elo, off_t pos, void *buf, size_t count); extern ssize_t elo_write (DB_ELO * elo, off_t pos, const void *buf, size_t count); -#if defined(ENABLE_UNUSED_FUNCTION) -extern int elo_get_meta (const DB_ELO * elo, const char *key, char *buf, int bufsz); -extern int elo_set_meta (DB_ELO * elo, const char *key, const char *val); -#endif - 
#endif /* _ELO_H_ */ diff --git a/src/object/object_domain.c b/src/object/object_domain.c index 95ac39462cb..ea8b1a1d29f 100644 --- a/src/object/object_domain.c +++ b/src/object/object_domain.c @@ -38,7 +38,7 @@ #include "object_domain.h" #include "object_primitive.h" #include "numeric_opfunc.h" -#include "dbdef.h" +#include "tz_support.h" #include "db_date.h" #include "mprec.h" #include "set_object.h" @@ -355,7 +355,7 @@ static TP_DOMAIN *tp_Domains[] = { &tp_Bigint_domain, &tp_Datetime_domain, - /* beginning of some "padding" built-in domains that can be used as expansion space when new primitive data types are + /* beginning of some "padding" built-in domains that can be used as expansion space when new primitive data types are * added. */ &tp_Blob_domain, &tp_Clob_domain, @@ -624,7 +624,7 @@ tp_init (void) return er_errid (); } - /* + /* * Make sure the next pointer on all the built-in domains is clear. * Also make sure the built-in domain numbers are assigned consistently. * Assign the builtin indexes starting from 1 so we can use zero to mean @@ -713,14 +713,14 @@ tp_final (void) return; } - /* + /* * Make sure the next pointer on all the built-in domains is clear. * Also make sure the built-in domain numbers are assigned consistently. */ for (i = 0; tp_Domains[i] != NULL; i++) { dlist = tp_Domains[i]; - /* + /* * The first element in the domain array is always a built-in, there * can potentially be other built-ins in the list mixed in with * allocated domains. @@ -736,7 +736,7 @@ tp_final (void) { prev->next_list = next; - /* + /* * Make sure to turn off the cache bit or else tp_domain_free * will ignore the request. */ @@ -746,7 +746,7 @@ tp_final (void) } } - /* + /* * tp_Midxkey_domains[0] was cleared by the above for-loop. * It holds a pointer of tp_Midxkey_domain_list_heads[0] on its head. * The pointer is also stored on tp_Domains[DB_TYPE_MIDXKEY]. 
@@ -848,7 +848,7 @@ tp_enumeration_match (const DB_ENUMERATION * db_enum1, const DB_ENUMERATION * db return 0; } - /* + /* * memcmp is used here because it is necessary for domains like * ENUM('a', 'b') COLLATE utf8_en_ci and * ENUM('A', 'B') COLLATE utf8_en_ci to be regarded as different @@ -949,7 +949,7 @@ tp_domain_free (TP_DOMAIN * dom) db_json_delete_validator (dom->json_validator); } - /* + /* * sub-domains are always completely owned by their root domain, * they cannot be cached anywhere else. */ @@ -1118,7 +1118,7 @@ tp_domain_construct (DB_TYPE domain_type, DB_OBJECT * class_obj, int precision, { new_dm->class_mop = class_obj; new_dm->self_ref = 0; - /* + /* * For compatibility on the server side, class objects must have * the oid in the domain match the oid in the class object. */ @@ -1132,7 +1132,7 @@ tp_domain_construct (DB_TYPE domain_type, DB_OBJECT * class_obj, int precision, } } - /* + /* * have to leave the class OID uninitialized because we don't know how * to get an OID out of a DB_OBJECT on the server. * That shouldn't matter since the server side unpackers will use @@ -1505,7 +1505,7 @@ tp_domain_match_internal (const TP_DOMAIN * dom1, const TP_DOMAIN * dom2, TP_MAT return 0; } - /* + /* * At this point, either dom1 and dom2 have exactly the same type, or * exact_match is TP_STR_MATCH and dom1 and dom2 are a char/varchar * (nchar/varnchar, bit/varbit) pair. @@ -1540,7 +1540,7 @@ tp_domain_match_internal (const TP_DOMAIN * dom1, const TP_DOMAIN * dom2, TP_MAT case DB_TYPE_DATE: case DB_TYPE_MONETARY: case DB_TYPE_SHORT: - /* + /* * these domains have no parameters, they match if the types are the * same. */ @@ -1558,7 +1558,7 @@ tp_domain_match_internal (const TP_DOMAIN * dom1, const TP_DOMAIN * dom2, TP_MAT #if defined (SERVER_MODE) match = OID_EQ (&dom1->class_oid, &dom2->class_oid); #else /* !defined (SERVER_MODE) */ - /* + /* * if "exact" is zero, we should be checking the subclass hierarchy of * dom1 to see id dom2 is in it ! 
*/ @@ -1574,7 +1574,7 @@ tp_domain_match_internal (const TP_DOMAIN * dom1, const TP_DOMAIN * dom2, TP_MAT } else { - /* + /* * We have a mixture of OID & MOPS, it probably isn't necessary to * be this general but try to avoid assuming the class OIDs have * been set when there is a MOP present. @@ -1739,7 +1739,7 @@ tp_domain_match_internal (const TP_DOMAIN * dom1, const TP_DOMAIN * dom2, TP_MAT } else if (exact == TP_STR_MATCH) { - /* + /* * Allow the match if the precisions would allow us to reuse the * string without modification. */ @@ -1747,7 +1747,7 @@ tp_domain_match_internal (const TP_DOMAIN * dom1, const TP_DOMAIN * dom2, TP_MAT } else { - /* + /* * Allow matches regardless of precision, let the actual length of the * value determine if it can be assigned. This is important for * literal strings as their precision will be the maximum but they @@ -1767,7 +1767,7 @@ tp_domain_match_internal (const TP_DOMAIN * dom1, const TP_DOMAIN * dom2, TP_MAT } /* fall through */ case DB_TYPE_BIT: - /* + /* * Unlike varchar, we have to be a little tighter on domain matches for * fixed width char. Not as much of a problem since these won't be * used for literal strings. @@ -1794,7 +1794,7 @@ tp_domain_match_internal (const TP_DOMAIN * dom1, const TP_DOMAIN * dom2, TP_MAT } else { - /* + /* * see discussion of special domain precision values in the * DB_TYPE_CHAR case above. */ @@ -1818,7 +1818,7 @@ tp_domain_match_internal (const TP_DOMAIN * dom1, const TP_DOMAIN * dom2, TP_MAT break; case DB_TYPE_NUMERIC: - /* + /* * note that we never allow inexact matches here because the * mr_setmem_numeric function is not currently able to perform the * deferred coercion. @@ -1830,7 +1830,7 @@ tp_domain_match_internal (const TP_DOMAIN * dom1, const TP_DOMAIN * dom2, TP_MAT case DB_TYPE_ERROR: case DB_TYPE_OID: case DB_TYPE_DB_VALUE: - /* + /* * These are internal domains, they shouldn't be seen, in case they are, * just let them match without parameters. 
*/ @@ -1929,7 +1929,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T *ins_pos = domain; - /* + /* * At this point, either domain and transient have exactly the same type, or * exact_match is TP_STR_MATCH and domain and transient are a char/varchar * (nchar/varnchar, bit/varbit) pair. @@ -1956,7 +1956,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T case DB_TYPE_DATE: case DB_TYPE_MONETARY: case DB_TYPE_SHORT: - /* + /* * these domains have no parameters, they match if asc/desc are the * same */ @@ -1983,7 +1983,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T while (domain) { - /* + /* * if "exact" is zero, we should be checking the subclass hierarchy * of domain to see id transient is in it ! */ @@ -1999,7 +1999,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T } else { - /* + /* * We have a mixture of OID & MOPS, it probably isn't necessary * to be this general but try to avoid assuming the class OIDs * have been set when there is a MOP present. @@ -2050,7 +2050,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T { int dsize1; - /* + /* * don't bother comparing the lists unless the sizes are the * same */ @@ -2094,7 +2094,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T { int dsize; - /* + /* * don't bother comparing the lists unless the sizes are the * same */ @@ -2237,7 +2237,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T } else if (exact == TP_STR_MATCH) { - /* + /* * Allow the match if the precisions would allow us to reuse the * string without modification. */ @@ -2247,7 +2247,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T } else { - /* + /* * Allow matches regardless of precision, let the actual length * of the value determine if it can be assigned. 
This is * important for literal strings as their precision will be the @@ -2285,7 +2285,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T } else if (exact == TP_STR_MATCH) { - /* + /* * Allow the match if the precisions would allow us to reuse the * string without modification. */ @@ -2293,7 +2293,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T } else { - /* + /* * Allow matches regardless of precision, let the actual length * of the value determine if it can be assigned. This is * important for literal strings as their precision will be the @@ -2317,7 +2317,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T case DB_TYPE_BIT: while (domain) { - /* + /* * Unlike varchar, we have to be a little tighter on domain matches * for fixed width char. Not as much of a problem since these won't * be used for literal strings. @@ -2333,7 +2333,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T } else { - /* + /* * Recognize a precision of TP_FLOATING_PRECISION_VALUE to * indicate a precision whose coercability must be determined * by examing the value. This is used primarily by db_coerce() @@ -2374,7 +2374,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T } else { - /* + /* * see discussion of special domain precision values * in the DB_TYPE_CHAR case above. */ @@ -2431,7 +2431,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T break; case DB_TYPE_NUMERIC: - /* + /* * The first domain is a default domain for numeric type, * actually NUMERIC(15,0). We try to match it first. */ @@ -2445,7 +2445,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T domain = domain->next_list; while (domain) { - /* + /* * The other domains for numeric values are sorted * by descending order of precision and scale. 
*/ @@ -2455,7 +2455,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T break; } - /* + /* * note that we never allow inexact matches here because * the mr_setmem_numeric function is not currently able * to perform the deferred coercion. @@ -2476,7 +2476,7 @@ tp_is_domain_cached (TP_DOMAIN * dlist, TP_DOMAIN * transient, TP_MATCH exact, T case DB_TYPE_ERROR: case DB_TYPE_OID: case DB_TYPE_DB_VALUE: - /* + /* * These are internal domains, they shouldn't be seen, in case they are, * just let them match without parameters. */ @@ -2601,7 +2601,7 @@ tp_domain_find_numeric (DB_TYPE type, int precision, int scale, bool is_desc) /* type : DB_TYPE_NUMERIC */ assert (type == DB_TYPE_NUMERIC); - /* + /* * The first domain is a default domain for numeric type, * actually NUMERIC(15,0). We try to match it first. */ @@ -2645,7 +2645,7 @@ tp_domain_find_charbit (DB_TYPE type, int codeset, int collation_id, unsigned ch TP_DOMAIN *dom; /* tp_domain_find_with_codeset_precision */ - /* + /* * type : DB_TYPE_NCHAR DB_TYPE_VARNCHAR * DB_TYPE_CHAR DB_TYPE_VARCHAR * DB_TYPE_BIT DB_TYPE_VARBIT @@ -2751,7 +2751,7 @@ tp_domain_find_object (DB_TYPE type, OID * class_oid, struct db_object * class_m #if defined (SERVER_MODE) assert_release (false); #else /* defined (SERVER_MODE) */ - /* + /* * We have a mixture of OID & MOPS, it probably isn't necessary to be * this general but try to avoid assuming the class OIDs have been set * when there is a MOP present. 
@@ -2949,7 +2949,7 @@ tp_domain_cache (TP_DOMAIN * transient) tp_swizzle_oid (transient); #endif /* !SERVER_MODE */ - /* + /* * first search stage: NO LOCK */ /* locate the root of the cache list for domains of this type */ @@ -2962,7 +2962,7 @@ tp_domain_cache (TP_DOMAIN * transient) domain = tp_is_domain_cached (*dlist, transient, TP_EXACT_MATCH, &ins_pos); if (domain != NULL) { - /* + /* * We found one in the cache, free the supplied domain and return * the cached one */ @@ -2971,7 +2971,7 @@ tp_domain_cache (TP_DOMAIN * transient) } } - /* + /* * second search stage: LOCK */ #if defined (SERVER_MODE) @@ -2987,7 +2987,7 @@ tp_domain_cache (TP_DOMAIN * transient) domain = tp_is_domain_cached (*dlist, transient, TP_EXACT_MATCH, &ins_pos); if (domain != NULL) { - /* + /* * We found one in the cache, free the supplied domain and return * the cached one */ @@ -2998,7 +2998,7 @@ tp_domain_cache (TP_DOMAIN * transient) } #endif /* SERVER_MODE */ - /* + /* * We couldn't find one, install the transient domain that was passed in. * Since by far the most common domain match is going to be the built-in * domain at the head of the list, append new domains to the end of the @@ -3104,7 +3104,7 @@ tp_domain_resolve_default (DB_TYPE type) * coll_flag(in): collation flag * Note: * It returns a special domain having the desired collation and collation - * mode flag. Use this in context of type inference for argument coercion + * mode flag. Use this in context of type inference for argument coercion */ TP_DOMAIN * tp_domain_resolve_default_w_coll (DB_TYPE type, int coll_id, TP_DOMAIN_COLL_ACTION coll_flag) @@ -3162,7 +3162,7 @@ tp_domain_resolve_value (DB_VALUE * val, TP_DOMAIN * dbuf) if (TP_IS_SET_TYPE (value_type)) { DB_SET *set; - /* + /* * For sets, just return the domain attached to the set since it * will already have been cached. 
*/ @@ -3297,7 +3297,7 @@ tp_domain_resolve_value (DB_VALUE * val, TP_DOMAIN * dbuf) domain->collation_id = db_get_string_collation (val); domain->precision = db_value_precision (val); - /* + /* * Convert references to the "floating" precisions to actual * precisions. This may not be necessary or desireable? * Zero seems to pop up occasionally in DB_VALUE precisions, until @@ -3335,7 +3335,7 @@ tp_domain_resolve_value (DB_VALUE * val, TP_DOMAIN * dbuf) } break; case DB_TYPE_ENUMERATION: - /* + /* * We have no choice but to return the default enumeration domain * because we cannot construct the domain from a DB_VALUE */ @@ -3359,7 +3359,7 @@ tp_domain_resolve_value (DB_VALUE * val, TP_DOMAIN * dbuf) domain->precision = db_value_precision (val); domain->scale = db_value_scale (val); - /* + /* * Hack, precision seems to be commonly -1 DB_VALUES, turn this into * the default "maximum" precision. * This may not be necessary any more. @@ -3386,7 +3386,7 @@ tp_domain_resolve_value (DB_VALUE * val, TP_DOMAIN * dbuf) case DB_TYPE_SUB: case DB_TYPE_VARIABLE: case DB_TYPE_DB_VALUE: - /* + /* * These are internal domains, they shouldn't be seen, in case they * are, match to a built-in */ @@ -3431,7 +3431,7 @@ tp_domain_resolve_value (DB_VALUE * val, TP_DOMAIN * dbuf) } break; - /* + /* * things handled in logic outside the switch, shuts up compiler * warnings */ @@ -3598,7 +3598,7 @@ tp_domain_add (TP_DOMAIN ** dlist, TP_DOMAIN * domain) case DB_TYPE_CHAR: case DB_TYPE_NCHAR: case DB_TYPE_BIT: - /* + /* * PR) 1.deficient character related with CHAR & VARCHAR in set. * ==> distinguishing VARCHAR from CHAR. * 2. 
core dumped & deficient character related with @@ -3890,7 +3890,7 @@ tp_domain_filter_list (TP_DOMAIN * dlist, int *list_changes) } else { - /* + /* * redundant "object" domain, remove, prev can't be NULL here, * will always have at least one domain structure at the head of * the list @@ -3941,7 +3941,7 @@ tp_domain_filter_list (TP_DOMAIN * dlist, int *list_changes) int tp_domain_name (const TP_DOMAIN * domain, char *buffer, int maxlen) { - /* + /* * need to get more sophisticated here, do full name decomposition and * check maxlen */ @@ -4000,7 +4000,7 @@ tp_domain_find_compatible (const TP_DOMAIN * src, const TP_DOMAIN * dest) found = NULL; - /* + /* * If we have a hierarchical domain, perform a lenient "superset" comparison * rather than an exact match. */ @@ -4048,7 +4048,7 @@ tp_domain_compatible (const TP_DOMAIN * src, const TP_DOMAIN * dest) equal = 1; if (src != dest) { - /* + /* * for every domain in src, make sure we have a compatible one in * dest */ @@ -4101,7 +4101,7 @@ tp_domain_select (const TP_DOMAIN * domain_list, const DB_VALUE * value, int all best = NULL; - /* + /* * NULL values are allowed in any domain, a NULL domain means that any value * is allowed, return the first thing on the list. */ @@ -4115,7 +4115,7 @@ tp_domain_select (const TP_DOMAIN * domain_list, const DB_VALUE * value, int all { if (db_on_server) { - /* + /* * On the server, just make sure that we have any object domain in * the list. 
*/ @@ -4130,7 +4130,7 @@ tp_domain_select (const TP_DOMAIN * domain_list, const DB_VALUE * value, int all #if !defined (SERVER_MODE) else { - /* + /* * On the client, swizzle to an object and fall in to the next * clause */ @@ -4150,7 +4150,7 @@ tp_domain_select (const TP_DOMAIN * domain_list, const DB_VALUE * value, int all { mop = ws_mop (oid, NULL); db_make_object (&temp, mop); - /* + /* * we don't have to worry about clearing this since its an * object */ @@ -4162,7 +4162,7 @@ tp_domain_select (const TP_DOMAIN * domain_list, const DB_VALUE * value, int all #endif /* !SERVER_MODE */ } - /* + /* * Handling of object domains is more complex than just comparing the * types and parameters. We have to see if the instance's class is * somewhere in the subclass hierarchy of the domain class. @@ -4174,7 +4174,7 @@ tp_domain_select (const TP_DOMAIN * domain_list, const DB_VALUE * value, int all { if (db_on_server) { - /* + /* * we really shouldn't get here but if we do, handle it like the * OID case above, just return the first object domain that we find. */ @@ -4190,7 +4190,7 @@ tp_domain_select (const TP_DOMAIN * domain_list, const DB_VALUE * value, int all #if !defined (SERVER_MODE) else { - /* + /* * On the client, check to see if the instance is within the subclass * hierarchy of the object domains. If there are more than one * acceptable domains, we just pick the first one. @@ -4211,7 +4211,7 @@ tp_domain_select (const TP_DOMAIN * domain_list, const DB_VALUE * value, int all #if !defined (SERVER_MODE) else if (vtype == DB_TYPE_POINTER) { - /* + /* * This is necessary in order to correctly choose an object domain from * the domain list when doing an insert nested inside a heterogeneous * set, e.g.: @@ -4237,7 +4237,7 @@ tp_domain_select (const TP_DOMAIN * domain_list, const DB_VALUE * value, int all else if (TP_IS_SET_TYPE (vtype)) { - /* + /* * Now that we cache set domains, there might be a faster way to do * this ! 
*/ @@ -4338,7 +4338,7 @@ tp_domain_select (const TP_DOMAIN * domain_list, const DB_VALUE * value, int all } else { - /* + /* * synthesize a domain for the value and look for a match. * Could we be doing this for the set values too ? * Hack, since this will be used only for comparison purposes, @@ -4401,7 +4401,7 @@ tp_domain_select_type (const TP_DOMAIN * domain_list, DB_TYPE type, DB_OBJECT * TP_DOMAIN **others; int i; - /* + /* * NULL values are allowed in any domain, a NULL domain means that any value * is allowed, return the first thing on the list */ @@ -4412,7 +4412,7 @@ tp_domain_select_type (const TP_DOMAIN * domain_list, DB_TYPE type, DB_OBJECT * else { best = NULL; - /* + /* * loop through the domain elements looking for one the fits, * rather than make type comparisons for each element in the loop, * do them out here and duplicate the loop @@ -4455,7 +4455,7 @@ tp_domain_select_type (const TP_DOMAIN * domain_list, DB_TYPE type, DB_OBJECT * others = tp_Domain_conversion_matrix[type]; if (others != NULL) { - /* + /* * loop through the allowable conversions until we find * one that appears in the supplied domain list, the * array is ordered in terms of priority, @@ -5816,7 +5816,7 @@ tp_value_coerce_strict (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * } } - /* + /* * If src == dest, coerce into a temporary variable and * handle the conversion before returning. */ @@ -5829,7 +5829,7 @@ tp_value_coerce_strict (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * target = dest; } - /* + /* * Initialize the destination domain, important for the * nm_ coercion functions which take domain information inside the * destination db value. 
@@ -7115,6 +7115,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * */ DB_JSON_TYPE json_type = db_json_get_type (db_get_json_document (src)); JSON_DOC *src_doc = db_get_json_document (src); + bool use_replacement = true; switch (json_type) { @@ -7124,21 +7125,43 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * case DB_JSON_INT: db_make_int (&src_replacement, db_json_get_int_from_document (src_doc)); break; + case DB_JSON_BIGINT: + db_make_bigint (&src_replacement, db_json_get_bigint_from_document (src_doc)); + break; + case DB_JSON_BOOL: + switch (desired_type) + { + case DB_TYPE_CHAR: + case DB_TYPE_VARCHAR: + case DB_TYPE_NCHAR: + case DB_TYPE_VARNCHAR: + db_make_string (&src_replacement, db_json_get_bool_as_str_from_document (src_doc)); + src_replacement.need_clear = true; + break; + default: + db_make_int (&src_replacement, db_json_get_bool_from_document (src_doc) ? 1 : 0); + } + break; case DB_JSON_STRING: { - const char *json_string = NULL; - - json_string = db_json_get_string_from_document (src_doc); + const char *json_string = db_json_get_string_from_document (src_doc); db_make_string_by_const_str (&src_replacement, json_string); } break; default: + use_replacement = false; /* do nothing */ break; } - if (json_type != DB_JSON_ARRAY && json_type != DB_JSON_OBJECT) + if (use_replacement) { + if (src == dest) + { + // if src is equal to dest then JSON_DOC can be deleted after required information was extracted from it + pr_clear_value (dest); + } + original_type = DB_VALUE_TYPE (&src_replacement); src = &src_replacement; } @@ -7146,7 +7169,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * if (desired_type == original_type) { - /* + /* * If there is an easy to check exact match on a non-parameterized * domain, just do a simple clone of the value. 
*/ @@ -7188,7 +7211,10 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * ASSERT_ERROR (); return DOMAIN_ERROR; } - pr_clone_value ((DB_VALUE *) src, dest); + if (src != dest) + { + pr_clone_value (src, dest); + } pr_clear_value (&src_replacement); return (status); default: @@ -7198,7 +7224,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * } } - /* + /* * If the coercion_mode is TP_IMPLICIT_COERCION, check to see if the original * type can be implicitly coerced to the desired_type. * @@ -7224,7 +7250,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * } } - /* + /* * If src == dest, coerce into a temporary variable and * handle the conversion before returning. */ @@ -7237,7 +7263,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * target = dest; } - /* + /* * Initialize the destination domain, important for the * nm_ coercion functions thich take domain information inside the * destination db value. 
@@ -7399,9 +7425,12 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * { if (er_errid () != NO_ERROR) /* i.e, malloc failure */ { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + } + else + { + status = DOMAIN_INCOMPATIBLE; /* conversion error */ } - status = DOMAIN_INCOMPATIBLE; /* conversion error */ break; } @@ -7512,9 +7541,12 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * { if (er_errid () != NO_ERROR) /* i.e, malloc failure */ { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + } + else + { + status = DOMAIN_INCOMPATIBLE; /* conversion error */ } - status = DOMAIN_INCOMPATIBLE; /* conversion error */ break; } @@ -7651,9 +7683,12 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * { if (er_errid () != NO_ERROR) /* i.e, malloc failure */ { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + } + else + { + status = DOMAIN_INCOMPATIBLE; /* conversion error */ } - status = DOMAIN_INCOMPATIBLE; /* conversion error */ break; } @@ -7725,9 +7760,12 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * { if (er_errid () != NO_ERROR) /* i.e, malloc failure */ { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + } + else + { + status = DOMAIN_INCOMPATIBLE; /* conversion error */ } - status = DOMAIN_INCOMPATIBLE; /* conversion error */ break; } @@ -7787,9 +7825,12 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * { if (er_errid () != NO_ERROR) /* i.e, malloc failure */ { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + } + else + { + status = DOMAIN_INCOMPATIBLE; /* conversion error */ } - status = DOMAIN_INCOMPATIBLE; /* conversion error */ break; } @@ -7814,7 +7855,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * break; case DB_TYPE_NUMERIC: - /* + /* * Numeric-to-numeric coercion will be handled in the nm_ module. 
* The desired precision & scale is communicated through the destination * value. @@ -7832,9 +7873,12 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * { if (er_errid () != NO_ERROR) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + } + else + { + status = DOMAIN_INCOMPATIBLE; } - status = DOMAIN_INCOMPATIBLE; } else { @@ -7899,9 +7943,12 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * { if (er_errid () != NO_ERROR) /* i.e, malloc failure */ { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + } + else + { + status = DOMAIN_INCOMPATIBLE; /* conversion error */ } - status = DOMAIN_INCOMPATIBLE; /* conversion error */ break; } @@ -7933,7 +7980,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * case DB_TYPE_VARNCHAR: if (tp_atoutime (src, &v_utime) != NO_ERROR) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; } else { @@ -8049,7 +8096,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * case DB_TYPE_VARNCHAR: if (tp_atotimestamptz (src, &v_timestamptz) != NO_ERROR) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; } else { @@ -8298,7 +8345,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * case DB_TYPE_VARNCHAR: if (tp_atoudatetime (src, &v_datetime) != NO_ERROR) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; } else { @@ -8395,7 +8442,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * case DB_TYPE_VARNCHAR: if (tp_atodatetimetz (src, &v_datetimetz) != NO_ERROR) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; } else { @@ -8583,12 +8630,13 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * } else { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + break; } if (db_make_date (target, month, day, year) != NO_ERROR) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; } break; @@ -8797,12 +8845,13 @@ tp_value_cast_internal (const 
DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * } else { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + break; } if (db_make_time (target, hour, minute, second) != NO_ERROR) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; } break; case DB_TYPE_ENUMERATION: @@ -8842,6 +8891,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * if (!sm_check_class_domain ((TP_DOMAIN *) desired_domain, ((DB_OTMPL *) db_get_pointer (src))->classobj)) { status = DOMAIN_INCOMPATIBLE; + break; } db_make_pointer (target, db_get_pointer (src)); break; @@ -8854,7 +8904,8 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * is_vclass = db_is_vclass (desired_domain->class_mop); if (is_vclass < 0) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + break; } if (!is_vclass) { @@ -8886,11 +8937,12 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * is_vclass = db_is_vclass (desired_domain->class_mop); if (is_vclass < 0) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + break; } if (is_vclass) { - /* + /* * This should still be an error, and the above * code should have constructed a virtual mop. * I'm not sure the rest of the code is consistent @@ -8928,7 +8980,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * set_domain = setobj_domain (setref->set); if (src == dest && tp_domain_compatible (set_domain, desired_domain)) { - /* + /* * We know that this is a "coerce-in-place" operation, and * we know that no coercion is necessary, so do nothing: we * can use the exact same set without any conversion. 
@@ -8943,7 +8995,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * { if (tp_domain_compatible (set_domain, desired_domain)) { - /* + /* * Well, we can't use the exact same set, but we don't * have to do the whole hairy coerce thing either: we * can just make a copy and then take the more general @@ -8958,7 +9010,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * } else { - /* + /* * Well, now we have to use the whole hairy coercion * thing. Too bad... * @@ -8999,7 +9051,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * if (original_type == DB_TYPE_VOBJ) { SETREF *setref; - /* + /* * We should try and convert the view of the src to match * the view of the desired_domain. However, the desired * domain generally does not contain this information. @@ -9015,7 +9067,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * } else { - /* + /* * this is a "coerce-in-place", and no coercion is necessary, * so do nothing: use the same vobj without any conversion. set * "src" to NULL to prevent the wrapup code from clearing dest. @@ -9054,7 +9106,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * seq = db_seq_create (NULL, NULL, 3); keys = *src; - /* + /* * if we are on the server, and get a DB_TYPE_OBJECT, * then its only possible representation is a DB_TYPE_OID, * and it may be treated that way. However, this should @@ -9164,7 +9216,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * if (src == dest && tp_can_steal_string (src, desired_domain)) { tp_value_slam_domain (dest, desired_domain); - /* + /* * Set "src" to NULL to prevent the wrapup code from undoing * our work; since we haven't actually made a copy, we don't * want to clear the original. 
@@ -9201,7 +9253,7 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * if (src == dest && tp_can_steal_string (src, desired_domain)) { tp_value_slam_domain (dest, desired_domain); - /* + /* * Set "src" to NULL to prevent the wrapup code from undoing * our work; since we haven't actually made a copy, we don't * want to clear the original. @@ -9256,7 +9308,8 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * new_string = (char *) db_private_alloc (NULL, max_size); if (!new_string) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + break; } if (original_type == DB_TYPE_BIGINT) @@ -9339,7 +9392,8 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * new_string = (char *) db_private_alloc (NULL, max_size); if (new_string == NULL) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + break; } strcpy (new_string, str_buf); @@ -9367,7 +9421,8 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * new_string = (char *) db_private_alloc (NULL, max_size); if (!new_string) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + break; } snprintf (new_string, max_size - 1, "%s%.*f", lang_currency_symbol (db_get_monetary (src)->type), 2, @@ -9412,7 +9467,8 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * new_string = (char *) db_private_alloc (NULL, max_size); if (!new_string) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + break; } err = NO_ERROR; @@ -9490,7 +9546,8 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * new_string = (char *) db_private_alloc (NULL, max_size); if (!new_string) { - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + break; } convert_error = bfmt_print (1 /* BIT_STRING_HEX */ , src, @@ -9977,7 +10034,8 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * if (error_code != NO_ERROR) { assert (doc == NULL); - return DOMAIN_ERROR; + status 
= DOMAIN_ERROR; + break; } if (desired_domain->json_validator @@ -9985,7 +10043,8 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * { ASSERT_ERROR (); db_json_delete_doc (doc); - return DOMAIN_ERROR; + status = DOMAIN_ERROR; + break; } } break; @@ -9993,6 +10052,10 @@ tp_value_cast_internal (const DB_VALUE * src, DB_VALUE * dest, const TP_DOMAIN * doc = db_json_allocate_doc (); db_json_set_int_to_doc (doc, db_get_int (src)); break; + case DB_TYPE_BIGINT: + doc = db_json_allocate_doc (); + db_json_set_bigint_to_doc (doc, db_get_bigint (src)); + break; case DB_TYPE_DOUBLE: doc = db_json_allocate_doc (); db_json_set_double_to_doc (doc, db_get_double (src)); @@ -10128,7 +10191,7 @@ tp_value_cast_no_domain_select (const DB_VALUE * src, DB_VALUE * dest, const TP_ } /* - * tp_value_change_coll_and_codeset () - change the collation and codeset of a + * tp_value_change_coll_and_codeset () - change the collation and codeset of a * value * returns: cast operation result * src(in): source DB_VALUE @@ -10331,7 +10394,7 @@ tp_set_compare (const DB_VALUE * value1, const DB_VALUE * value2, int do_coercio status = tp_value_coerce (v2, &temp, tp_domain_resolve_default (vtype1)); if (status != DOMAIN_COMPATIBLE) { - /* + /* * This is arguably an error condition * but Not Equal is as close as we can come * to reporting it. @@ -10351,7 +10414,7 @@ tp_set_compare (const DB_VALUE * value1, const DB_VALUE * value2, int do_coercio status = tp_value_coerce (v1, &temp, tp_domain_resolve_default (vtype2)); if (status != DOMAIN_COMPATIBLE) { - /* + /* * This is arguably an error condition * but Not Equal is as close as we can come * to reporting it. @@ -10372,7 +10435,7 @@ tp_set_compare (const DB_VALUE * value1, const DB_VALUE * value2, int do_coercio s1 = db_get_set (v1); s2 = db_get_set (v2); - /* + /* * there may ba a call for set_compare returning a total * ordering some day. 
*/ @@ -10479,7 +10542,7 @@ tp_value_compare_with_error (const DB_VALUE * value1, const DB_VALUE * value2, i vtype1 = DB_VALUE_DOMAIN_TYPE (v1); vtype2 = DB_VALUE_DOMAIN_TYPE (v2); - /* + /* * Hack, DB_TYPE_OID & DB_TYPE_OBJECT are logically the same domain * although their physical representations are different. * If we see a pair of those, handle it up front before we @@ -10533,7 +10596,7 @@ tp_value_compare_with_error (const DB_VALUE * value1, const DB_VALUE * value2, i } #endif /* !defined (SERVER_MODE) */ - /* + /* * If value types aren't exact, try coercion. * May need to be using the domain returned by * tp_domain_resolve_value here ? @@ -10639,11 +10702,14 @@ tp_value_compare_with_error (const DB_VALUE * value1, const DB_VALUE * value2, i status = tp_value_coerce (v2, &temp2, d1); if (status != DOMAIN_COMPATIBLE) { - /* + /* * This is arguably an error condition * but Not Equal is as close as we can come * to reporting it. */ + + // WARNING: forget any error for coercion. + er_clear (); } else { @@ -10682,11 +10748,14 @@ tp_value_compare_with_error (const DB_VALUE * value1, const DB_VALUE * value2, i status = tp_value_coerce (v1, &temp1, d2); if (status != DOMAIN_COMPATIBLE) { - /* + /* * This is arguably an error condition * but Not Equal is as close as we can come * to reporting it. */ + + // WARNING: forget any error for coercion. + er_clear (); } else { @@ -10699,7 +10768,7 @@ tp_value_compare_with_error (const DB_VALUE * value1, const DB_VALUE * value2, i if (!ARE_COMPARABLE (vtype1, vtype2)) { - /* + /* * Default result for mismatched types. * Not correct but will be consistent. */ @@ -10920,7 +10989,7 @@ tp_domain_disk_size (TP_DOMAIN * domain) assert (domain->precision != TP_FLOATING_PRECISION_VALUE); - /* + /* * Use the "lengthmem" function here with a NULL pointer. The size will * not be dependent on the actual value. 
* The decision of whether or not to use the lengthmem function probably @@ -10957,7 +11026,7 @@ tp_domain_memory_size (TP_DOMAIN * domain) return -1; } - /* + /* * Use the "lengthmem" function here with a NULL pointer and a "disk" * flag of zero. * This will cause it to return the instance memory size. @@ -11037,7 +11106,7 @@ tp_check_value_size (TP_DOMAIN * domain, DB_VALUE * value) case DB_TYPE_CHAR: case DB_TYPE_NCHAR: case DB_TYPE_BIT: - /* + /* * The compatibility will be determined by the precision. * A floating precision is determined by the length of the string * value. @@ -11072,7 +11141,7 @@ tp_check_value_size (TP_DOMAIN * domain, DB_VALUE * value) case DB_TYPE_VARCHAR: case DB_TYPE_VARNCHAR: case DB_TYPE_VARBIT: - /* + /* * The compatibility of the value is always determined by the * actual length of the value, not the destination precision. */ @@ -11089,7 +11158,7 @@ tp_check_value_size (TP_DOMAIN * domain, DB_VALUE * value) src_length = db_get_string_size (value); } - /* + /* * Work backwards from the source length into a minimum precision. * This feels like it should be a nice packed utility * function somewhere. @@ -11104,7 +11173,7 @@ tp_check_value_size (TP_DOMAIN * domain, DB_VALUE * value) break; default: - /* + /* * None of the other types require this form of value dependent domain * precision checking. */ @@ -11312,7 +11381,7 @@ tp_domain_references_objects (const TP_DOMAIN * dom) dom = dom->setdomain; if (dom) { - /* + /* * If domains are specified, we can assume that the upper levels * have enforced the rule that no value in the collection has a * domain that isn't included in this list. If this list has no @@ -11331,7 +11400,7 @@ tp_domain_references_objects (const TP_DOMAIN * dom) } else { - /* + /* * We've got hold of one of our fabulous "collection of anything" * attributes. We've got no choice but to assume that it might * have objects in it. 
@@ -11670,7 +11739,7 @@ tp_digit_number_str_to_bi (char *start, char *end, INTL_CODESET codeset, bool is } else { - /* Copy the number to str, excluding leading spaces and '0's and trailing spaces. Anything other than leading and + /* Copy the number to str, excluding leading spaces and '0's and trailing spaces. Anything other than leading and * trailing spaces already resulted in an error. */ if (is_negative) { diff --git a/src/object/object_primitive.c b/src/object/object_primitive.c index 38eed3d9d3b..1b72dc0a63d 100644 --- a/src/object/object_primitive.c +++ b/src/object/object_primitive.c @@ -2084,6 +2084,23 @@ pr_clear_value (DB_VALUE * value) return NO_ERROR; } +/* + * pr_clear_value_vector - clear a vector of db_values + * references + * return: void + * value(in/out): vector of values + */ +/* *INDENT-OFF* */ +void +pr_clear_value_vector (std::vector &value_vector) +{ + for (DB_VALUE &dbval : value_vector) + { + pr_clear_value (&dbval); + } +} +/* *INDENT-ON* */ + /* * pr_free_value - free an internval value container any anything that it * references diff --git a/src/object/object_primitive.h b/src/object/object_primitive.h index 139d984f00b..990f408b8db 100644 --- a/src/object/object_primitive.h +++ b/src/object/object_primitive.h @@ -39,6 +39,7 @@ #include "thread_compat.hpp" #ifdef __cplusplus +#include class string_buffer; #endif @@ -339,6 +340,12 @@ extern "C" extern int pr_clear_value (DB_VALUE * var); +#if defined __cplusplus + /* *INDENT-OFF* */ + void pr_clear_value_vector (std::vector &value_vector); + /* *INDENT-ON* */ +#endif + #ifdef __cplusplus } #endif diff --git a/src/object/object_print_util.cpp b/src/object/object_print_util.cpp index e9934ef6b35..1995448ef1e 100644 --- a/src/object/object_print_util.cpp +++ b/src/object/object_print_util.cpp @@ -23,7 +23,6 @@ #include "object_print_util.hpp" -#include "dbdef.h" #include "work_space.h" #include diff --git a/src/object/object_printer.cpp b/src/object/object_printer.cpp index 
4445c1aea15..736153e4279 100644 --- a/src/object/object_printer.cpp +++ b/src/object/object_printer.cpp @@ -26,7 +26,6 @@ #include "class_object.h" #include "db_json.hpp" #include "db_value_printer.hpp" -#include "dbdef.h" #include "dbi.h" #include "dbtype.h" #include "misc_string.h" diff --git a/src/object/schema_manager.c b/src/object/schema_manager.c index a2f262481eb..c59d938a727 100644 --- a/src/object/schema_manager.c +++ b/src/object/schema_manager.c @@ -398,6 +398,8 @@ static int sm_drop_cascade_foreign_key (SM_CLASS * class_); static char *sm_default_constraint_name (const char *class_name, DB_CONSTRAINT_TYPE type, const char **att_names, const int *asc_desc); +static int sm_load_online_index (MOP classmop, const char *constraint_name); + static const char *sm_locate_method_file (SM_CLASS * class_, const char *function); #if defined (WINDOWS) @@ -10241,14 +10243,14 @@ allocate_index (MOP classop, SM_CLASS * class_, DB_OBJLIST * subclasses, SM_ATTR fk_refcls_pk_btid, fk_name, SM_GET_FILTER_PRED_STREAM (filter_index), SM_GET_FILTER_PRED_STREAM_SIZE (filter_index), function_index->expr_stream, function_index->expr_stream_size, function_index->col_id, - function_index->attr_index_start); + function_index->attr_index_start, index_status); } else { error = btree_load_index (index, constraint_name, domain, oids, n_classes, n_attrs, attr_ids, (int *) attrs_prefix_length, hfids, unique_pk, not_null, fk_refcls_oid, fk_refcls_pk_btid, fk_name, SM_GET_FILTER_PRED_STREAM (filter_index), - SM_GET_FILTER_PRED_STREAM_SIZE (filter_index), NULL, -1, -1, -1); + SM_GET_FILTER_PRED_STREAM_SIZE (filter_index), NULL, -1, -1, -1, index_status); } } @@ -12677,6 +12679,7 @@ update_class (SM_TEMPLATE * template_, MOP * classmop, int auto_res, DB_AUTH aut { abort_subclasses (newsubs); classobj_free_template (flat); + /* don't touch this class if we aborted ! 
*/ if (class_ != NULL && error != ER_LK_UNILATERALLY_ABORTED) { @@ -14587,7 +14590,6 @@ sm_add_constraint (MOP classop, DB_CONSTRAINT_TYPE constraint_type, const char * int error = NO_ERROR; SM_TEMPLATE *def; MOP newmop = NULL; - LOCK ex_lock = SCH_M_LOCK; bool needs_hierarchy_lock; if (att_names == NULL) @@ -14617,7 +14619,12 @@ sm_add_constraint (MOP classop, DB_CONSTRAINT_TYPE constraint_type, const char * auth = AU_ALTER; } - // TODO: secondary index on partition. + if (index_status == SM_ONLINE_INDEX_BUILDING_IN_PROGRESS && classop->lock > IX_LOCK) + { + // if the transaction already hold a lock which is greater than IX, + // we don't allow online index creation for transaction consistency. + index_status = SM_NORMAL_INDEX; + } def = smt_edit_class_mop (classop, auth); if (def == NULL) @@ -14688,6 +14695,7 @@ sm_add_constraint (MOP classop, DB_CONSTRAINT_TYPE constraint_type, const char * } needs_hierarchy_lock = DB_IS_CONSTRAINT_UNIQUE_FAMILY (constraint_type); + /* This one frees the template inside!!! */ error = sm_update_class_with_auth (def, &newmop, auth, needs_hierarchy_lock); if (error != NO_ERROR) { @@ -14697,24 +14705,19 @@ sm_add_constraint (MOP classop, DB_CONSTRAINT_TYPE constraint_type, const char * if (index_status == SM_ONLINE_INDEX_BUILDING_IN_PROGRESS) { - /* Demote lock for online index. */ - error = locator_demote_class_lock (&newmop->oid_info.oid, IX_LOCK, &ex_lock); + // Load index phase. + error = sm_load_online_index (newmop, constraint_name); if (error != NO_ERROR) { - smt_quit (def); return error; } - // Load index phase. - /* TODO For now, the index will be empty. */ - error = sm_update_statistics (newmop, STATS_WITH_SAMPLING); if (error != NO_ERROR) { return error; } - /* Promote the lock for online index. 
*/ def = smt_edit_class_mop (classop, auth); if (def == NULL) { @@ -14723,7 +14726,7 @@ sm_add_constraint (MOP classop, DB_CONSTRAINT_TYPE constraint_type, const char * } /* If we have an online index, we need to change the constraint to SM_ONLINE_INDEX_BUILDING_DONE, and - * add remove the old one from the property list. We also do not want to do some later checks. + * remove the old one from the property list. We also do not want to do some later checks. */ // TODO: Why do we remove and add it rather than just change the property? @@ -14737,6 +14740,7 @@ sm_add_constraint (MOP classop, DB_CONSTRAINT_TYPE constraint_type, const char * } /* Update the class now. */ + /* This one frees the template inside!!! */ error = sm_update_class_with_auth (def, &newmop, auth, needs_hierarchy_lock); if (error != NO_ERROR) { @@ -16393,3 +16397,208 @@ sm_stats_remove_bt_stats_at_position (ATTR_STATS * attr_stats, int position) /* Make it unavailable. */ attr_stats->n_btstats--; } + +int +sm_load_online_index (MOP classmop, const char *constraint_name) +{ + SM_CLASS *class_ = NULL; + int error = NO_ERROR; + SM_CLASS_CONSTRAINT *con = NULL; + TP_DOMAIN *domain; + int i, n_attrs, n_classes, max_classes; + DB_TYPE type; + DB_OBJLIST *subclasses, *sub; + int *attr_ids = NULL; + size_t attr_ids_size; + OID *oids = NULL; + HFID *hfids = NULL; + int reverse; + int unique_pk = 0; + int not_null; + + /* Fetch the class. */ + error = au_fetch_class (classmop, &class_, AU_FETCH_UPDATE, AU_ALTER); + if (error != NO_ERROR) + { + goto error_return; + } + + /* Get subclasses. */ + subclasses = class_->users; + + /* Get the constraint on which we want to load the online index. */ + con = classobj_find_constraint_by_name (class_->constraints, constraint_name); + if (con == NULL) + { + /* This should never happen. */ + error = ER_FAILED; + goto error_return; + } + + /* Safeguards. 
*/ + assert (con != NULL); + assert (con->index_status == SM_ONLINE_INDEX_BUILDING_IN_PROGRESS); + + /* We must check if the constraint isn't shared from another one. */ + if (con->shared_cons_name != NULL) + { + /* The BTID already exists and surely it has been loaded. Therefore we can just stop here */ + return NO_ERROR; + } + + /* Count the attributes */ + for (i = 0, n_attrs = 0; con->attributes[i] != NULL; i++, n_attrs++) + { + type = con->attributes[i]->type->id; + if (!tp_valid_indextype (type)) + { + error = ER_SM_INVALID_INDEX_TYPE; + er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, error, 1, pr_type_name (type)); + } + else if (con->attrs_prefix_length && con->attrs_prefix_length[i] >= 0) + { + if (!TP_IS_CHAR_TYPE (type) && !TP_IS_BIT_TYPE (type)) + { + error = ER_SM_INVALID_INDEX_WITH_PREFIX_TYPE; + er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, error, 1, pr_type_name (type)); + } + else if (((long) con->attributes[i]->domain->precision) < con->attrs_prefix_length[i]) + { + error = ER_SM_INVALID_PREFIX_LENGTH; + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_SM_INVALID_PREFIX_LENGTH, 1, con->attrs_prefix_length[i]); + } + } + } + + if (error != NO_ERROR) + { + goto error_return; + } + + if (con->func_index_info) + { + if (con->func_index_info->attr_index_start == 0) + { + /* if this is a single column function index, the key domain is actually the domain of the function + * result */ + domain = con->func_index_info->fi_domain; + } + else + { + domain = + construct_index_key_domain (con->func_index_info->attr_index_start, con->attributes, con->asc_desc, + con->attrs_prefix_length, con->func_index_info->col_id, + con->func_index_info->fi_domain); + } + } + else + { + domain = construct_index_key_domain (n_attrs, con->attributes, con->asc_desc, con->attrs_prefix_length, -1, NULL); + } + + /* Count maximum possible subclasses */ + max_classes = 1; /* Start with 1 for the current class */ + for (sub = subclasses; sub != NULL; sub = sub->next) + { + max_classes++; + } 
+ + /* Allocate arrays to hold subclass information */ + attr_ids_size = max_classes * n_attrs * sizeof (int); + attr_ids = (int *) malloc (attr_ids_size); + if (attr_ids == NULL) + { + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, attr_ids_size); + goto error_return; + } + + oids = (OID *) malloc (max_classes * sizeof (OID)); + if (oids == NULL) + { + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, max_classes * sizeof (OID)); + goto error_return; + } + + hfids = (HFID *) malloc (max_classes * sizeof (HFID)); + if (hfids == NULL) + { + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, max_classes * sizeof (HFID)); + goto error_return; + } + + /* Enter the base class information into the arrays */ + n_classes = 0; + COPY_OID (&oids[n_classes], WS_OID (classmop)); + for (i = 0; i < n_attrs; i++) + { + attr_ids[i] = con->attributes[i]->id; + } + HFID_COPY (&hfids[n_classes], sm_ch_heap ((MOBJ) class_)); + n_classes++; + + if (con->type == SM_CONSTRAINT_REVERSE_INDEX || con->type == SM_CONSTRAINT_REVERSE_UNIQUE) + { + reverse = 1; + } + else + { + reverse = 0; + } + + if (con->type == SM_CONSTRAINT_UNIQUE || con->type == SM_CONSTRAINT_REVERSE_UNIQUE) + { + unique_pk = BTREE_CONSTRAINT_UNIQUE; + not_null = 0; + } + else if (con->type == SM_CONSTRAINT_PRIMARY_KEY) + { + unique_pk = BTREE_CONSTRAINT_UNIQUE | BTREE_CONSTRAINT_PRIMARY_KEY; + not_null = 1; + } + + if (con->func_index_info) + { + error = btree_load_index (&con->index_btid, constraint_name, domain, oids, n_classes, n_attrs, attr_ids, + (int *) con->attrs_prefix_length, hfids, unique_pk, not_null, NULL, + NULL, NULL, SM_GET_FILTER_PRED_STREAM (con->filter_predicate), + SM_GET_FILTER_PRED_STREAM_SIZE (con->filter_predicate), + con->func_index_info->expr_stream, con->func_index_info->expr_stream_size, + con->func_index_info->col_id, con->func_index_info->attr_index_start, + con->index_status); + } + else + { + error = btree_load_index 
(&con->index_btid, constraint_name, domain, oids, n_classes, n_attrs, attr_ids, + (int *) con->attrs_prefix_length, hfids, unique_pk, not_null, NULL, + NULL, NULL, SM_GET_FILTER_PRED_STREAM (con->filter_predicate), + SM_GET_FILTER_PRED_STREAM_SIZE (con->filter_predicate), NULL, -1, -1, -1, + con->index_status); + } + + free_and_init (attr_ids); + free_and_init (oids); + free_and_init (hfids); + + return error; + +error_return: + if (error != ER_TM_SERVER_DOWN_UNILATERALLY_ABORTED && error != ER_LK_UNILATERALLY_ABORTED) + { + (void) tran_abort_upto_system_savepoint (UNIQUE_SAVEPOINT_NAME); + } + + if (attr_ids != NULL) + { + free_and_init (attr_ids); + } + if (oids != NULL) + { + free_and_init (oids); + } + if (hfids != NULL) + { + free_and_init (hfids); + } + + return error; +} diff --git a/src/object/schema_manager.h b/src/object/schema_manager.h index 3728f8652f5..2606ce536b8 100644 --- a/src/object/schema_manager.h +++ b/src/object/schema_manager.h @@ -38,7 +38,6 @@ #include "class_object.h" /* for SM_CLASS */ #include "schema_template.h" /* template interface */ #include "trigger_manager.h" /* for TR_EVENT_TYPE */ -#include "dbdef.h" /* * This is NOT the "object" class but rather functions more like diff --git a/src/object/trigger_manager.c b/src/object/trigger_manager.c index 66f1d5ca575..3376860be48 100644 --- a/src/object/trigger_manager.c +++ b/src/object/trigger_manager.c @@ -31,7 +31,6 @@ #include "memory_alloc.h" #include "error_manager.h" #include "dbtype.h" -#include "dbdef.h" #include "trigger_manager.h" #include "memory_hash.h" #include "work_space.h" diff --git a/src/object/trigger_manager.h b/src/object/trigger_manager.h index 0549e0d658d..add1c8e2024 100644 --- a/src/object/trigger_manager.h +++ b/src/object/trigger_manager.h @@ -28,7 +28,6 @@ #include "memory_alloc.h" #include "dbtype_def.h" -#include "dbdef.h" #include "class_object.h" /* diff --git a/src/object/work_space.h b/src/object/work_space.h index a9990cebcc6..e955abbf25f 100644 --- 
a/src/object/work_space.h +++ b/src/object/work_space.h @@ -31,7 +31,6 @@ #include #include "oid.h" #include "storage_common.h" -#include "dbdef.h" #include "quick_fit.h" #include "locator.h" #include "dbtype_def.h" diff --git a/src/optimizer/optimizer.h b/src/optimizer/optimizer.h index 7aa2c8649a7..d45f791fed9 100644 --- a/src/optimizer/optimizer.h +++ b/src/optimizer/optimizer.h @@ -38,6 +38,7 @@ #include "parser.h" #include "release_string.h" #include "parser.h" +#include "xasl.h" /* * These #defines are used in conjunction with assert() to announce diff --git a/src/optimizer/query_graph.c b/src/optimizer/query_graph.c index 5bcc0c81935..db6b71dd44f 100644 --- a/src/optimizer/query_graph.c +++ b/src/optimizer/query_graph.c @@ -2728,7 +2728,11 @@ set_seg_expr (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *continue_ *continue_walk = PT_STOP_WALK; } } + break; + case PT_JSON_TABLE: + (void) parser_walk_tree (parser, tree->info.json_table_info.expr, set_seg_expr, arg, pt_continue_walk, NULL); + *continue_walk = PT_LIST_WALK; break; default: @@ -2848,20 +2852,30 @@ qo_is_equi_join_term (QO_TERM * term) static bool is_dependent_table (PT_NODE * entity) { - if (entity->info.spec.derived_table) + if (entity->info.spec.derived_table == NULL) { - /* this test is too pessimistic. The argument must depend on a previous entity spec in the from list. - * >>>> FIXME some day <<<< - */ - if (entity->info.spec.derived_table_type == PT_IS_SET_EXPR /* is cselect derived table of method */ - || entity->info.spec.derived_table_type == PT_IS_CSELECT - || entity->info.spec.derived_table->info.query.correlation_level == 1) - { - return true; - } + return false; } - return false; + /* this test is too pessimistic. The argument must depend on a previous entity spec in the from list. + * >>>> FIXME some day <<<< + * + * is this still a thing? 
+ */ + switch (entity->info.spec.derived_table_type) + { + case PT_IS_SET_EXPR: + case PT_IS_CSELECT: + return true; + + case PT_DERIVED_JSON_TABLE: + return true; + + case PT_IS_SUBQUERY: + default: + // what else? + return entity->info.spec.derived_table->info.query.correlation_level == 1; + } } /* @@ -3260,7 +3274,11 @@ get_expr_fcode_rank (FUNC_TYPE fcode) case F_JSON_ARRAY: case F_JSON_REMOVE: case F_JSON_ARRAY_APPEND: + case F_JSON_ARRAY_INSERT: + case F_JSON_SEARCH: + case F_JSON_CONTAINS_PATH: case F_JSON_MERGE: + case F_JSON_MERGE_PATCH: case F_JSON_GET_ALL_PATHS: case F_JSON_INSERT: case F_JSON_REPLACE: @@ -7402,7 +7420,6 @@ qo_discover_indexes (QO_ENV * env) /* iterate over all nodes and find indexes for each node */ for (i = 0; i < env->nnodes; i++) { - nodep = QO_ENV_NODE (env, i); if (nodep->info) { @@ -7414,23 +7431,20 @@ qo_discover_indexes (QO_ENV * env) (PT_SPEC_FLAG_RECORD_INFO_SCAN | PT_SPEC_FLAG_PAGE_INFO_SCAN))) { qo_find_node_indexes (env, nodep); - /* collect statistic information on discovered indexes */ - qo_get_index_info (env, nodep); - } - else - { - QO_NODE_INDEXES (nodep) = NULL; + if (0 < QO_NODE_INFO_N (nodep) && QO_NODE_INDEXES (nodep) != NULL) + { + /* collect statistics if discovers an usable index */ + qo_get_index_info (env, nodep); + continue; + } + /* fall through */ } - } - else - { - /* If the 'info' of node is NULL, then this is probably a derived table. Without the info, we don't have - * class information to work with so we really can't do much so just skip the node. 
- */ - QO_NODE_INDEXES (nodep) = NULL; /* this node will not use a index */ + /* fall through */ } - } /* for (n = 0; n < env->nnodes; n++) */ + /* this node will not use an index */ + QO_NODE_INDEXES (nodep) = NULL; + } /* for each terms, look indexed segements and filter out the segments which don't actually contain any indexes */ for (i = 0; i < env->nterms; i++) diff --git a/src/optimizer/query_graph.h b/src/optimizer/query_graph.h index c143790b356..b7dd17a25f1 100644 --- a/src/optimizer/query_graph.h +++ b/src/optimizer/query_graph.h @@ -458,7 +458,7 @@ struct qo_segment bool class_attr; bool shared_attr; - /* is index term equatity expression? */ + /* is index term equality expression? */ bool index_term_eq_expr; /* @@ -471,7 +471,7 @@ struct qo_segment */ QO_ATTR_INFO *info; - /* indexable terms to which this segment belings */ + /* indexable terms to which this segment belongs */ BITSET index_terms; /* The index of this segment in the corresponding Env's seg array. */ @@ -541,6 +541,7 @@ typedef enum * t g k m * h e e */ + // todo: explain meaning of each flag QO_TC_PATH = 0x30, /* 1 1 0 000 */ QO_TC_JOIN = 0x11, /* 0 1 0 001 */ QO_TC_SARG = 0x02, /* 0 0 0 010 */ diff --git a/src/parser/compile.c b/src/parser/compile.c index b5976b7265b..580ecd86766 100644 --- a/src/parser/compile.c +++ b/src/parser/compile.c @@ -485,6 +485,11 @@ pt_class_pre_fetch (PARSER_CONTEXT * parser, PT_NODE * statement) } } + if (statement->node_type == PT_CREATE_ENTITY && statement->info.create_entity.entity_type == PT_CLASS) + { + (void) parser_walk_tree (parser, statement, NULL, NULL, pt_resolve_cte_specs, NULL); + } + return statement; } diff --git a/src/parser/csql_grammar.y b/src/parser/csql_grammar.y index 310e4402669..998af4f76ad 100644 --- a/src/parser/csql_grammar.y +++ b/src/parser/csql_grammar.y @@ -24,6 +24,7 @@ %{/*%CODE_REQUIRES_START%*/ +#include "json_table_def.h" #include "parser.h" /* @@ -83,6 +84,7 @@ void csql_yyerror (const char *s); extern int g_msg[1024]; 
extern int msg_ptr; extern int yybuffer_pos; +extern size_t json_table_column_count; /*%CODE_END%*/%} %{ @@ -102,10 +104,10 @@ extern int yybuffer_pos; #include "chartype.h" #include "parser.h" #include "parser_message.h" -#include "dbdef.h" #include "language_support.h" #include "unicode_support.h" #include "environment_variable.h" +#include "dbtype.h" #include "transaction_cl.h" #include "csql_grammar_scan.h" #include "system_parameter.h" @@ -115,7 +117,6 @@ extern int yybuffer_pos; #endif /* WINDOWS */ #include "memory_alloc.h" #include "db_elo.h" -#include "dbtype.h" #if defined (SUPPRESS_STRLEN_WARNING) #define strlen(s1) ((int) strlen(s1)) @@ -314,9 +315,11 @@ static FUNCTION_MAP functions[] = { {"json_type", PT_JSON_TYPE}, {"json_extract", PT_JSON_EXTRACT}, {"json_valid", PT_JSON_VALID}, + {"json_unquote", PT_JSON_UNQUOTE}, {"json_length", PT_JSON_LENGTH}, + {"json_quote", PT_JSON_QUOTE}, {"json_depth", PT_JSON_DEPTH}, - {"json_search", PT_JSON_SEARCH}, + {"json_pretty", PT_JSON_PRETTY}, }; @@ -533,6 +536,7 @@ static PT_NODE * pt_create_date_value (PARSER_CONTEXT *parser, const char *str); static PT_NODE * pt_create_json_value (PARSER_CONTEXT *parser, const char *str); +static void pt_jt_append_column_or_nested_node (PT_NODE * jt_node, PT_NODE * jt_col_or_nested); static void pt_value_set_charset_coll (PARSER_CONTEXT *parser, PT_NODE *node, const int codeset_id, @@ -616,6 +620,7 @@ int g_original_buffer_len; container_3 c3; container_4 c4; container_10 c10; + struct json_table_column_behavior jtcb; } @@ -751,6 +756,16 @@ int g_original_buffer_len; %type insert_value_clause %type insert_value_clause_list %type insert_stmt_value_clause +%type select_or_subquery_without_values_query_no_with_clause +%type csql_query_without_values_query_no_with_clause +%type select_expression_without_values_query_no_with_clause +%type csql_query_without_subquery_and_with_clause +%type select_expression_without_subquery +%type select_or_values_query +%type 
subquery_without_subquery_and_with_clause +%type select_or_nested_values_query +%type csql_query_without_values_and_single_subquery +%type select_expression_without_values_and_single_subquery %type insert_expression_value_clause %type insert_value_list %type insert_value @@ -831,6 +846,7 @@ int g_original_buffer_len; %type sp_param_def %type esql_query_stmt %type csql_query +%type csql_query_select_has_no_with_clause %type csql_query_without_values_query %type select_expression_opt_with %type select_expression @@ -1022,6 +1038,10 @@ int g_original_buffer_len; %type limit_expr %type limit_term %type limit_factor +%type json_table_rule +%type json_table_node_rule +%type json_table_column_rule +%type json_table_column_list_rule /*}}}*/ /* define rule type (cptr) */ @@ -1068,6 +1088,13 @@ int g_original_buffer_len; %type of_cast_data_type /*}}}*/ +/* define rule type (json_table_column_behavior) */ +/*{{{*/ +%type json_table_column_behavior_rule +%type json_table_on_error_rule_optional +%type json_table_on_empty_rule_optional +/*}}}*/ + /* Token define */ /*{{{*/ %token ABSOLUTE_ @@ -1178,9 +1205,11 @@ int g_original_buffer_len; %token EACH %token ELSE %token ELSEIF +%token EMPTY %token END %token ENUM %token EQUALS +%token ERROR_ %token ESCAPE %token EVALUATE %token EXCEPT @@ -1203,14 +1232,24 @@ int g_original_buffer_len; %token FULL %token FUNCTION %token FUN_JSON_ARRAY -%token FUN_JSON_OBJECT -%token FUN_JSON_MERGE +%token FUN_JSON_ARRAY_APPEND +%token FUN_JSON_ARRAY_INSERT +%token FUN_JSON_SEARCH +%token FUN_JSON_CONTAINS_PATH +%token FUN_JSON_GET_ALL_PATHS %token FUN_JSON_INSERT +%token FUN_JSON_KEYS +%token FUN_JSON_MERGE +%token FUN_JSON_MERGE_PATCH +%token FUN_JSON_MERGE_PRESERVE +%token FUN_JSON_OBJECT +%token FUN_JSON_REMOVE %token FUN_JSON_REPLACE %token FUN_JSON_SET %token FUN_JSON_KEYS %token FUN_JSON_REMOVE %token FUN_JSON_ARRAY_APPEND +%token FUN_JSON_ARRAY_INSERT %token FUN_JSON_GET_ALL_PATHS %token GENERAL %token GET @@ -1246,6 +1285,8 @@ int 
g_original_buffer_len; %token IS %token ISOLATION %token JOIN +%token JSON +%token JSON_TABLE %token KEY %token KEYLIMIT %token LANGUAGE @@ -1286,6 +1327,7 @@ int g_original_buffer_len; %token NATIONAL %token NATURAL %token NCHAR +%token NESTED %token NEXT %token NO %token NOT @@ -1302,6 +1344,7 @@ int g_original_buffer_len; %token OPTION %token OR %token ORDER +%token ORDINALITY %token OUT_ %token OUTER %token OUTPUT @@ -1446,7 +1489,6 @@ int g_original_buffer_len; %token YEAR_ %token YEAR_MONTH %token ZONE -%token JSON %token YEN_SIGN %token DOLLAR_SIGN @@ -1475,6 +1517,7 @@ int g_original_buffer_len; %token DOT %token RIGHT_ARROW +%token DOUBLE_RIGHT_ARROW %token STRCAT %token COMP_NOT_EQ %token COMP_GE @@ -1539,6 +1582,8 @@ int g_original_buffer_len; %token KEYS %token KILL %token JAVA +%token JSON_ARRAYAGG +%token JSON_OBJECTAGG %token JOB %token LAG %token LAST_VALUE @@ -1563,6 +1608,7 @@ int g_original_buffer_len; %token OFFSET %token ONLINE %token OPEN +%token PATH %token OWNER %token PAGE %token PARTITIONING @@ -1851,8 +1897,17 @@ stmt_ { $$ = $1; } | do_stmt { $$ = $1; } - | esql_query_stmt - { $$ = $1; } + | opt_with_clause + esql_query_stmt + {{ + PT_NODE *with_clause = $1; + PT_NODE *stmt = $2; + if (stmt && with_clause) + { + stmt->info.query.with = with_clause; + } + $$ = stmt; + DBG_PRINT}} | evaluate_stmt { $$ = $1; } | prepare_stmt @@ -1861,10 +1916,28 @@ stmt_ { $$ = $1; } | insert_or_replace_stmt { $$ = $1; } - | update_stmt - { $$ = $1; } - | delete_stmt - { $$ = $1; } + | opt_with_clause + update_stmt + {{ + PT_NODE *with_clause = $1; + PT_NODE *stmt = $2; + if (stmt && with_clause) + { + stmt->info.update.with = with_clause; + } + $$ = stmt; + DBG_PRINT}} + | opt_with_clause + delete_stmt + {{ + PT_NODE *with_clause = $1; + PT_NODE *stmt = $2; + if (stmt && with_clause) + { + stmt->info.delete_.with = with_clause; + } + $$ = stmt; + DBG_PRINT}} | show_stmt { $$ = $1; } | call_stmt @@ -4449,7 +4522,7 @@ join_table_spec PT_NODE *sopt = $3; bool 
natural = false; - if ($4 == PT_JOIN_NONE) + if ($4 == NULL) { /* Not exists ON condition, if it is outer join, report error */ if ($1 == PT_JOIN_LEFT_OUTER @@ -4493,7 +4566,7 @@ join_condition : /* empty */ {{ parser_save_and_set_pseudoc (0); - $$ = PT_JOIN_NONE; /* just return NULL */ + $$ = NULL; /* just return NULL */ DBG_PRINT}} | ON_ {{ @@ -4609,7 +4682,7 @@ original_table_spec if ($3) { PT_NODE *hint = NULL, *alias = NULL; - char *qualifier_name = NULL; + const char *qualifier_name = NULL; /* Get qualifier */ alias = CONTAINER_AT_0 ($2); @@ -4752,6 +4825,19 @@ original_table_spec $$ = ent; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} + | JSON_TABLE json_table_rule AS identifier + {{ + PT_NODE *ent = parser_new_node (this_parser, PT_SPEC); + if (ent) + { + ent->info.spec.derived_table = $2; // json_table_rule + ent->info.spec.derived_table_type = PT_DERIVED_JSON_TABLE; + ent->info.spec.range_var = $4; // identifier + } + $$ = ent; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} ; @@ -6364,10 +6450,27 @@ insert_stmt_value_clause PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) DBG_PRINT}} - | csql_query_without_values_query + | opt_with_clause + csql_query_without_values_and_single_subquery + {{ + + PT_NODE *with_clause = $1; + PT_NODE *select_node = $2; + select_node->info.query.with = with_clause; + PT_NODE *nls = pt_node_list (this_parser, PT_IS_SUBQUERY, select_node); + + $$ = nls; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + | '(' opt_with_clause + csql_query_without_values_query_no_with_clause ')' {{ + PT_NODE *with_clause = $2; + PT_NODE *select_node = $3; + select_node->info.query.with = with_clause; + PT_NODE *nls = pt_node_list (this_parser, PT_IS_SUBQUERY, select_node); - PT_NODE *nls = pt_node_list (this_parser, PT_IS_SUBQUERY, $1); $$ = nls; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) @@ -10438,7 +10541,7 @@ transaction_mode if (tm && is) { PARSER_SAVE_ERR_CONTEXT (tm, @$.buffer_pos) - async_ws_or_error = 
TO_NUMBER (CONTAINER_AT_3 ($3)); + async_ws_or_error = (int) TO_NUMBER (CONTAINER_AT_3 ($3)); if (async_ws_or_error < 0) { PT_ERRORm(this_parser, tm, MSGCAT_SET_PARSER_SYNTAX, @@ -10451,7 +10554,7 @@ transaction_mode tm->info.isolation_lvl.async_ws = async_ws_or_error; - async_ws_or_error = TO_NUMBER (CONTAINER_AT_3 ($5)); + async_ws_or_error = (int) TO_NUMBER (CONTAINER_AT_3 ($5)); if (async_ws_or_error < 0) { PT_ERRORm(this_parser, is, MSGCAT_SET_PARSER_SYNTAX, @@ -10512,7 +10615,7 @@ transaction_mode {{ PT_NODE *tm = parser_new_node (this_parser, PT_ISOLATION_LVL); - int async_ws_or_error = TO_NUMBER (CONTAINER_AT_3 ($3)); + int async_ws_or_error = (int) TO_NUMBER (CONTAINER_AT_3 ($3)); PARSER_SAVE_ERR_CONTEXT (tm, @$.buffer_pos) @@ -11711,7 +11814,7 @@ opt_sp_in_out esql_query_stmt : { parser_select_level++; } - csql_query + csql_query_select_has_no_with_clause {{ $$ = $2; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) @@ -11808,7 +11911,7 @@ csql_query DBG_PRINT}} ; -csql_query_without_values_query +csql_query_select_has_no_with_clause : {{ @@ -11825,7 +11928,7 @@ csql_query_without_values_query parser_save_and_set_pseudoc (1); DBG_PRINT}} - select_expression_without_values_query + select_expression {{ PT_NODE *node = $2; @@ -11896,200 +11999,819 @@ csql_query_without_values_query DBG_PRINT}} ; -select_expression_opt_with - : opt_with_clause - select_expression +csql_query_without_subquery_and_with_clause + : {{ - PT_NODE *with_clause = $1; - PT_NODE *stmt = $2; - if (stmt && with_clause) - { - stmt->info.query.with = with_clause; - } - - $$ = stmt; - - DBG_PRINT}} - ; + parser_save_and_set_cannot_cache (false); + parser_save_and_set_ic (0); + parser_save_and_set_gc (0); + parser_save_and_set_oc (0); + parser_save_and_set_wjc (0); + parser_save_and_set_sysc (0); + parser_save_and_set_prc (0); + parser_save_and_set_cbrc (0); + parser_save_and_set_serc (1); + parser_save_and_set_sqc (1); + parser_save_and_set_pseudoc (1); -select_expression - : select_expression - 
{{ - PT_NODE *node = $1; - parser_push_orderby_node (node); - }} - opt_orderby_clause - {{ - - PT_NODE *node = parser_pop_orderby_node (); - - if (node && parser_cannot_cache) - { - node->info.query.reexecute = 1; - node->info.query.do_cache = 0; - node->info.query.do_not_cache = 1; - } - + DBG_PRINT}} + select_expression_without_subquery + {{ - if (parser_subquery_check == 0) - PT_ERRORmf(this_parser, pt_top(this_parser), - MSGCAT_SET_PARSER_SEMANTIC, - MSGCAT_SEMANTIC_NOT_ALLOWED_HERE, "Subquery"); - - if (node) - { + PT_NODE *node = $2; + parser_push_orderby_node (node); - PT_NODE *order = node->info.query.order_by; - if (order && order->info.sort_spec.expr - && order->info.sort_spec.expr->node_type == PT_VALUE - && order->info.sort_spec.expr->type_enum == PT_TYPE_NULL) - { - if (!node->info.query.q.select.group_by) - { - PT_ERRORm (this_parser, node, MSGCAT_SET_PARSER_SEMANTIC, - MSGCAT_SEMANTIC_ORDERBYNULL_REQUIRES_GROUPBY); - } - else - { - parser_free_tree (this_parser, node->info.query.order_by); - node->info.query.order_by = NULL; - } - } - } - - parser_push_orderby_node (node); - DBG_PRINT}} - opt_select_limit_clause - opt_for_update_clause + opt_orderby_clause {{ - + PT_NODE *node = parser_pop_orderby_node (); - $$ = node; - PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) - - DBG_PRINT}} - table_op select_or_subquery - {{ - - PT_NODE *stmt = $8; - PT_NODE *arg1 = $1; - - if (stmt) - { - stmt->info.query.id = (UINTPTR) stmt; - stmt->info.query.q.union_.arg1 = $1; - stmt->info.query.q.union_.arg2 = $9; - - if (arg1 != NULL - && arg1->info.query.is_subquery != PT_IS_SUBQUERY - && arg1->info.query.order_by != NULL) + + if (node && parser_cannot_cache) + { + node->info.query.reexecute = 1; + node->info.query.do_cache = 0; + node->info.query.do_not_cache = 1; + } + + parser_restore_cannot_cache (); + parser_restore_ic (); + parser_restore_gc (); + parser_restore_oc (); + parser_restore_wjc (); + parser_restore_sysc (); + parser_restore_prc (); + 
parser_restore_cbrc (); + parser_restore_serc (); + parser_restore_sqc (); + parser_restore_pseudoc (); + + if (parser_subquery_check == 0) + PT_ERRORmf(this_parser, pt_top(this_parser), + MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_NOT_ALLOWED_HERE, "Subquery"); + + if (node) + { + /* handle ORDER BY NULL */ + PT_NODE *order = node->info.query.order_by; + if (order && order->info.sort_spec.expr + && order->info.sort_spec.expr->node_type == PT_VALUE + && order->info.sort_spec.expr->type_enum == PT_TYPE_NULL) { - PT_ERRORm (this_parser, stmt, - MSGCAT_SET_PARSER_SYNTAX, - MSGCAT_SYNTAX_INVALID_UNION_ORDERBY); + if (!node->info.query.q.select.group_by) + { + PT_ERRORm (this_parser, node, MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_ORDERBYNULL_REQUIRES_GROUPBY); + } + else + { + parser_free_tree (this_parser, node->info.query.order_by); + node->info.query.order_by = NULL; + } } - } - + } - $$ = stmt; - PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + parser_push_orderby_node (node); DBG_PRINT}} - | select_or_subquery + opt_select_limit_clause + opt_for_update_clause {{ - $$ = $1; + PT_NODE *node = parser_pop_orderby_node (); + $$ = node; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) DBG_PRINT}} ; -select_expression_without_values_query - : select_expression_without_values_query - {{ - PT_NODE *node = $1; - parser_push_orderby_node (node); - }} - opt_orderby_clause - {{ - - PT_NODE *node = parser_pop_orderby_node (); - - if (node && parser_cannot_cache) - { - node->info.query.reexecute = 1; - node->info.query.do_cache = 0; - node->info.query.do_not_cache = 1; - } - +csql_query_without_values_query + : + {{ - if (parser_subquery_check == 0) - PT_ERRORmf(this_parser, pt_top(this_parser), - MSGCAT_SET_PARSER_SEMANTIC, - MSGCAT_SEMANTIC_NOT_ALLOWED_HERE, "Subquery"); - - if (node) - { + parser_save_and_set_cannot_cache (false); + parser_save_and_set_ic (0); + parser_save_and_set_gc (0); + parser_save_and_set_oc (0); + parser_save_and_set_wjc (0); + parser_save_and_set_sysc 
(0); + parser_save_and_set_prc (0); + parser_save_and_set_cbrc (0); + parser_save_and_set_serc (1); + parser_save_and_set_sqc (1); + parser_save_and_set_pseudoc (1); - PT_NODE *order = node->info.query.order_by; - if (order && order->info.sort_spec.expr - && order->info.sort_spec.expr->node_type == PT_VALUE - && order->info.sort_spec.expr->type_enum == PT_TYPE_NULL) - { - if (!node->info.query.q.select.group_by) - { - PT_ERRORm (this_parser, node, MSGCAT_SET_PARSER_SEMANTIC, - MSGCAT_SEMANTIC_ORDERBYNULL_REQUIRES_GROUPBY); - } - else - { - parser_free_tree (this_parser, node->info.query.order_by); - node->info.query.order_by = NULL; - } - } - } - - parser_push_orderby_node (node); - DBG_PRINT}} - opt_select_limit_clause - opt_for_update_clause + select_expression_without_values_query {{ - + + PT_NODE *node = $2; + parser_push_orderby_node (node); + + DBG_PRINT}} + opt_orderby_clause + {{ + PT_NODE *node = parser_pop_orderby_node (); - $$ = node; - PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) - - DBG_PRINT}} - table_op select_or_subquery_without_values_query - {{ - - PT_NODE *stmt = $8; - PT_NODE *arg1 = $1; - - if (stmt) - { - stmt->info.query.id = (UINTPTR) stmt; - stmt->info.query.q.union_.arg1 = $1; - stmt->info.query.q.union_.arg2 = $9; - if (arg1 != NULL - && arg1->info.query.is_subquery != PT_IS_SUBQUERY - && arg1->info.query.order_by != NULL) - { - PT_ERRORm (this_parser, stmt, - MSGCAT_SET_PARSER_SYNTAX, - MSGCAT_SYNTAX_INVALID_UNION_ORDERBY); - } - } + + if (node && parser_cannot_cache) + { + node->info.query.reexecute = 1; + node->info.query.do_cache = 0; + node->info.query.do_not_cache = 1; + } + + parser_restore_cannot_cache (); + parser_restore_ic (); + parser_restore_gc (); + parser_restore_oc (); + parser_restore_wjc (); + parser_restore_sysc (); + parser_restore_prc (); + parser_restore_cbrc (); + parser_restore_serc (); + parser_restore_sqc (); + parser_restore_pseudoc (); + + if (parser_subquery_check == 0) + PT_ERRORmf(this_parser, 
pt_top(this_parser), + MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_NOT_ALLOWED_HERE, "Subquery"); + + if (node) + { + /* handle ORDER BY NULL */ + PT_NODE *order = node->info.query.order_by; + if (order && order->info.sort_spec.expr + && order->info.sort_spec.expr->node_type == PT_VALUE + && order->info.sort_spec.expr->type_enum == PT_TYPE_NULL) + { + if (!node->info.query.q.select.group_by) + { + PT_ERRORm (this_parser, node, MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_ORDERBYNULL_REQUIRES_GROUPBY); + } + else + { + parser_free_tree (this_parser, node->info.query.order_by); + node->info.query.order_by = NULL; + } + } + } + + parser_push_orderby_node (node); + + DBG_PRINT}} + opt_select_limit_clause + opt_for_update_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + ; + +csql_query_without_values_query_no_with_clause + : + {{ + + parser_save_and_set_cannot_cache (false); + parser_save_and_set_ic (0); + parser_save_and_set_gc (0); + parser_save_and_set_oc (0); + parser_save_and_set_wjc (0); + parser_save_and_set_sysc (0); + parser_save_and_set_prc (0); + parser_save_and_set_cbrc (0); + parser_save_and_set_serc (1); + parser_save_and_set_sqc (1); + parser_save_and_set_pseudoc (1); + + DBG_PRINT}} + select_expression_without_values_query_no_with_clause + {{ + + PT_NODE *node = $2; + parser_push_orderby_node (node); + + DBG_PRINT}} + opt_orderby_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + + if (node && parser_cannot_cache) + { + node->info.query.reexecute = 1; + node->info.query.do_cache = 0; + node->info.query.do_not_cache = 1; + } + + parser_restore_cannot_cache (); + parser_restore_ic (); + parser_restore_gc (); + parser_restore_oc (); + parser_restore_wjc (); + parser_restore_sysc (); + parser_restore_prc (); + parser_restore_cbrc (); + parser_restore_serc (); + parser_restore_sqc (); + parser_restore_pseudoc (); + + if (parser_subquery_check == 0) + 
PT_ERRORmf(this_parser, pt_top(this_parser), + MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_NOT_ALLOWED_HERE, "Subquery"); + + if (node) + { + /* handle ORDER BY NULL */ + PT_NODE *order = node->info.query.order_by; + if (order && order->info.sort_spec.expr + && order->info.sort_spec.expr->node_type == PT_VALUE + && order->info.sort_spec.expr->type_enum == PT_TYPE_NULL) + { + if (!node->info.query.q.select.group_by) + { + PT_ERRORm (this_parser, node, MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_ORDERBYNULL_REQUIRES_GROUPBY); + } + else + { + parser_free_tree (this_parser, node->info.query.order_by); + node->info.query.order_by = NULL; + } + } + } + + parser_push_orderby_node (node); + + DBG_PRINT}} + opt_select_limit_clause + opt_for_update_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + ; + +csql_query_without_values_and_single_subquery + : + {{ + + parser_save_and_set_cannot_cache (false); + parser_save_and_set_ic (0); + parser_save_and_set_gc (0); + parser_save_and_set_oc (0); + parser_save_and_set_wjc (0); + parser_save_and_set_sysc (0); + parser_save_and_set_prc (0); + parser_save_and_set_cbrc (0); + parser_save_and_set_serc (1); + parser_save_and_set_sqc (1); + parser_save_and_set_pseudoc (1); + + DBG_PRINT}} + select_expression_without_values_and_single_subquery + {{ + + PT_NODE *node = $2; + parser_push_orderby_node (node); + + DBG_PRINT}} + opt_orderby_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + + if (node && parser_cannot_cache) + { + node->info.query.reexecute = 1; + node->info.query.do_cache = 0; + node->info.query.do_not_cache = 1; + } + + parser_restore_cannot_cache (); + parser_restore_ic (); + parser_restore_gc (); + parser_restore_oc (); + parser_restore_wjc (); + parser_restore_sysc (); + parser_restore_prc (); + parser_restore_cbrc (); + parser_restore_serc (); + parser_restore_sqc (); + parser_restore_pseudoc (); + + if 
(parser_subquery_check == 0) + PT_ERRORmf (this_parser, pt_top(this_parser), MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_NOT_ALLOWED_HERE, "Subquery"); + + if (node) + { + /* handle ORDER BY NULL */ + PT_NODE *order = node->info.query.order_by; + if (order && order->info.sort_spec.expr + && order->info.sort_spec.expr->node_type == PT_VALUE + && order->info.sort_spec.expr->type_enum == PT_TYPE_NULL) + { + if (!node->info.query.q.select.group_by) + { + PT_ERRORm (this_parser, node, MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_ORDERBYNULL_REQUIRES_GROUPBY); + } + else + { + parser_free_tree (this_parser, node->info.query.order_by); + node->info.query.order_by = NULL; + } + } + } + + parser_push_orderby_node (node); + + DBG_PRINT}} + opt_select_limit_clause + opt_for_update_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + ; + + +select_expression_opt_with + : opt_with_clause + select_expression + {{ + + PT_NODE *with_clause = $1; + PT_NODE *stmt = $2; + if (stmt && with_clause) + { + stmt->info.query.with = with_clause; + } + + $$ = stmt; + + DBG_PRINT}} + ; + +select_expression_without_subquery + : select_expression_without_subquery + {{ + PT_NODE *node = $1; + parser_push_orderby_node (node); + }} + + opt_orderby_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + + if (node && parser_cannot_cache) + { + node->info.query.reexecute = 1; + node->info.query.do_cache = 0; + node->info.query.do_not_cache = 1; + } + + + if (parser_subquery_check == 0) + PT_ERRORmf (this_parser, pt_top(this_parser), MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_NOT_ALLOWED_HERE, "Subquery"); + + if (node) + { + PT_NODE *order = node->info.query.order_by; + if (order && order->info.sort_spec.expr + && order->info.sort_spec.expr->node_type == PT_VALUE + && order->info.sort_spec.expr->type_enum == PT_TYPE_NULL) + { + if (!node->info.query.q.select.group_by) + { + PT_ERRORm (this_parser, node, 
MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_ORDERBYNULL_REQUIRES_GROUPBY); + } + else + { + parser_free_tree (this_parser, node->info.query.order_by); + node->info.query.order_by = NULL; + } + } + } + + parser_push_orderby_node (node); + + DBG_PRINT}} + + opt_select_limit_clause + opt_for_update_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + + table_op select_or_values_query + {{ + + PT_NODE *stmt = $8; + PT_NODE *arg1 = $1; + + if (stmt) + { + stmt->info.query.id = (UINTPTR) stmt; + stmt->info.query.q.union_.arg1 = $1; + stmt->info.query.q.union_.arg2 = $9; + + if (arg1 != NULL + && arg1->info.query.is_subquery != PT_IS_SUBQUERY + && arg1->info.query.order_by != NULL) + { + PT_ERRORm (this_parser, stmt, + MSGCAT_SET_PARSER_SYNTAX, + MSGCAT_SYNTAX_INVALID_UNION_ORDERBY); + } + } + + + $$ = stmt; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + | select_or_values_query + {{ + $$ = $1; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + ; + +select_expression + : select_expression + {{ + PT_NODE *node = $1; + parser_push_orderby_node (node); + }} + opt_orderby_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + + if (node && parser_cannot_cache) + { + node->info.query.reexecute = 1; + node->info.query.do_cache = 0; + node->info.query.do_not_cache = 1; + } + + + if (parser_subquery_check == 0) + PT_ERRORmf (this_parser, pt_top(this_parser), MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_NOT_ALLOWED_HERE, "Subquery"); + + if (node) + { + + PT_NODE *order = node->info.query.order_by; + if (order && order->info.sort_spec.expr + && order->info.sort_spec.expr->node_type == PT_VALUE + && order->info.sort_spec.expr->type_enum == PT_TYPE_NULL) + { + if (!node->info.query.q.select.group_by) + { + PT_ERRORm (this_parser, node, MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_ORDERBYNULL_REQUIRES_GROUPBY); + } + else + { + parser_free_tree (this_parser, 
node->info.query.order_by); + node->info.query.order_by = NULL; + } + } + } + + parser_push_orderby_node (node); + + DBG_PRINT}} + opt_select_limit_clause + opt_for_update_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + table_op select_or_subquery + {{ + + PT_NODE *stmt = $8; + PT_NODE *arg1 = $1; + + if (stmt) + { + stmt->info.query.id = (UINTPTR) stmt; + stmt->info.query.q.union_.arg1 = $1; + stmt->info.query.q.union_.arg2 = $9; + + if (arg1 != NULL + && arg1->info.query.is_subquery != PT_IS_SUBQUERY + && arg1->info.query.order_by != NULL) + { + PT_ERRORm (this_parser, stmt, MSGCAT_SET_PARSER_SYNTAX, + MSGCAT_SYNTAX_INVALID_UNION_ORDERBY); + } + } + + + $$ = stmt; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + | select_or_subquery + {{ + + $$ = $1; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + ; + +select_expression_without_values_query + : select_expression_without_values_query + {{ + PT_NODE *node = $1; + parser_push_orderby_node (node); + }} + opt_orderby_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + + if (node && parser_cannot_cache) + { + node->info.query.reexecute = 1; + node->info.query.do_cache = 0; + node->info.query.do_not_cache = 1; + } + + if (parser_subquery_check == 0) + PT_ERRORmf (this_parser, pt_top(this_parser), MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_NOT_ALLOWED_HERE, "Subquery"); + + if (node) + { + + PT_NODE *order = node->info.query.order_by; + if (order && order->info.sort_spec.expr + && order->info.sort_spec.expr->node_type == PT_VALUE + && order->info.sort_spec.expr->type_enum == PT_TYPE_NULL) + { + if (!node->info.query.q.select.group_by) + { + PT_ERRORm (this_parser, node, MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_ORDERBYNULL_REQUIRES_GROUPBY); + } + else + { + parser_free_tree (this_parser, node->info.query.order_by); + node->info.query.order_by = NULL; + } + } + } + + 
parser_push_orderby_node (node); + + DBG_PRINT}} + opt_select_limit_clause + opt_for_update_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + table_op select_or_subquery_without_values_query + {{ + + PT_NODE *stmt = $8; + PT_NODE *arg1 = $1; + + if (stmt) + { + stmt->info.query.id = (UINTPTR) stmt; + stmt->info.query.q.union_.arg1 = $1; + stmt->info.query.q.union_.arg2 = $9; + if (arg1 != NULL + && arg1->info.query.is_subquery != PT_IS_SUBQUERY + && arg1->info.query.order_by != NULL) + { + PT_ERRORm (this_parser, stmt, MSGCAT_SET_PARSER_SYNTAX, + MSGCAT_SYNTAX_INVALID_UNION_ORDERBY); + } + } + + + $$ = stmt; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + | select_or_subquery_without_values_query + {{ + + $$ = $1; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + ; + +select_expression_without_values_query_no_with_clause + : select_expression_without_values_query_no_with_clause + {{ + PT_NODE *node = $1; + parser_push_orderby_node (node); + }} + opt_orderby_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + + if (node && parser_cannot_cache) + { + node->info.query.reexecute = 1; + node->info.query.do_cache = 0; + node->info.query.do_not_cache = 1; + } + + if (parser_subquery_check == 0) + PT_ERRORmf (this_parser, pt_top(this_parser), MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_NOT_ALLOWED_HERE, "Subquery"); + + if (node) + { + + PT_NODE *order = node->info.query.order_by; + if (order && order->info.sort_spec.expr + && order->info.sort_spec.expr->node_type == PT_VALUE + && order->info.sort_spec.expr->type_enum == PT_TYPE_NULL) + { + if (!node->info.query.q.select.group_by) + { + PT_ERRORm (this_parser, node, MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_ORDERBYNULL_REQUIRES_GROUPBY); + } + else + { + parser_free_tree (this_parser, node->info.query.order_by); + node->info.query.order_by = NULL; + } + } + } + + parser_push_orderby_node 
(node); + + DBG_PRINT}} + opt_select_limit_clause + opt_for_update_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + table_op select_or_subquery_without_values_query_no_with_clause + {{ + + PT_NODE *stmt = $8; + PT_NODE *arg1 = $1; + + if (stmt) + { + stmt->info.query.id = (UINTPTR) stmt; + stmt->info.query.q.union_.arg1 = $1; + stmt->info.query.q.union_.arg2 = $9; + if (arg1 != NULL + && arg1->info.query.is_subquery != PT_IS_SUBQUERY + && arg1->info.query.order_by != NULL) + { + PT_ERRORm (this_parser, stmt, MSGCAT_SET_PARSER_SYNTAX, + MSGCAT_SYNTAX_INVALID_UNION_ORDERBY); + } + } + + + $$ = stmt; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + | select_or_subquery_without_values_query_no_with_clause + {{ + + $$ = $1; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + ; + +select_expression_without_values_and_single_subquery + : select_expression_without_values_query_no_with_clause + {{ + PT_NODE *node = $1; + parser_push_orderby_node (node); + }} + opt_orderby_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + + if (node && parser_cannot_cache) + { + node->info.query.reexecute = 1; + node->info.query.do_cache = 0; + node->info.query.do_not_cache = 1; + } + + + if (parser_subquery_check == 0) + PT_ERRORmf (this_parser, pt_top(this_parser), MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_NOT_ALLOWED_HERE, "Subquery"); + + if (node) + { + + PT_NODE *order = node->info.query.order_by; + if (order && order->info.sort_spec.expr + && order->info.sort_spec.expr->node_type == PT_VALUE + && order->info.sort_spec.expr->type_enum == PT_TYPE_NULL) + { + if (!node->info.query.q.select.group_by) + { + PT_ERRORm (this_parser, node, MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_ORDERBYNULL_REQUIRES_GROUPBY); + } + else + { + parser_free_tree (this_parser, node->info.query.order_by); + node->info.query.order_by = NULL; + } + } + } + + 
parser_push_orderby_node (node); + + DBG_PRINT}} + opt_select_limit_clause + opt_for_update_clause + {{ + + PT_NODE *node = parser_pop_orderby_node (); + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + table_op select_or_subquery_without_values_query_no_with_clause + {{ + + PT_NODE *stmt = $8; + PT_NODE *arg1 = $1; + + if (stmt) + { + stmt->info.query.id = (UINTPTR) stmt; + stmt->info.query.q.union_.arg1 = $1; + stmt->info.query.q.union_.arg2 = $9; + if (arg1 != NULL + && arg1->info.query.is_subquery != PT_IS_SUBQUERY + && arg1->info.query.order_by != NULL) + { + PT_ERRORm (this_parser, stmt, MSGCAT_SET_PARSER_SYNTAX, + MSGCAT_SYNTAX_INVALID_UNION_ORDERBY); + } + } $$ = stmt; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) DBG_PRINT}} - | select_or_subquery_without_values_query + | select_or_nested_values_query {{ $$ = $1; @@ -12202,6 +12924,21 @@ select_or_subquery DBG_PRINT}} ; +select_or_values_query + : values_query + {{ + $$ = $1; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} + | select_stmt + {{ + + $$ = $1; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + ; + select_or_subquery_without_values_query : select_stmt {{ @@ -12219,6 +12956,38 @@ select_or_subquery_without_values_query DBG_PRINT}} ; +select_or_subquery_without_values_query_no_with_clause + : select_stmt + {{ + + $$ = $1; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + | subquery_without_subquery_and_with_clause + {{ + + $$ = $1; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + ; + +select_or_nested_values_query + : select_stmt + {{ + + $$ = $1; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + | '(' values_query ')' + {{ + $$ = $2; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} + ; + values_query : of_value_values {{ @@ -12382,8 +13151,8 @@ opt_with_clause DBG_PRINT}} | WITH /* $1 */ - opt_recursive /* $2 */ - cte_definition_list /* $3 */ + opt_recursive /* $2 */ + cte_definition_list /* $3 
*/ {{ PT_NODE *node = parser_new_node (this_parser, PT_WITH_CLAUSE); @@ -12968,7 +13737,7 @@ to_param if (val) { val->info.name.meta_class = PT_PARAMETER; - val->info.name.spec_id = (long) val; + val->info.name.spec_id = (UINTPTR) val; val->info.name.resolved = pt_makename ("out parameter"); } @@ -12984,7 +13753,7 @@ to_param if (val) { val->info.name.meta_class = PT_PARAMETER; - val->info.name.spec_id = (long) val; + val->info.name.spec_id = (UINTPTR) val; val->info.name.resolved = pt_makename ("out parameter"); } @@ -15232,6 +16001,43 @@ reserved_func PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) DBG_PRINT}} + | JSON_ARRAYAGG '(' expression_ ')' + {{ + + PT_NODE *node = parser_new_node (this_parser, PT_FUNCTION); + + if (node) + { + node->info.function.function_type = PT_JSON_ARRAYAGG; + node->info.function.all_or_distinct = PT_ALL; + node->info.function.arg_list = parser_make_link ($3, NULL); + } + + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + | JSON_OBJECTAGG '(' expression_list ')' + {{ + PT_NODE *node = parser_new_node (this_parser, PT_FUNCTION); + PT_NODE *args_list = $3; + + if (parser_count_list(args_list) != 2) + { + PT_ERRORm (this_parser, node, MSGCAT_SET_PARSER_SYNTAX, + MSGCAT_SYNTAX_INVALID_JSON_OBJECTAGG); + } + + if (node) + { + node->info.function.function_type = PT_JSON_OBJECTAGG; + node->info.function.all_or_distinct = PT_ALL; + node->info.function.arg_list = args_list; + } + + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} | of_percentile '(' expression_ ')' WITHIN GROUP_ '(' ORDER BY sort_spec ')' opt_over_analytic_partition_by {{ @@ -16195,6 +17001,25 @@ reserved_func "json_array"); } + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} + | FUN_JSON_CONTAINS_PATH '(' expression_list ')' + {{ + PT_NODE *args_list = $3; + PT_NODE *node = NULL; + int len; + + len = parser_count_list (args_list); + node = parser_make_expr_with_func (this_parser, F_JSON_CONTAINS_PATH, 
args_list); + if (len < 3) + { + PT_ERRORmf (this_parser, args_list, + MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_INVALID_INTERNAL_FUNCTION, + "json_contains_path"); + } + $$ = node; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) DBG_PRINT}} @@ -16214,6 +17039,44 @@ reserved_func "json_merge"); } + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} + | FUN_JSON_MERGE_PATCH '(' expression_list ')' + {{ + PT_NODE *args_list = $3; + PT_NODE *node = NULL; + int len; + + len = parser_count_list (args_list); + node = parser_make_expr_with_func (this_parser, F_JSON_MERGE_PATCH, args_list); + if (len < 2) + { + PT_ERRORmf (this_parser, args_list, + MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_INVALID_INTERNAL_FUNCTION, + "json_merge_patch"); + } + + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} + | FUN_JSON_MERGE_PRESERVE '(' expression_list ')' + {{ + PT_NODE *args_list = $3; + PT_NODE *node = NULL; + int len; + + len = parser_count_list (args_list); + node = parser_make_expr_with_func (this_parser, F_JSON_MERGE, args_list); + if (len < 2) + { + PT_ERRORmf (this_parser, args_list, + MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_INVALID_INTERNAL_FUNCTION, + "json_merge_preserve"); + } + $$ = node; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) DBG_PRINT}} @@ -16236,7 +17099,7 @@ reserved_func $$ = node; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) DBG_PRINT}} - | FUN_JSON_REPLACE '(' expression_list ')' + | FUN_JSON_REPLACE '(' expression_list ')' {{ PT_NODE *args_list = $3; PT_NODE *node = NULL; @@ -16255,7 +17118,7 @@ reserved_func $$ = node; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) DBG_PRINT}} - | FUN_JSON_SET '(' expression_list ')' + | FUN_JSON_SET '(' expression_list ')' {{ PT_NODE *args_list = $3; PT_NODE *node = NULL; @@ -16274,7 +17137,7 @@ reserved_func $$ = node; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) DBG_PRINT}} - | FUN_JSON_KEYS '(' expression_list ')' + | FUN_JSON_KEYS '(' expression_list ')' {{ PT_NODE 
*args_list = $3; PT_NODE *node = NULL; @@ -16312,7 +17175,7 @@ reserved_func $$ = node; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) DBG_PRINT}} - | FUN_JSON_ARRAY_APPEND '(' expression_list ')' + | FUN_JSON_ARRAY_APPEND '(' expression_list ')' {{ PT_NODE *args_list = $3; PT_NODE *node = NULL; @@ -16331,7 +17194,44 @@ reserved_func $$ = node; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) DBG_PRINT}} - | FUN_JSON_GET_ALL_PATHS '(' expression_list ')' + | FUN_JSON_ARRAY_INSERT '(' expression_list ')' + {{ + PT_NODE *args_list = $3; + PT_NODE *node = NULL; + int len; + + len = parser_count_list (args_list); + node = parser_make_expr_with_func (this_parser, F_JSON_ARRAY_INSERT, args_list); + if (len < 3 || len % 2 != 1) + { + PT_ERRORmf (this_parser, args_list, + MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_INVALID_INTERNAL_FUNCTION, + "json_array_insert"); + } + + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} + | FUN_JSON_SEARCH '(' expression_list ')' + {{ + PT_NODE *args_list = $3; + PT_NODE *node = NULL; + int len = parser_count_list (args_list); + node = parser_make_expr_with_func (this_parser, F_JSON_SEARCH, args_list); + + if (len < 3) + { + PT_ERRORmf (this_parser, args_list, + MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_INVALID_INTERNAL_FUNCTION, + "json_search"); + } + + $$ = node; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} + | FUN_JSON_GET_ALL_PATHS '(' expression_list ')' {{ PT_NODE *args_list = $3; PT_NODE *node = NULL; @@ -16350,6 +17250,40 @@ reserved_func $$ = node; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) DBG_PRINT}} + | identifier RIGHT_ARROW CHAR_STRING + {{ + PT_NODE *matcher = parser_new_node (this_parser, PT_VALUE); + + if (matcher) + { + matcher->type_enum = PT_TYPE_CHAR; + matcher->info.value.string_type = ' '; + matcher->info.value.data_value.str = + pt_append_bytes (this_parser, NULL, $3, strlen ($3)); + PT_NODE_PRINT_VALUE_TO_TEXT (this_parser, matcher); + } + + PT_NODE *expr = 
parser_make_expression (this_parser, PT_JSON_EXTRACT, $1, matcher, NULL); + $$ = expr; + DBG_PRINT}} + | identifier DOUBLE_RIGHT_ARROW CHAR_STRING + {{ + PT_NODE *matcher = parser_new_node (this_parser, PT_VALUE); + + if (matcher) + { + matcher->type_enum = PT_TYPE_CHAR; + matcher->info.value.string_type = ' '; + matcher->info.value.data_value.str = + pt_append_bytes (this_parser, NULL, $3, strlen ($3)); + PT_NODE_PRINT_VALUE_TO_TEXT (this_parser, matcher); + } + + PT_NODE *extract_expr = parser_make_expression (this_parser, PT_JSON_EXTRACT, $1, matcher, NULL); + PT_NODE *expr = parser_make_expression (this_parser, PT_JSON_EXTRACT, extract_expr, NULL, NULL); + + $$ = expr; + DBG_PRINT}} ; of_cume_dist_percent_rank_function @@ -18469,6 +19403,26 @@ subquery DBG_PRINT}} ; +subquery_without_subquery_and_with_clause + : '(' csql_query_without_subquery_and_with_clause ')' + {{ + + PT_NODE *stmt = $2; + + if (parser_within_join_condition) + { + PT_ERRORm (this_parser, stmt, MSGCAT_SET_PARSER_SYNTAX, + MSGCAT_SYNTAX_JOIN_COND_SUBQ); + } + + if (stmt) + stmt->info.query.is_subquery = PT_IS_SUBQUERY; + $$ = stmt; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + ; + path_expression : path_header path_dot NONE %dprec 6 @@ -21326,6 +22280,16 @@ identifier $$ = p; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} + | PATH + {{ + + PT_NODE *p = parser_new_node (this_parser, PT_NAME); + if (p) + p->info.name.original = $1; + $$ = p; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} | PERCENT_RANK {{ @@ -21905,6 +22869,26 @@ identifier $$ = p; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + DBG_PRINT}} + | JSON_ARRAYAGG + {{ + + PT_NODE *p = parser_new_node (this_parser, PT_NAME); + if (p) + p->info.name.original = $1; + $$ = p; + PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) + + DBG_PRINT}} + | JSON_OBJECTAGG + {{ + + PT_NODE *p = parser_new_node (this_parser, PT_NAME); + if (p) + p->info.name.original = $1; + $$ = p; + PARSER_SAVE_ERR_CONTEXT ($$, 
@$.buffer_pos) + DBG_PRINT}} /*}}}*/ | NTILE @@ -23026,7 +24010,8 @@ json_literal $$ = val; PARSER_SAVE_ERR_CONTEXT ($$, @$.buffer_pos) - DBG_PRINT}}; + DBG_PRINT}} + create_as_clause : opt_replace AS csql_query {{ @@ -23328,7 +24313,139 @@ vacuum_stmt $$ = node; DBG_PRINT}} ; - + +json_table_column_behavior_rule + : Null + {{ + $$.m_behavior = JSON_TABLE_RETURN_NULL; + $$.m_default_value = NULL; + DBG_PRINT}} + | ERROR_ + {{ + $$.m_behavior = JSON_TABLE_THROW_ERROR; + $$.m_default_value = NULL; + DBG_PRINT}} + | DEFAULT expression_ + {{ + PT_NODE * default_value = $2; + if (default_value->node_type != PT_VALUE) + { + PT_ERROR (this_parser, default_value, "invalid JSON_TABLE default"); + } + DB_VALUE * temp = pt_value_to_db (this_parser, default_value); + $$.m_behavior = JSON_TABLE_DEFAULT_VALUE; + $$.m_default_value = db_value_copy (temp); + + parser_free_node(this_parser, default_value); + DBG_PRINT}} + ; + +json_table_on_error_rule_optional + : /* empty */ + {{ + $$.m_behavior = JSON_TABLE_RETURN_NULL; + $$.m_default_value = NULL; + DBG_PRINT}} + | json_table_column_behavior_rule ON_ ERROR_ + {{ + $$ = $1; + DBG_PRINT}} + ; + +json_table_on_empty_rule_optional + : /* empty */ + {{ + $$.m_behavior = JSON_TABLE_RETURN_NULL; + $$.m_default_value = NULL; + DBG_PRINT}} + | json_table_column_behavior_rule ON_ EMPTY + {{ + $$ = $1; + DBG_PRINT}} + ; + +json_table_column_rule + : identifier For ORDINALITY + {{ + PT_NODE *pt_col = parser_new_node (this_parser, PT_JSON_TABLE_COLUMN); + pt_col->info.json_table_column_info.name = $1; + pt_col->info.json_table_column_info.func = JSON_TABLE_ORDINALITY; + pt_col->type_enum = PT_TYPE_INTEGER; + $$ = pt_col; + DBG_PRINT}} + | identifier data_type PATH CHAR_STRING json_table_on_error_rule_optional json_table_on_empty_rule_optional + // $1 $2 $3 $4 $5 $6 + {{ + PT_NODE *pt_col = parser_new_node (this_parser, PT_JSON_TABLE_COLUMN); + pt_col->info.json_table_column_info.name = $1; + pt_col->type_enum = TO_NUMBER (CONTAINER_AT_0 
($2)); + pt_col->data_type = CONTAINER_AT_1 ($2); + pt_col->info.json_table_column_info.path=$4; + pt_col->info.json_table_column_info.func = JSON_TABLE_EXTRACT; + pt_col->info.json_table_column_info.on_error = $5; + pt_col->info.json_table_column_info.on_empty = $6; + $$ = pt_col; + DBG_PRINT}} + | identifier data_type EXISTS PATH CHAR_STRING + {{ + PT_NODE *pt_col = parser_new_node (this_parser, PT_JSON_TABLE_COLUMN); + pt_col->info.json_table_column_info.name = $1; + pt_col->type_enum = TO_NUMBER (CONTAINER_AT_0 ($2)); + pt_col->data_type = CONTAINER_AT_1 ($2); + pt_col->info.json_table_column_info.path=$5; + pt_col->info.json_table_column_info.func = JSON_TABLE_EXISTS; + $$ = pt_col; + DBG_PRINT}} + | NESTED json_table_node_rule + {{ + $$ = $2; + DBG_PRINT}} + | NESTED PATH json_table_node_rule + {{ + $$ = $3; + DBG_PRINT}} + ; + +json_table_column_list_rule + : json_table_column_list_rule ',' json_table_column_rule + {{ + pt_jt_append_column_or_nested_node ($1, $3); + $$ = $1; + DBG_PRINT}} + | json_table_column_rule + {{ + PT_NODE *pt_jt_node = parser_new_node (this_parser, PT_JSON_TABLE_NODE); + pt_jt_append_column_or_nested_node (pt_jt_node, $1); + $$ = pt_jt_node; + DBG_PRINT}} + ; + +json_table_node_rule + : CHAR_STRING COLUMNS '(' json_table_column_list_rule ')' + {{ + PT_NODE *jt_node = $4; + assert (jt_node != NULL); + assert (jt_node->node_type == PT_JSON_TABLE_NODE); + + jt_node->info.json_table_node_info.path = $1; + + $$ = jt_node; + DBG_PRINT}} + ; + +json_table_rule + : {{ + json_table_column_count = 0; + DBG_PRINT}} + '(' expression_ ',' json_table_node_rule ')' + {{ + PT_NODE *jt = parser_new_node (this_parser, PT_JSON_TABLE); + jt->info.json_table_info.expr = $3; + jt->info.json_table_info.tree = $5; + + $$ = jt; + DBG_PRINT}} + ; %% @@ -23355,6 +24472,7 @@ int yycolumn_end = 0; int dot_flag = 0; int parser_function_code = PT_EMPTY; +size_t json_table_column_count = 0; static PT_NODE * parser_make_expr_with_func (PARSER_CONTEXT * parser, 
FUNC_TYPE func_code, @@ -25753,6 +26871,9 @@ parser_keyword_func (const char *name, PT_NODE * args) case PT_JSON_VALID: case PT_JSON_LENGTH: case PT_JSON_DEPTH: + case PT_JSON_PRETTY: + case PT_JSON_QUOTE: + case PT_JSON_UNQUOTE: if (c != 1) return NULL; @@ -25761,19 +26882,6 @@ parser_keyword_func (const char *name, PT_NODE * args) node = parser_make_expression (this_parser, key->op, a1, NULL, NULL); return node; - case PT_JSON_SEARCH: - if (c != 3) - return NULL; - - a1 = args; - a2 = a1->next; - a3 = a2->next; - a1->next = NULL; - a2->next = NULL; - a3->next = NULL; - - node = parser_make_expression (this_parser, key->op, a1, a2, a3); - return node; case PT_STRCMP: if (c != 2) return NULL; @@ -26302,11 +27410,9 @@ pt_set_collation_modifier (PARSER_CONTEXT *parser, PT_NODE *node, } else if (node->node_type == PT_EXPR) { - if (node->info.expr.op == PT_EVALUATE_VARIABLE - || node->info.expr.op == PT_DEFINE_VARIABLE) + if (node->info.expr.op == PT_EVALUATE_VARIABLE || node->info.expr.op == PT_DEFINE_VARIABLE) { - PT_ERRORm (parser, coll_node, MSGCAT_SET_PARSER_SEMANTIC, - MSGCAT_SEMANTIC_COLLATE_NOT_ALLOWED); + PT_ERRORm (parser, coll_node, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_COLLATE_NOT_ALLOWED); return node; } else if (node->info.expr.op == PT_CAST @@ -26315,8 +27421,7 @@ pt_set_collation_modifier (PARSER_CONTEXT *parser, PT_NODE *node, LANG_COLLATION *lc_node = lang_get_collation (PT_GET_COLLATION_MODIFIER (node)); if (lc_node->codeset != lang_coll->codeset) { - PT_ERRORmf2 (parser, node, MSGCAT_SET_PARSER_SEMANTIC, - MSGCAT_SEMANTIC_CS_MATCH_COLLATE, + PT_ERRORmf2 (parser, node, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_CS_MATCH_COLLATE, lang_get_codeset_name (lc_node->codeset), lang_get_codeset_name (lang_coll->codeset)); return node; @@ -26328,16 +27433,14 @@ pt_set_collation_modifier (PARSER_CONTEXT *parser, PT_NODE *node, do_wrap_with_cast = true; } } - else if (node->node_type == PT_NAME || node->node_type == PT_DOT_ - || node->node_type == 
PT_FUNCTION) + else if (node->node_type == PT_NAME || node->node_type == PT_DOT_ || node->node_type == PT_FUNCTION) { PT_SET_NODE_COLL_MODIFIER (node, lang_coll->coll.coll_id); do_wrap_with_cast = true; } else { - PT_ERRORm (parser, node, MSGCAT_SET_PARSER_SEMANTIC, - MSGCAT_SEMANTIC_COLLATE_NOT_ALLOWED); + PT_ERRORm (parser, node, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_COLLATE_NOT_ALLOWED); assert (do_wrap_with_cast == false); } @@ -26362,7 +27465,6 @@ pt_create_json_value (PARSER_CONTEXT *parser, const char *str) PT_NODE *node = NULL; node = parser_new_node (parser, PT_VALUE); - if (node) { node->type_enum = PT_TYPE_JSON; @@ -26373,3 +27475,23 @@ pt_create_json_value (PARSER_CONTEXT *parser, const char *str) return node; } + +static void +pt_jt_append_column_or_nested_node (PT_NODE * jt_node, PT_NODE * jt_col_or_nested) +{ + assert (jt_node != NULL && jt_node->node_type == PT_JSON_TABLE_NODE); + assert (jt_col_or_nested != NULL); + + if (jt_col_or_nested->node_type == PT_JSON_TABLE_COLUMN) + { + jt_col_or_nested->info.json_table_column_info.index = json_table_column_count++; + jt_node->info.json_table_node_info.columns = + parser_append_node (jt_col_or_nested, jt_node->info.json_table_node_info.columns); + } + else + { + assert (jt_col_or_nested->node_type == PT_JSON_TABLE_NODE); + jt_node->info.json_table_node_info.nested_paths = + parser_append_node (jt_col_or_nested, jt_node->info.json_table_node_info.nested_paths); + } +} diff --git a/src/parser/csql_lexer.l b/src/parser/csql_lexer.l index 0a4cb18a225..a2cc8095642 100644 --- a/src/parser/csql_lexer.l +++ b/src/parser/csql_lexer.l @@ -221,16 +221,6 @@ int yybuffer_pos; return COST; } [cC][oO][uU][nN][tT] { begin_token(yytext); return COUNT; } -[jJ][sS][oO][nN][_][oO][bB][jJ][eE][cC][tT] { begin_token(yytext); return FUN_JSON_OBJECT;} -[jJ][sS][oO][nN][_][aA][rR][rR][aA][yY] { begin_token(yytext); return FUN_JSON_ARRAY;} -[jJ][sS][oO][nN][_][iI][nN][sS][eE][rR][tT] { begin_token(yytext); return 
FUN_JSON_INSERT;} -[jJ][sS][oO][nN][_][rR][eE][pP][lL][aA][cC][eE] { begin_token(yytext); return FUN_JSON_REPLACE;} -[jJ][sS][oO][nN][_][sS][eE][tT] { begin_token(yytext); return FUN_JSON_SET;} -[jJ][sS][oO][nN][_][kK][eE][yY][sS] { begin_token(yytext); return FUN_JSON_KEYS;} -[jJ][sS][oO][nN][_][rR][eE][mM][oO][vV][eE] { begin_token(yytext); return FUN_JSON_REMOVE;} -[jJ][sS][oO][nN][_][aA][rR][rR][aA][yY][_][aA][pP][pP][eE][nN][dD] { begin_token(yytext); return FUN_JSON_ARRAY_APPEND;} -[jJ][sS][oO][nN][_][gG][eE][tT][_][aA][lL][lL][_][pP][aA][tT][hH][sS] { begin_token(yytext); return FUN_JSON_GET_ALL_PATHS;} -[jJ][sS][oO][nN][_][mM][eE][rR][gG][eE] { begin_token(yytext); return FUN_JSON_MERGE;} [cC][rR][eE][aA][tT][eE] { begin_token(yytext); return CREATE; } [cC][rR][oO][sS][sS] { begin_token(yytext); return CROSS; } [cC][rR][iI][tT][iI][cC][aA][lL] { begin_token(yytext); @@ -302,9 +292,11 @@ int yybuffer_pos; [eE][lL][tT] { begin_token(yytext); csql_yylval.cptr = pt_makename(yytext); return ELT; } +[eE][mM][pP][tT][yY] { begin_token(yytext); return EMPTY; } [eE][nN][dD] { begin_token(yytext); return END; } [eE][nN][uU][mM] { begin_token(yytext); return ENUM; } [eE][qQ][uU][aA][lL][sS] { begin_token(yytext); return EQUALS; } +[eE][rR][rR][oO][rR] { begin_token(yytext); return ERROR_; } [eE][sS][cC][aA][pP][eE] { begin_token(yytext); return ESCAPE; } [eE][vV][aA][lL][uU][aA][tT][eE] { begin_token(yytext); return EVALUATE; } [eE][xX][cC][eE][pP][tT] { begin_token(yytext); return EXCEPT; } @@ -445,9 +437,26 @@ int yybuffer_pos; [jJ][oO][bB] { begin_token(yytext); csql_yylval.cptr = pt_makename(yytext); return JOB; } -[jJ][sS][oO][nN] { begin_token(yytext); - return JSON; } -[kK][eE][yY] { begin_token(yytext); return KEY; } +[jJ][sS][oO][nN] { begin_token(yytext); return JSON; } +[jJ][sS][oO][nN][_][aA][rR][rR][aA][yY] { begin_token(yytext); return FUN_JSON_ARRAY; } +[jJ][sS][oO][nN][_][aA][rR][rR][aA][yY][_][aA][pP][pP][eE][nN][dD] { begin_token(yytext); return 
FUN_JSON_ARRAY_APPEND; } +[jJ][sS][oO][nN][_][aA][rR][rR][aA][yY][aA][gG][gG] { begin_token(yytext); return JSON_ARRAYAGG; } +[jJ][sS][oO][nN][_][aA][rR][rR][aA][yY][_][iI][nN][sS][eE][rR][tT] { begin_token(yytext); return FUN_JSON_ARRAY_INSERT; } +[jJ][sS][oO][nN][_][cC][oO][nN][tT][aA][iI][nN][sS][_][pP][aA][tT][hH] { begin_token(yytext); return FUN_JSON_CONTAINS_PATH; } +[jJ][sS][oO][nN][_][gG][eE][tT][_][aA][lL][lL][_][pP][aA][tT][hH][sS] { begin_token(yytext); return FUN_JSON_GET_ALL_PATHS; } +[jJ][sS][oO][nN][_][iI][nN][sS][eE][rR][tT] { begin_token(yytext); return FUN_JSON_INSERT; } +[jJ][sS][oO][nN][_][kK][eE][yY][sS] { begin_token(yytext); return FUN_JSON_KEYS; } +[jJ][sS][oO][nN][_][mM][eE][rR][gG][eE] { begin_token(yytext); return FUN_JSON_MERGE; } +[jJ][sS][oO][nN][_][mM][eE][rR][gG][eE][_][pP][aA][tT][cC][hH] { begin_token(yytext); return FUN_JSON_MERGE_PATCH; } +[jJ][sS][oO][nN][_][mM][eE][rR][gG][eE][_][pP][rR][eE][sS][eE][rR][vV][eE] { begin_token(yytext); return FUN_JSON_MERGE_PRESERVE; } +[jJ][sS][oO][nN][_][oO][bB][jJ][eE][cC][tT] { begin_token(yytext); return FUN_JSON_OBJECT; } +[jJ][sS][oO][nN][_][oO][bB][jJ][eE][cC][tT][aA][gG][gG] { begin_token(yytext); return JSON_OBJECTAGG; } +[jJ][sS][oO][nN][_][rR][eE][mM][oO][vV][eE] { begin_token(yytext); return FUN_JSON_REMOVE; } +[jJ][sS][oO][nN][_][rR][eE][pP][lL][aA][cC][eE] { begin_token(yytext); return FUN_JSON_REPLACE; } +[jJ][sS][oO][nN][_][sS][eE][tT] { begin_token(yytext); return FUN_JSON_SET; } +[jJ][sS][oO][nN][_][sS][eE][aA][rR][cC][hH] { begin_token(yytext); return FUN_JSON_SEARCH; } +[jJ][sS][oO][nN][_][tT][aA][bB][lL][eE] { begin_token(yytext); return JSON_TABLE; } +[kK][eE][yY] { begin_token(yytext); return KEY; } [kK][eE][yY][sS] { begin_token(yytext); csql_yylval.cptr = pt_makename(yytext); return KEYS; } @@ -528,6 +537,7 @@ int yybuffer_pos; [nN][aA][tT][iI][oO][nN][aA][lL] { begin_token(yytext); return NATIONAL; } [nN][aA][tT][uU][rR][aA][lL] { begin_token(yytext); return NATURAL; } 
[nN][cC][hH][aA][rR] { begin_token(yytext); return NCHAR; } +[nN][eE][sS][tT][eE][dD] { begin_token(yytext); return NESTED; } [nN][eE][xX][tT] { begin_token(yytext); return NEXT; } [nN][oO] { begin_token(yytext); return NO; } [nN][oO][cC][aA][cC][hH][eE] { begin_token(yytext); @@ -573,6 +583,7 @@ int yybuffer_pos; [oO][pP][tT][iI][oO][nN] { begin_token(yytext); return OPTION; } [oO][rR] { begin_token(yytext); return OR; } [oO][rR][dD][eE][rR] { begin_token(yytext); return ORDER; } +[oO][rR][dD][iI][nN][aA][lL][iI][tT][yY] { begin_token(yytext); return ORDINALITY; } [oO][uU][tT] { begin_token(yytext); return OUT_; } [oO][uU][tT][eE][rR] { begin_token(yytext); return OUTER; } [oO][uU][tT][pP][uU][tT] { begin_token(yytext); return OUTPUT; } @@ -598,6 +609,9 @@ int yybuffer_pos; [pP][aA][sS][sS][wW][oO][rR][dD] { begin_token(yytext); csql_yylval.cptr = pt_makename(yytext); return PASSWORD; } +[pP][aA][tT][hH] { begin_token(yytext); + csql_yylval.cptr = pt_makename(yytext); + return PATH; } [pP][eE][rR][cC][eE][nN][tT][_][rR][aA][nN][kK] { begin_token(yytext); csql_yylval.cptr = pt_makename(yytext); return PERCENT_RANK; } @@ -904,6 +918,7 @@ int yybuffer_pos; "->" { begin_token(yytext); return RIGHT_ARROW; } "." 
{ begin_token(yytext); return DOT; } +"->>" { begin_token(yytext); return DOUBLE_RIGHT_ARROW; } "||" { begin_token(yytext); if (!prm_get_bool_value (PRM_ID_PIPES_AS_CONCAT)) @@ -1575,7 +1590,7 @@ csql_yyerror (const char *s) } else { - msg = s; + msg = (char *) s; // suppress warning; string will not be changed } } diff --git a/src/parser/keyword.c b/src/parser/keyword.c index e76c0291e7c..02e1b89170f 100644 --- a/src/parser/keyword.c +++ b/src/parser/keyword.c @@ -343,6 +343,7 @@ static KEYWORD_RECORD keywords[] = { {PARTITION, "PARTITION", 0}, {PARTITIONING, "PARTITIONING", 1}, {PARTITIONS, "PARTITIONS", 1}, + {PATH, "PATH", 1}, {PASSWORD, "PASSWORD", 1}, {PERCENT_RANK, "PERCENT_RANK", 1}, {PERCENTILE_CONT, "PERCENTILE_CONT", 1}, diff --git a/src/parser/name_resolution.c b/src/parser/name_resolution.c index bbe7776f193..21d72b4cdfd 100644 --- a/src/parser/name_resolution.c +++ b/src/parser/name_resolution.c @@ -26,6 +26,7 @@ #include "config.h" #include +#include #include "porting.h" #include "error_manager.h" @@ -54,6 +55,7 @@ extern "C" { extern int parser_function_code; + extern size_t json_table_column_count; } #define PT_NAMES_HASH_SIZE 50 @@ -128,6 +130,10 @@ static int pt_find_name_in_spec (PARSER_CONTEXT * parser, PT_NODE * spec, PT_NOD static int pt_check_unique_exposed (PARSER_CONTEXT * parser, const PT_NODE * p); static PT_NODE *pt_common_attribute (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE * q); static PT_NODE *pt_get_all_attributes_and_types (PARSER_CONTEXT * parser, PT_NODE * cls, PT_NODE * from); +static PT_NODE *pt_get_all_json_table_attributes_and_types (PARSER_CONTEXT * parser, PT_NODE * json_table_node, + const char *json_table_alias); +static PT_NODE *pt_json_table_gather_attribs (PARSER_CONTEXT * parser, PT_NODE * json_table_node, void *args, + int *continue_walk); static PT_NODE *pt_get_all_showstmt_attributes_and_types (PARSER_CONTEXT * parser, PT_NODE * derived_table); static void pt_get_attr_data_type (PARSER_CONTEXT * parser, 
DB_ATTRIBUTE * att, PT_NODE * attr); static PT_NODE *pt_unwhacked_spec (PARSER_CONTEXT * parser, PT_NODE * scope, PT_NODE * spec); @@ -175,6 +181,7 @@ static void pt_bind_names_merge_insert (PARSER_CONTEXT * parser, PT_NODE * node, static void pt_bind_names_merge_update (PARSER_CONTEXT * parser, PT_NODE * node, PT_BIND_NAMES_ARG * bind_arg, SCOPES * scopestack, PT_EXTRA_SPECS_FRAME * specs_frame); static const char *pt_get_unique_exposed_name (PARSER_CONTEXT * parser, PT_NODE * first_spec); +static PT_NODE *pt_bind_name_to_spec (PARSER_CONTEXT * parser, PT_NODE * node, void *arg, int *continue_walk); static PT_NODE *pt_resolve_natural_join (PARSER_CONTEXT * parser, PT_NODE * node, void *chk_parent, int *continue_walk); @@ -975,7 +982,19 @@ pt_bind_scope (PARSER_CONTEXT * parser, PT_BIND_NAMES_ARG * bind_arg) assert (!PT_SPEC_IS_ENTITY (spec) && !PT_SPEC_IS_CTE (spec)); table = spec->info.spec.derived_table; - table = parser_walk_tree (parser, table, pt_bind_names, bind_arg, pt_bind_names_post, bind_arg); + if (table->node_type == PT_JSON_TABLE) + { + assert (spec->info.spec.derived_table_type == PT_DERIVED_JSON_TABLE); + table->info.json_table_info.expr = + parser_walk_tree (parser, table->info.json_table_info.expr, pt_bind_names, bind_arg, pt_bind_names_post, + bind_arg); + table->info.json_table_info.tree = + parser_walk_tree (parser, table->info.json_table_info.tree, pt_bind_name_to_spec, spec, NULL, NULL); + } + else + { + table = parser_walk_tree (parser, table, pt_bind_names, bind_arg, pt_bind_names_post, bind_arg); + } spec->info.spec.derived_table = table; /* must bind any expr types in table. pt_bind_types requires it. 
*/ @@ -2569,6 +2588,17 @@ pt_bind_names (PARSER_CONTEXT * parser, PT_NODE * node, void *arg, int *continue case PT_UPDATE: scopestack.specs = node->info.update.spec; + spec_frame.next = bind_arg->spec_frames; + spec_frame.extra_specs = NULL; + + /* break links to current scopes to bind_names in the WITH_CLAUSE */ + bind_arg->scopes = NULL; + bind_arg->spec_frames = NULL; + pt_bind_names_in_with_clause (parser, node, bind_arg); + + bind_arg->spec_frames = spec_frame.next; + + /* restore links to current scopes */ bind_arg->scopes = &scopestack; spec_frame.next = bind_arg->spec_frames; spec_frame.extra_specs = NULL; @@ -2592,6 +2622,16 @@ pt_bind_names (PARSER_CONTEXT * parser, PT_NODE * node, void *arg, int *continue case PT_DELETE: scopestack.specs = node->info.delete_.spec; + spec_frame.next = bind_arg->spec_frames; + spec_frame.extra_specs = NULL; + + /* break links to current scopes to bind_names in the WITH_CLAUSE */ + bind_arg->scopes = NULL; + bind_arg->spec_frames = NULL; + pt_bind_names_in_with_clause (parser, node, bind_arg); + + bind_arg->spec_frames = spec_frame.next; + bind_arg->scopes = &scopestack; spec_frame.next = bind_arg->spec_frames; spec_frame.extra_specs = NULL; @@ -4325,6 +4365,77 @@ pt_get_all_attributes_and_types (PARSER_CONTEXT * parser, PT_NODE * cls, PT_NODE return NULL; } +static PT_NODE * +pt_json_table_gather_attribs (PARSER_CONTEXT * parser, PT_NODE * json_table_column, void *args, int *continue_walk) +{ + PT_NODE **attribs = (PT_NODE **) args; + + if (json_table_column->node_type == PT_JSON_TABLE_COLUMN) + { + PT_NODE *next_attr = json_table_column->info.json_table_column_info.name; + next_attr->type_enum = json_table_column->type_enum; + next_attr->info.name.json_table_column_index = json_table_column->info.json_table_column_info.index; + if (json_table_column->data_type != NULL) + { + next_attr->data_type = parser_copy_tree (parser, json_table_column->data_type); + } + *attribs = parser_append_node (next_attr, *attribs); + } + + 
return json_table_column; +} + +static PT_NODE * +pt_get_all_json_table_attributes_and_types (PARSER_CONTEXT * parser, PT_NODE * json_table_node, + const char *json_table_alias) +{ + PT_NODE *attribs = NULL; + PT_NODE *copy_node = NULL; + + parser_walk_tree (parser, json_table_node, pt_json_table_gather_attribs, &attribs, NULL, NULL); + + if (attribs == NULL) + { + assert (false); + return NULL; + } + + // *INDENT-OFF* + std::unordered_map sorted_attrs; + // *INDENT-ON* + + for (PT_NODE * attr = attribs; attr; attr = attr->next) + { + size_t index = attr->info.name.json_table_column_index; + assert (strcmp (json_table_alias, attr->info.name.resolved) == 0); + + // we need copies of the actual names + copy_node = pt_name (parser, attr->info.name.original); + copy_node->type_enum = attr->type_enum; + copy_node->info.name.resolved = json_table_alias; + if (attr->data_type != NULL) + { + copy_node->data_type = parser_copy_tree (parser, attr->data_type); + } + sorted_attrs[index] = copy_node; // we have to copy, cannot use same node + } + + size_t columns_nr = sorted_attrs.size (); + + for (unsigned int i = 0; i < columns_nr - 1; i++) + { + if (sorted_attrs[i] == NULL) + { + assert (false); + return NULL; + } + sorted_attrs[i]->next = sorted_attrs[i + 1]; + } + sorted_attrs[columns_nr - 1]->next = NULL; + + return sorted_attrs[0]; +} + /* * pt_get_all_showstmt_attributes_and_types () - * return: show list attributes list if all OK, NULL otherwise. @@ -6844,14 +6955,11 @@ pt_resolve_using_index (PARSER_CONTEXT * parser, PT_NODE * index, PT_NODE * from pt_short_print (parser, index)); return NULL; } - else if (cons->index_status == SM_ONLINE_INDEX_BUILDING_IN_PROGRESS) - { - // TODO: raise an error? - return NULL; // unusable index - } - else if (cons->index_status == SM_INVISIBLE_INDEX) + else if (cons->index_status == SM_ONLINE_INDEX_BUILDING_IN_PROGRESS + || cons->index_status == SM_INVISIBLE_INDEX) { - // TODO: raise an error? 
+ PT_ERRORmf (parser, index, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_USING_INDEX_ERR_1, + pt_short_print (parser, index)); return NULL; // unusable index } } @@ -8119,6 +8227,7 @@ PT_NODE * pt_resolve_cte_specs (PARSER_CONTEXT * parser, PT_NODE * node, void *arg, int *continue_walk) { PT_NODE *cte_list, *with = NULL, *saved_with = NULL; + PT_NODE **with_p; PT_NODE *curr_cte, *previous_cte; PT_NODE *saved_curr_cte_next; int nested_with_count = 0; @@ -8133,6 +8242,15 @@ pt_resolve_cte_specs (PARSER_CONTEXT * parser, PT_NODE * node, void *arg, int *c case PT_DIFFERENCE: case PT_INTERSECTION: with = node->info.query.with; + with_p = &node->info.query.with; + break; + case PT_UPDATE: + with = node->info.update.with; + with_p = &node->info.update.with; + break; + case PT_DELETE: + with = node->info.delete_.with; + with_p = &node->info.delete_.with; break; default: @@ -8301,10 +8419,10 @@ pt_resolve_cte_specs (PARSER_CONTEXT * parser, PT_NODE * node, void *arg, int *c } /* STEP 3: Resolve CTEs in the actual query */ - saved_with = with; - node->info.query.with = NULL; + saved_with = *with_p; + *with_p = NULL; node = parser_walk_tree (parser, node, pt_resolve_spec_to_cte, cte_list, NULL, NULL); - node->info.query.with = saved_with; + *with_p = saved_with; /* all ok */ return node; @@ -9560,12 +9678,27 @@ pt_set_reserved_name_key_type (PARSER_CONTEXT * parser, PT_NODE * node, void *ar static void pt_bind_names_in_with_clause (PARSER_CONTEXT * parser, PT_NODE * node, PT_BIND_NAMES_ARG * bind_arg) { - PT_NODE *with; + PT_NODE *with = NULL; PT_NODE *curr_cte; - assert (PT_IS_QUERY_NODE_TYPE (node->node_type)); + switch (node->node_type) + { + case PT_SELECT: + case PT_UNION: + case PT_DIFFERENCE: + case PT_INTERSECTION: + with = node->info.query.with; + break; + case PT_UPDATE: + with = node->info.update.with; + break; + case PT_DELETE: + with = node->info.delete_.with; + break; + default: + assert (false); + } - with = node->info.query.with; if (with == NULL) { /* nothing 
to do */ @@ -9752,56 +9885,61 @@ pt_get_attr_list_of_derived_table (PARSER_CONTEXT * parser, PT_MISC_TYPE derived break; case PT_IS_SUBQUERY: - { - /* must be a subquery derived table */ - /* select_list must have passed star expansion */ - PT_NODE *att, *col; + /* must be a subquery derived table */ + /* select_list must have passed star expansion */ + PT_NODE * att, *col; - select_list = pt_get_select_list (parser, derived_table); - if (!select_list) - { - return NULL; - } + select_list = pt_get_select_list (parser, derived_table); + if (!select_list) + { + return NULL; + } - for (att = select_list, i = 0; att; att = att->next, i++) - { - if (att->alias_print) - { - col = pt_name (parser, att->alias_print); - } - else - { - if (att->node_type == PT_NAME && att->info.name.original != NULL && att->info.name.original[0] != '\0') - { - col = pt_name (parser, att->info.name.original); - } - else if (att->node_type == PT_VALUE && att->info.value.text != NULL && att->info.value.text[0] != '\0') - { - col = pt_name (parser, att->info.value.text); - } - else if (att->node_type == PT_EXPR || att->node_type == PT_FUNCTION) - { - PARSER_VARCHAR *alias; - alias = pt_print_bytes (parser, att); - col = pt_name (parser, (const char *) alias->bytes); - } - else - { /* generate column name */ - id = i; - col = pt_name (parser, mq_generate_name (parser, derived_alias->info.name.original, &id)); - } - } + for (att = select_list, i = 0; att; att = att->next, i++) + { + if (att->alias_print) + { + col = pt_name (parser, att->alias_print); + } + else + { + if (att->node_type == PT_NAME && att->info.name.original != NULL && att->info.name.original[0] != '\0') + { + col = pt_name (parser, att->info.name.original); + } + else if (att->node_type == PT_VALUE && att->info.value.text != NULL && att->info.value.text[0] != '\0') + { + col = pt_name (parser, att->info.value.text); + } + else if (att->node_type == PT_EXPR || att->node_type == PT_FUNCTION) + { + PARSER_VARCHAR *alias; + alias = 
pt_print_bytes (parser, att); + col = pt_name (parser, (const char *) alias->bytes); + } + else + { /* generate column name */ + id = i; + col = pt_name (parser, mq_generate_name (parser, derived_alias->info.name.original, &id)); + } + } - col->type_enum = att->type_enum; - if (att->data_type) - { - col->data_type = parser_copy_tree_list (parser, att->data_type); - } + col->type_enum = att->type_enum; + if (att->data_type) + { + col->data_type = parser_copy_tree_list (parser, att->data_type); + } - as_attr_list = parser_append_node (col, as_attr_list); - } - break; - } + as_attr_list = parser_append_node (col, as_attr_list); + } + break; + + case PT_DERIVED_JSON_TABLE: + assert (derived_table->node_type == PT_JSON_TABLE); + + as_attr_list = pt_get_all_json_table_attributes_and_types (parser, derived_table, + derived_alias->info.name.original); + break; default: /* this can't happen since we removed MERGE/CSELECT from grammar */ @@ -9947,6 +10085,10 @@ pt_set_attr_list_types (PARSER_CONTEXT * parser, PT_NODE * as_attr_list, PT_MISC } break; + case PT_DERIVED_JSON_TABLE: + // nothing to do? Types already set during pt_json_table_gather_attribs () + return; + default: /* this can't happen since we removed MERGE/CSELECT from grammar */ assert (derived_table_type == PT_IS_CSELECT); @@ -9976,3 +10118,26 @@ pt_count_with_clauses (PARSER_CONTEXT * parser, PT_NODE * node, void *arg, int * return node; } + +static PT_NODE * +pt_bind_name_to_spec (PARSER_CONTEXT * parser, PT_NODE * node, void *arg, int *continue_walk) +{ + PT_NODE *spec = REINTERPRET_CAST (PT_NODE *, arg); + if (spec->node_type != PT_SPEC) + { + assert (false); + return node; + } + + if (node->node_type != PT_NAME) + { + // not relevant + return node; + } + + assert (!pt_resolved (node)); + node->info.name.spec_id = spec->info.spec.id; + node->info.name.resolved = spec->info.spec.range_var->info.name.original; + node->info.name.meta_class = PT_NORMAL; // so far, only normals are used. 
+ return node; +} diff --git a/src/parser/parse_dbi.c b/src/parser/parse_dbi.c index 335206fa6bc..fee2fad0cb8 100644 --- a/src/parser/parse_dbi.c +++ b/src/parser/parse_dbi.c @@ -3549,3 +3549,28 @@ pt_db_value_initialize (PARSER_CONTEXT * parser, PT_NODE * value, DB_VALUE * db_ return db_value; } + +/* + * db_json_val_from_str() - create JSON value from string + * return: error code + * raw_str(in): buffer storing a JSON + * str_size(in): size of buffer + * json_val(out): output JSON DB_VALUE + */ +int +db_json_val_from_str (const char *raw_str, const int str_size, DB_VALUE * json_val) +{ + JSON_DOC *json_doc = NULL; + int error_code = NO_ERROR; + + error_code = db_json_get_json_from_str (raw_str, json_doc, str_size); + if (error_code != NO_ERROR) + { + assert (json_doc == NULL); + return error_code; + } + + db_make_json (json_val, json_doc, true); + + return error_code; +} diff --git a/src/parser/parse_tree.h b/src/parser/parse_tree.h index 29f96d4d9e6..0a46eab223c 100644 --- a/src/parser/parse_tree.h +++ b/src/parser/parse_tree.h @@ -34,14 +34,15 @@ #include #include +#include "authenticate.h" +#include "compile_context.h" #include "config.h" -#include "jansson.h" #include "cursor.h" -#include "string_opfunc.h" +#include "jansson.h" +#include "json_table_def.h" #include "message_catalog.h" -#include "authenticate.h" +#include "string_opfunc.h" #include "system_parameter.h" -#include "xasl.h" #define MAX_PRINT_ERROR_CONTEXT_LENGTH 64 @@ -896,6 +897,9 @@ enum pt_node_type PT_KILL_STMT, PT_VACUUM, PT_WITH_CLAUSE, + PT_JSON_TABLE, + PT_JSON_TABLE_NODE, + PT_JSON_TABLE_COLUMN, PT_NODE_NUMBER, /* This is the number of node types */ PT_LAST_NODE_NUMBER = PT_NODE_NUMBER @@ -1138,7 +1142,11 @@ typedef enum PT_IS_SHOWSTMT, /* query is SHOWSTMT */ PT_IS_CTE_REC_SUBQUERY, - PT_IS_CTE_NON_REC_SUBQUERY + PT_IS_CTE_NON_REC_SUBQUERY, + + PT_DERIVED_JSON_TABLE, // json table spec derivation + + // todo: separate into relevant enumerations } PT_MISC_TYPE; /* Enumerated join type 
*/ @@ -1481,7 +1489,9 @@ typedef enum PT_JSON_VALID, PT_JSON_LENGTH, PT_JSON_DEPTH, - PT_JSON_SEARCH, + PT_JSON_QUOTE, + PT_JSON_UNQUOTE, + PT_JSON_PRETTY, /* This is the last entry. Please add a new one before it. */ PT_LAST_OPCODE @@ -1687,6 +1697,10 @@ typedef struct pt_set_timezone_info PT_SET_TIMEZONE_INFO; typedef struct pt_flat_spec_info PT_FLAT_SPEC_INFO; +typedef struct pt_json_table_info PT_JSON_TABLE_INFO; +typedef struct pt_json_table_node_info PT_JSON_TABLE_NODE_INFO; +typedef struct pt_json_table_column_info PT_JSON_TABLE_COLUMN_INFO; + typedef PT_NODE *(*PT_NODE_FUNCTION) (PARSER_CONTEXT * p, PT_NODE * tree, void *arg); typedef PT_NODE *(*PT_NODE_WALK_FUNCTION) (PARSER_CONTEXT * p, PT_NODE * tree, void *arg, int *continue_walk); @@ -2086,6 +2100,7 @@ struct pt_delete_info PT_NODE *limit; /* PT_VALUE limit clause parameter */ PT_NODE *del_stmt_list; /* list of DELETE statements after split */ PT_HINT_ENUM hint; /* hint flag */ + PT_NODE *with; /* PT_WITH_CLAUSE */ unsigned has_trigger:1; /* whether it has triggers */ unsigned server_delete:1; /* whether it can be server-side deletion */ unsigned rewrite_limit:1; /* need to rewrite the limit clause */ @@ -2154,6 +2169,7 @@ struct pt_spec_info PT_NODE *flat_entity_list; /* PT_NAME (list) resolved class's */ PT_NODE *method_list; /* PT_METHOD_CALL list with this entity as the target */ PT_NODE *partition; /* PT_NAME of the specified partition */ + PT_NODE *json_table; /* JSON TABLE definition tree */ UINTPTR id; /* entity spec unique id # */ PT_MISC_TYPE only_all; /* PT_ONLY or PT_ALL */ PT_MISC_TYPE meta_class; /* enum 0 or PT_META */ @@ -2592,6 +2608,7 @@ struct pt_name_info PT_NODE *indx_key_limit; /* key limits for index name */ int coll_modifier; /* collation modifier = collation + 1 */ PT_RESERVED_NAME_ID reserved_id; /* used to identify reserved name */ + size_t json_table_column_index; /* will be used only for json_table to gather attributes in the correct order */ }; /* @@ -2724,26 +2741,26 @@ 
struct pt_select_info unsigned single_table_opt:1; /* hq optimized for single table */ }; -#define PT_SELECT_INFO_ANSI_JOIN 1 /* has ANSI join? */ -#define PT_SELECT_INFO_ORACLE_OUTER 2 /* has Oracle's outer join operator? */ -#define PT_SELECT_INFO_DUMMY 4 /* is dummy (i.e., 'SELECT * FROM x') ? */ -#define PT_SELECT_INFO_HAS_AGG 8 /* has any type of aggregation? */ -#define PT_SELECT_INFO_HAS_ANALYTIC 16 /* has analytic functions */ -#define PT_SELECT_INFO_MULTI_UPDATE_AGG 32 /* is query for multi-table update using aggregate */ -#define PT_SELECT_INFO_IDX_SCHEMA 64 /* is show index query */ -#define PT_SELECT_INFO_COLS_SCHEMA 128 /* is show columns query */ -#define PT_SELECT_FULL_INFO_COLS_SCHEMA 256 /* is show columns query */ -#define PT_SELECT_INFO_IS_MERGE_QUERY 512 /* is a query of a merge stmt */ -#define PT_SELECT_INFO_LIST_PUSHER 1024 /* dummy subquery that pushes a list file descriptor to be used at - * server as its own result */ -#define PT_SELECT_INFO_NO_STRICT_OID_CHECK 2048 /* normally, only OIDs of updatable views are allowed in parse +#define PT_SELECT_INFO_ANSI_JOIN 0x01 /* has ANSI join? */ +#define PT_SELECT_INFO_ORACLE_OUTER 0x02 /* has Oracle's outer join operator? */ +#define PT_SELECT_INFO_DUMMY 0x04 /* is dummy (i.e., 'SELECT * FROM x') ? */ +#define PT_SELECT_INFO_HAS_AGG 0x08 /* has any type of aggregation? 
*/ +#define PT_SELECT_INFO_HAS_ANALYTIC 0x10 /* has analytic functions */ +#define PT_SELECT_INFO_MULTI_UPDATE_AGG 0x20 /* is query for multi-table update using aggregate */ +#define PT_SELECT_INFO_IDX_SCHEMA 0x40 /* is show index query */ +#define PT_SELECT_INFO_COLS_SCHEMA 0x80 /* is show columns query */ +#define PT_SELECT_FULL_INFO_COLS_SCHEMA 0x0100 /* is show columns query */ +#define PT_SELECT_INFO_IS_MERGE_QUERY 0x0200 /* is a query of a merge stmt */ +#define PT_SELECT_INFO_LIST_PUSHER 0x0400 /* dummy subquery that pushes a list file descriptor to be used at + * server as its own result */ +#define PT_SELECT_INFO_NO_STRICT_OID_CHECK 0x0800 /* normally, only OIDs of updatable views are allowed in parse * trees; however, for MERGE and UPDATE we sometimes want to * allow OIDs of partially updatable views */ -#define PT_SELECT_INFO_IS_UPD_DEL_QUERY 4096 /* set if select was built for an UPDATE or DELETE statement */ -#define PT_SELECT_INFO_FOR_UPDATE 8192 /* FOR UPDATE clause is active */ -#define PT_SELECT_INFO_DISABLE_LOOSE_SCAN 16384 /* loose scan not possible on query */ -#define PT_SELECT_INFO_MVCC_LOCK_NEEDED 32768 /* lock returned rows */ -#define PT_SELECT_INFO_READ_ONLY 65536 /* read-only system generated queries like show statement */ +#define PT_SELECT_INFO_IS_UPD_DEL_QUERY 0x1000 /* set if select was built for an UPDATE or DELETE statement */ +#define PT_SELECT_INFO_FOR_UPDATE 0x2000 /* FOR UPDATE clause is active */ +#define PT_SELECT_INFO_DISABLE_LOOSE_SCAN 0x4000 /* loose scan not possible on query */ +#define PT_SELECT_INFO_MVCC_LOCK_NEEDED 0x8000 /* lock returned rows */ +#define PT_SELECT_INFO_READ_ONLY 0x010000 /* read-only system generated queries like show statement */ #define PT_SELECT_INFO_IS_FLAGED(s, f) \ ((s)->info.query.q.select.flag & (f)) @@ -2886,6 +2903,7 @@ struct pt_update_info PT_NODE *order_by; /* PT_EXPR (list) */ PT_NODE *orderby_for; /* PT_EXPR */ PT_HINT_ENUM hint; /* hint flag */ + PT_NODE *with; /* PT_WITH_CLAUSE */ 
unsigned has_trigger:1; /* whether it has triggers */ unsigned has_unique:1; /* whether there's unique constraint */ unsigned server_update:1; /* whether it can be server-side update */ @@ -3243,6 +3261,31 @@ struct pt_insert_value_info int replace_names; /* true if names in evaluated node need to be replaced */ }; +struct pt_json_table_column_info +{ + PT_NODE *name; + // domain is stored in parser node + char *path; + size_t index; // will be used to store the columns in the correct order + enum json_table_column_function func; + struct json_table_column_behavior on_error; + struct json_table_column_behavior on_empty; +}; + +struct pt_json_table_node_info +{ + PT_NODE *columns; + PT_NODE *nested_paths; + const char *path; +}; + +struct pt_json_table_info +{ + PT_NODE *expr; + PT_NODE *tree; + bool is_correlated; +}; + /* Info field of the basic NODE If 'xyz' is the name of the field, then the structure type should be struct PT_XYZ_INFO xyz; @@ -3296,6 +3339,9 @@ union pt_statement_info PT_INSERT_INFO insert; PT_INSERT_VALUE_INFO insert_value; PT_ISOLATION_LVL_INFO isolation_lvl; + PT_JSON_TABLE_INFO json_table_info; + PT_JSON_TABLE_NODE_INFO json_table_node_info; + PT_JSON_TABLE_COLUMN_INFO json_table_column_info; PT_MERGE_INFO merge; PT_METHOD_CALL_INFO method_call; PT_METHOD_DEF_INFO method_def; diff --git a/src/parser/parse_tree_cl.c b/src/parser/parse_tree_cl.c index a74d2be2711..cc53a1e0bd7 100644 --- a/src/parser/parse_tree_cl.c +++ b/src/parser/parse_tree_cl.c @@ -247,6 +247,9 @@ static PT_NODE *pt_apply_kill (PARSER_CONTEXT * parser, PT_NODE * P, PT_NODE_FUN static PT_NODE *pt_apply_vacuum (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION g, void *arg); static PT_NODE *pt_apply_with_clause (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION g, void *arg); static PT_NODE *pt_apply_cte (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION g, void *arg); +static PT_NODE *pt_apply_json_table (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION 
g, void *arg); +static PT_NODE *pt_apply_json_table_node (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION g, void *arg); +static PT_NODE *pt_apply_json_table_column (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION g, void *arg); static PARSER_APPLY_NODE_FUNC pt_apply_func_array[PT_NODE_NUMBER]; @@ -348,6 +351,9 @@ static PT_NODE *pt_init_kill (PT_NODE * p); static PT_NODE *pt_init_vacuum (PT_NODE * p); static PT_NODE *pt_init_with_clause (PT_NODE * p); static PT_NODE *pt_init_cte (PT_NODE * p); +static PT_NODE *pt_init_json_table (PT_NODE * p); +static PT_NODE *pt_init_json_table_node (PT_NODE * p); +static PT_NODE *pt_init_json_table_column (PT_NODE * p); static PARSER_INIT_NODE_FUNC pt_init_func_array[PT_NODE_NUMBER]; @@ -455,6 +461,9 @@ static PARSER_VARCHAR *pt_print_insert_value (PARSER_CONTEXT * parser, PT_NODE * static PARSER_VARCHAR *pt_print_vacuum (PARSER_CONTEXT * parser, PT_NODE * p); static PARSER_VARCHAR *pt_print_with_clause (PARSER_CONTEXT * parser, PT_NODE * p); static PARSER_VARCHAR *pt_print_cte (PARSER_CONTEXT * parser, PT_NODE * p); +static PARSER_VARCHAR *pt_print_json_table (PARSER_CONTEXT * parser, PT_NODE * p); +static PARSER_VARCHAR *pt_print_json_table_node (PARSER_CONTEXT * parser, PT_NODE * p); +static PARSER_VARCHAR *pt_print_json_table_columns (PARSER_CONTEXT * parser, PT_NODE * p); #if defined(ENABLE_UNUSED_FUNCTION) static PT_NODE *pt_apply_use (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION g, void *arg); static PT_NODE *pt_init_use (PT_NODE * p); @@ -464,6 +473,13 @@ static PARSER_VARCHAR *pt_print_use (PARSER_CONTEXT * parser, PT_NODE * p); static int parser_print_user (char *user_text, int len); static void pt_clean_tree_copy_info (PT_TREE_COPY_INFO * tree_copy_info); +static const char *pt_json_table_column_behavior_to_string (const json_table_column_behavior_type & behavior_type); +static PARSER_VARCHAR *pt_print_json_table_column_error_or_empty_behavior (PARSER_CONTEXT * parser, + PARSER_VARCHAR * pstr, 
+ const struct json_table_column_behavior + &column_behavior); +static PARSER_VARCHAR *pt_print_json_table_column_info (PARSER_CONTEXT * parser, PT_NODE * p, PARSER_VARCHAR * pstr); + static PARSER_PRINT_NODE_FUNC pt_print_func_array[PT_NODE_NUMBER]; @@ -2534,7 +2550,7 @@ pt_print_db_value (PARSER_CONTEXT * parser, const struct db_value * val) for (; dim < block.dim + len; dim *= 2) //calc next power of 2 >= b.dim+len ; - mem::block b { dim, (char *) parser_alloc (parser, dim) }; + mem::block b { dim, (char *) parser_alloc (parser, (const int) dim) }; memcpy (b.ptr, block.ptr, block.dim); block = std::move (b); }, [](mem::block &block) @@ -3936,12 +3952,16 @@ pt_show_binopcode (PT_OP_TYPE n) return "json_extract"; case PT_JSON_VALID: return "json_valid"; + case PT_JSON_QUOTE: + return "json_quote"; + case PT_JSON_UNQUOTE: + return "json_unquote"; case PT_JSON_LENGTH: return "json_length"; case PT_JSON_DEPTH: return "json_depth"; - case PT_JSON_SEARCH: - return "json_search"; + case PT_JSON_PRETTY: + return "json_pretty"; default: return "unknown opcode"; } @@ -4019,6 +4039,10 @@ pt_show_function (FUNC_TYPE c) return "percentile_cont"; case PT_PERCENTILE_DISC: return "percentile_disc"; + case PT_JSON_ARRAYAGG: + return "json_arrayagg"; + case PT_JSON_OBJECTAGG: + return "json_objectagg"; case F_SEQUENCE: return "sequence"; @@ -4057,8 +4081,16 @@ pt_show_function (FUNC_TYPE c) return "json_remove"; case F_JSON_ARRAY_APPEND: return "json_array_append"; + case F_JSON_ARRAY_INSERT: + return "json_array_insert"; + case F_JSON_SEARCH: + return "json_search"; + case F_JSON_CONTAINS_PATH: + return "json_contains_path"; case F_JSON_MERGE: return "json_merge"; + case F_JSON_MERGE_PATCH: + return "json_merge_patch"; case F_JSON_GET_ALL_PATHS: return "json_get_all_paths"; default: @@ -5089,6 +5121,9 @@ pt_init_apply_f (void) pt_apply_func_array[PT_VACUUM] = pt_apply_vacuum; pt_apply_func_array[PT_WITH_CLAUSE] = pt_apply_with_clause; pt_apply_func_array[PT_CTE] = pt_apply_cte; + 
pt_apply_func_array[PT_JSON_TABLE] = pt_apply_json_table; + pt_apply_func_array[PT_JSON_TABLE_NODE] = pt_apply_json_table_node; + pt_apply_func_array[PT_JSON_TABLE_COLUMN] = pt_apply_json_table_column; pt_apply_f = pt_apply_func_array; } @@ -5205,6 +5240,9 @@ pt_init_init_f (void) pt_init_func_array[PT_VACUUM] = pt_init_vacuum; pt_init_func_array[PT_WITH_CLAUSE] = pt_init_with_clause; pt_init_func_array[PT_CTE] = pt_init_cte; + pt_init_func_array[PT_JSON_TABLE] = pt_init_json_table; + pt_init_func_array[PT_JSON_TABLE_NODE] = pt_init_json_table_node; + pt_init_func_array[PT_JSON_TABLE_COLUMN] = pt_init_json_table_column; pt_init_f = pt_init_func_array; } @@ -5319,6 +5357,9 @@ pt_init_print_f (void) pt_print_func_array[PT_VACUUM] = pt_print_vacuum; pt_print_func_array[PT_WITH_CLAUSE] = pt_print_with_clause; pt_print_func_array[PT_CTE] = pt_print_cte; + pt_print_func_array[PT_JSON_TABLE] = pt_print_json_table; + pt_print_func_array[PT_JSON_TABLE_NODE] = pt_print_json_table_node; + pt_print_func_array[PT_JSON_TABLE_COLUMN] = pt_print_json_table_columns; pt_print_f = pt_print_func_array; } @@ -8712,6 +8753,7 @@ pt_print_datatype (PARSER_CONTEXT * parser, PT_NODE * p) static PT_NODE * pt_apply_delete (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION g, void *arg) { + p->info.delete_.with = g (parser, p->info.delete_.with, arg); p->info.delete_.target_classes = g (parser, p->info.delete_.target_classes, arg); p->info.delete_.spec = g (parser, p->info.delete_.spec, arg); p->info.delete_.search_cond = g (parser, p->info.delete_.search_cond, arg); @@ -8760,6 +8802,12 @@ pt_print_delete (PARSER_CONTEXT * parser, PT_NODE * p) r1 = pt_print_bytes_l (parser, p->info.delete_.target_classes); r2 = pt_print_bytes_spec_list (parser, p->info.delete_.spec); + if (p->info.delete_.with != NULL) + { + r1 = pt_print_bytes_l (parser, p->info.delete_.with); + q = pt_append_varchar (parser, q, r1); + } + q = pt_append_nulstring (parser, q, "delete "); if (p->info.delete_.hint != 
PT_HINT_NONE) { @@ -9557,6 +9605,20 @@ pt_print_spec (PARSER_CONTEXT * parser, PT_NODE * p) /* skip unnecessary nested parenthesis of derived-query */ q = pt_append_varchar (parser, q, r1); } + else if (p->info.spec.derived_table_type == PT_DERIVED_JSON_TABLE) + { + q = pt_append_nulstring (parser, q, "("); + q = pt_append_varchar (parser, q, r1); + + unsigned int alias_print_flag = (parser->custom_print & PT_PRINT_ALIAS); + q = pt_append_nulstring (parser, q, " as "); + parser->custom_print &= ~PT_PRINT_ALIAS; + r1 = pt_print_bytes (parser, p->info.spec.range_var); + q = pt_append_varchar (parser, q, r1); + parser->custom_print |= alias_print_flag; + + q = pt_append_nulstring (parser, q, ")"); + } else { q = pt_append_nulstring (parser, q, "("); @@ -9566,7 +9628,7 @@ pt_print_spec (PARSER_CONTEXT * parser, PT_NODE * p) } } - if (!(parser->custom_print & PT_SUPPRESS_RESOLVED)) + if (!(parser->custom_print & PT_SUPPRESS_RESOLVED) && (p->info.spec.derived_table_type != PT_DERIVED_JSON_TABLE)) { save_custom = parser->custom_print; parser->custom_print |= PT_SUPPRESS_META_ATTR_CLASS; @@ -9580,7 +9642,7 @@ pt_print_spec (PARSER_CONTEXT * parser, PT_NODE * p) } parser->custom_print = save_custom; } - if (p->info.spec.as_attr_list && !PT_SPEC_IS_CTE (p)) + if (p->info.spec.as_attr_list && !PT_SPEC_IS_CTE (p) && (p->info.spec.derived_table_type != PT_DERIVED_JSON_TABLE)) { save_custom = parser->custom_print; parser->custom_print |= PT_SUPPRESS_RESOLVED; @@ -10170,6 +10232,20 @@ pt_print_expr (PARSER_CONTEXT * parser, PT_NODE * p) q = pt_append_varchar (parser, q, r1); q = pt_append_nulstring (parser, q, ")"); break; + case PT_JSON_QUOTE: + r1 = pt_print_bytes (parser, p->info.expr.arg1); + + q = pt_append_nulstring (parser, q, " json_quote("); + q = pt_append_varchar (parser, q, r1); + q = pt_append_nulstring (parser, q, ")"); + break; + case PT_JSON_UNQUOTE: + r1 = pt_print_bytes (parser, p->info.expr.arg1); + + q = pt_append_nulstring (parser, q, " json_unquote("); + q = 
pt_append_varchar (parser, q, r1); + q = pt_append_nulstring (parser, q, ")"); + break; case PT_JSON_LENGTH: r1 = pt_print_bytes (parser, p->info.expr.arg1); @@ -10191,16 +10267,11 @@ pt_print_expr (PARSER_CONTEXT * parser, PT_NODE * p) q = pt_append_varchar (parser, q, r1); q = pt_append_nulstring (parser, q, ")"); break; - case PT_JSON_SEARCH: - q = pt_append_nulstring (parser, q, "json_search("); + case PT_JSON_PRETTY: r1 = pt_print_bytes (parser, p->info.expr.arg1); + + q = pt_append_nulstring (parser, q, " json_pretty("); q = pt_append_varchar (parser, q, r1); - q = pt_append_nulstring (parser, q, ", "); - r2 = pt_print_bytes (parser, p->info.expr.arg2); - q = pt_append_varchar (parser, q, r2); - q = pt_append_nulstring (parser, q, ", "); - r3 = pt_print_bytes (parser, p->info.expr.arg3); - q = pt_append_varchar (parser, q, r3); q = pt_append_nulstring (parser, q, ")"); break; case PT_POWER: @@ -12414,7 +12485,16 @@ pt_print_function (PARSER_CONTEXT * parser, PT_NODE * p) { if (code == PT_GROUP_CONCAT) { - r1 = pt_print_bytes (parser, p->info.function.arg_list); + if (p->info.function.arg_list != NULL) + { + r1 = pt_print_bytes (parser, p->info.function.arg_list); + } + else + { + // it is unexpected but a badly formed function may miss its arg_list. 
+ r1 = NULL; + } + if (p->info.function.order_by != NULL) { PARSER_VARCHAR *r2; @@ -12423,8 +12503,9 @@ pt_print_function (PARSER_CONTEXT * parser, PT_NODE * p) r1 = pt_append_nulstring (parser, r1, " order by "); r1 = pt_append_varchar (parser, r1, r2); } + /* SEPARATOR */ - if (p->info.function.arg_list->next != NULL) + if (p->info.function.arg_list != NULL && p->info.function.arg_list->next != NULL) { PARSER_VARCHAR *r2; /* print separator */ @@ -15626,6 +15707,12 @@ pt_apply_union_stmt (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION g, v p->info.query.into_list = g (parser, p->info.query.into_list, arg); p->info.query.order_by = g (parser, p->info.query.order_by, arg); p->info.query.orderby_for = g (parser, p->info.query.orderby_for, arg); + p->info.query.limit = g (parser, p->info.query.limit, arg); + + // todo - there is a lot less stuff here than on pt_apply_select. I am not sure this is safe. + // e.g. this is used for parser_copy_tree too. which should deep copy entire tree! otherwise we may have some + // unpleasant effects. 
+ return p; } @@ -15724,6 +15811,7 @@ pt_print_union_stmt (PARSER_CONTEXT * parser, PT_NODE * p) static PT_NODE * pt_apply_update (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION g, void *arg) { + p->info.update.with = g (parser, p->info.update.with, arg); p->info.update.spec = g (parser, p->info.update.spec, arg); p->info.update.assignment = g (parser, p->info.update.assignment, arg); p->info.update.search_cond = g (parser, p->info.update.search_cond, arg); @@ -15772,6 +15860,12 @@ pt_print_update (PARSER_CONTEXT * parser, PT_NODE * p) { PARSER_VARCHAR *b = NULL, *r1; + if (p->info.update.with != NULL) + { + r1 = pt_print_bytes_l (parser, p->info.update.with); + b = pt_append_varchar (parser, b, r1); + } + b = pt_append_nulstring (parser, b, "update "); if (p->info.update.hint != PT_HINT_NONE) @@ -17942,9 +18036,6 @@ pt_is_const_expr_node (PT_NODE * node) return (pt_is_const_expr_node (node->info.expr.arg1) && pt_is_const_expr_node (node->info.expr.arg2)) ? true : false; case PT_SUBSTRING_INDEX: - case PT_JSON_SEARCH: - return (pt_is_const_expr_node (node->info.expr.arg1) && pt_is_const_expr_node (node->info.expr.arg2) - && pt_is_const_expr_node (node->info.expr.arg3)) ? 
true : false; case PT_SUBSTRING: case PT_LOCATE: return (pt_is_const_expr_node (node->info.expr.arg1) && pt_is_const_expr_node (node->info.expr.arg2) @@ -18124,7 +18215,10 @@ pt_is_const_expr_node (PT_NODE * node) case PT_COLLATION: case PT_JSON_TYPE: case PT_JSON_VALID: + case PT_JSON_QUOTE: + case PT_JSON_UNQUOTE: case PT_JSON_DEPTH: + case PT_JSON_PRETTY: return pt_is_const_expr_node (node->info.expr.arg1); case PT_COERCIBILITY: /* coercibility is always folded to constant */ @@ -18585,9 +18679,11 @@ pt_is_allowed_as_function_index (const PT_NODE * expr) case PT_JSON_TYPE: case PT_JSON_EXTRACT: case PT_JSON_VALID: + case PT_JSON_QUOTE: + case PT_JSON_UNQUOTE: case PT_JSON_LENGTH: case PT_JSON_DEPTH: - case PT_JSON_SEARCH: + case PT_JSON_PRETTY: return true; case PT_TZ_OFFSET: default: @@ -18982,9 +19078,8 @@ pt_print_query_trace (PARSER_CONTEXT * parser, PT_NODE * p) return b; } -/* pt_clean_tree_copy_info - deallocate memory used by a PT_TREE_COPY_INFO - * - * +/* + * pt_clean_tree_copy_info () - deallocate memory used by a PT_TREE_COPY_INFO */ static void pt_clean_tree_copy_info (PT_TREE_COPY_INFO * tree_copy_info) @@ -18998,3 +19093,284 @@ pt_clean_tree_copy_info (PT_TREE_COPY_INFO * tree_copy_info) free (cte_info_it); } } + +static PT_NODE * +pt_init_json_table (PT_NODE * p) +{ + p->info.json_table_info.expr = NULL; + p->info.json_table_info.tree = NULL; + p->info.json_table_info.is_correlated = false; + return p; +} + +static PT_NODE * +pt_apply_json_table (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION g, void *arg) +{ + p->info.json_table_info.expr = g (parser, p->info.json_table_info.expr, arg); + p->info.json_table_info.tree = g (parser, p->info.json_table_info.tree, arg); + return p; +} + +static PARSER_VARCHAR * +pt_print_json_table (PARSER_CONTEXT * parser, PT_NODE * p) +{ + PARSER_VARCHAR *pstr = NULL; + PARSER_VARCHAR *substr = NULL; + + // print format: + // json_table (.expr, .tree) + + // 'json_table (' + pstr = pt_append_nulstring 
(parser, pstr, "json_table ("); + + // print expr + substr = pt_print_bytes (parser, p->info.json_table_info.expr); + pstr = pt_append_varchar (parser, pstr, substr); + + // ', ' print tree + pstr = pt_append_nulstring (parser, pstr, ", "); + substr = pt_print_bytes (parser, p->info.json_table_info.tree); + pstr = pt_append_varchar (parser, pstr, substr); + + // ')' + pstr = pt_append_nulstring (parser, pstr, ")"); + + return pstr; +} + +static PT_NODE * +pt_init_json_table_node (PT_NODE * p) +{ + p->info.json_table_node_info.columns = NULL; + p->info.json_table_node_info.nested_paths = NULL; + p->info.json_table_node_info.path = NULL; + return p; +} + +static PT_NODE * +pt_apply_json_table_node (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION g, void *arg) +{ + p->info.json_table_node_info.columns = g (parser, p->info.json_table_node_info.columns, arg); + p->info.json_table_node_info.nested_paths = g (parser, p->info.json_table_node_info.nested_paths, arg); + return p; +} + +static PARSER_VARCHAR * +pt_print_json_table_node (PARSER_CONTEXT * parser, PT_NODE * p) +{ + PARSER_VARCHAR *pstr = NULL; + PARSER_VARCHAR *substr = NULL; + + // print format: + // .path columns (.columns, .nested paths) + + // todo - print columns and nested path in same order as defined by user... 
+ + // print path + pstr = pt_append_nulstring (parser, pstr, "'"); + pstr = pt_append_nulstring (parser, pstr, p->info.json_table_node_info.path); + pstr = pt_append_nulstring (parser, pstr, "'"); + + // 'columns (' + pstr = pt_append_nulstring (parser, pstr, " columns ("); + + // print columns + substr = pt_print_bytes (parser, p->info.json_table_node_info.columns); + pstr = pt_append_varchar (parser, pstr, substr); + + if (p->info.json_table_node_info.nested_paths != NULL) + { + // ', nested path ' print nested + pstr = pt_append_nulstring (parser, pstr, ", nested path "); + substr = pt_print_bytes (parser, p->info.json_table_node_info.nested_paths); + pstr = pt_append_varchar (parser, pstr, substr); + } + + // ' )' + pstr = pt_append_nulstring (parser, pstr, " )"); + + return pstr; +} + +static PT_NODE * +pt_init_json_table_column (PT_NODE * p) +{ + p->info.json_table_column_info.name = NULL; + p->info.json_table_column_info.path = NULL; + p->info.json_table_column_info.func = JSON_TABLE_EXTRACT; + p->info.json_table_column_info.on_error.m_behavior = JSON_TABLE_RETURN_NULL; + p->info.json_table_column_info.on_error.m_default_value = NULL; + p->info.json_table_column_info.on_empty.m_behavior = JSON_TABLE_RETURN_NULL; + p->info.json_table_column_info.on_empty.m_default_value = NULL; + return p; +} + +static PT_NODE * +pt_apply_json_table_column (PARSER_CONTEXT * parser, PT_NODE * p, PT_NODE_FUNCTION g, void *arg) +{ + p->info.json_table_column_info.name = g (parser, p->info.json_table_column_info.name, arg); + return p; +} + +// +// pt_json_table_column_behavior_to_string () +// +// return : stringify behavior +// behavior_type (in) : behavior enum value +// +static const char * +pt_json_table_column_behavior_to_string (const json_table_column_behavior_type & behavior_type) +{ + switch (behavior_type) + { + case json_table_column_behavior_type::JSON_TABLE_RETURN_NULL: + return "NULL"; + + case json_table_column_behavior_type::JSON_TABLE_DEFAULT_VALUE: + return 
"DEFAULT"; + + case json_table_column_behavior_type::JSON_TABLE_THROW_ERROR: + return "ERROR"; + + default: + assert (false); + return "UNKNOWN BEHAVIOR"; + } +} + +// +// pt_print_json_table_column_error_or_empty_behavior () - print json table column behavior +// +// return : parser varchar +// parser (in) : parser context +// pstr (in/out) : parser varchar where printed column behavior is appended +// column_behavior (in) : column behavior +// +static PARSER_VARCHAR * +pt_print_json_table_column_error_or_empty_behavior (PARSER_CONTEXT * parser, PARSER_VARCHAR * pstr, + const struct json_table_column_behavior &column_behavior) +{ + PARSER_VARCHAR *substr = NULL; + + // print behavior type + pstr = pt_append_nulstring (parser, pstr, pt_json_table_column_behavior_to_string (column_behavior.m_behavior)); + + if (column_behavior.m_behavior == json_table_column_behavior_type::JSON_TABLE_DEFAULT_VALUE) + { + pstr = pt_append_nulstring (parser, pstr, " "); + + substr = pt_print_db_value (parser, column_behavior.m_default_value); + pstr = pt_append_varchar (parser, pstr, substr); + } + + return pstr; +} + +// +// pt_print_json_table_column_info () - print json table column info +// +// return : parser varchar +// parser (in) : parser context +// p (in) : print column +// pstr (in/out) : parser varchar where printed column info is appended +// +static PARSER_VARCHAR * +pt_print_json_table_column_info (PARSER_CONTEXT * parser, PT_NODE * p, PARSER_VARCHAR * pstr) +{ + PARSER_VARCHAR *substr = NULL; + const char *type = NULL; + + assert (p->node_type == PT_JSON_TABLE_COLUMN); + + // print format: + // name FOR ORDINALITY + // | name type PATH string path[on_error][on_empty] + // | name type EXISTS PATH string path + + // print name + pstr = pt_append_nulstring (parser, pstr, p->info.json_table_column_info.name->info.name.original); + + // get the type + type = pt_type_enum_to_db_domain_name (p->type_enum); + + switch (p->info.json_table_column_info.func) + { + case 
json_table_column_function::JSON_TABLE_ORDINALITY: + // print FOR ORDINALITY + pstr = pt_append_nulstring (parser, pstr, " FOR ORDINALITY"); + break; + + case json_table_column_function::JSON_TABLE_EXTRACT: + // print type + pstr = pt_append_nulstring (parser, pstr, " "); + pstr = pt_append_nulstring (parser, pstr, type); + + // print PATH + pstr = pt_append_nulstring (parser, pstr, " PATH "); + + // print path + pstr = pt_append_nulstring (parser, pstr, "'"); + pstr = pt_append_nulstring (parser, pstr, p->info.json_table_column_info.path); + pstr = pt_append_nulstring (parser, pstr, "'"); + + // print on_error + pstr = pt_append_nulstring (parser, pstr, " "); + pstr = pt_print_json_table_column_error_or_empty_behavior (parser, pstr, p->info.json_table_column_info.on_error); + pstr = pt_append_nulstring (parser, pstr, " ON ERROR"); + + // print on_empty + pstr = pt_append_nulstring (parser, pstr, " "); + pstr = pt_print_json_table_column_error_or_empty_behavior (parser, pstr, p->info.json_table_column_info.on_empty); + pstr = pt_append_nulstring (parser, pstr, " ON EMPTY"); + break; + + case json_table_column_function::JSON_TABLE_EXISTS: + // print type + pstr = pt_append_nulstring (parser, pstr, " "); + pstr = pt_append_nulstring (parser, pstr, type); + + // print EXISTS PATH + pstr = pt_append_nulstring (parser, pstr, " EXISTS PATH "); + + // print path + pstr = pt_append_nulstring (parser, pstr, "'"); + pstr = pt_append_nulstring (parser, pstr, p->info.json_table_column_info.path); + pstr = pt_append_nulstring (parser, pstr, "'"); + break; + + default: + /* should not be here */ + assert (false); + break; + } + + return pstr; +} + +static PARSER_VARCHAR * +pt_print_json_table_columns (PARSER_CONTEXT * parser, PT_NODE * p) +{ + PARSER_VARCHAR *pstr = NULL; + PT_NODE *p_it = NULL; + + // append each column + for (p_it = p; p_it->next != NULL; p_it = p_it->next) + { + pstr = pt_print_json_table_column_info (parser, p_it, pstr); + // print ',' + pstr = 
pt_append_nulstring (parser, pstr, ", "); + } + + // the last column + pstr = pt_print_json_table_column_info (parser, p_it, pstr); + + return pstr; +} + +// pt_move_node - move PT_NODE pointer from source to destination. useful to automatically assign and unlink +void +pt_move_node (REFPTR (PT_NODE, destp), REFPTR (PT_NODE, srcp)) +{ + destp = srcp; + srcp = NULL; +} diff --git a/src/parser/parser.h b/src/parser/parser.h index ad95e4b8d15..9325bf9a028 100644 --- a/src/parser/parser.h +++ b/src/parser/parser.h @@ -31,7 +31,6 @@ #include #include "system.h" #include "dbtype_def.h" -#include "dbdef.h" #include "parse_tree.h" #if defined (SERVER_MODE) @@ -63,6 +62,8 @@ extern "C" HIDDEN_CLASSOID_NAME } VIEW_HANDLING; + extern size_t json_table_column_count; + extern PT_NODE **parser_main (PARSER_CONTEXT * p); extern void parser_final (void); @@ -648,4 +649,8 @@ extern "C" } #endif -#endif /* _PARSER_H_ */ +#if defined __cplusplus +extern void pt_move_node (REFPTR (PT_NODE, destp), REFPTR (PT_NODE, srcp)); +#endif // c++ + +#endif /* _PARSER_H_ */ diff --git a/src/parser/parser_message.h b/src/parser/parser_message.h index 438d186df75..4a547838888 100644 --- a/src/parser/parser_message.h +++ b/src/parser/parser_message.h @@ -163,6 +163,7 @@ #define MSGCAT_SYNTAX_INVALID_SESSION_TIMEZONE MSGCAT_SYNTAX_NO(125) #define MSGCAT_SYNTAX_MAX_CLASS_COMMENT_LEN MSGCAT_SYNTAX_NO(126) #define MSGCAT_SYNTAX_MAX_COMMENT_LEN MSGCAT_SYNTAX_NO(127) +#define MSGCAT_SYNTAX_INVALID_JSON_OBJECTAGG MSGCAT_SYNTAX_NO(128) /* Message id in the set MSGCAT_SET_PARSER_SEMANTIC */ diff --git a/src/parser/parser_support.c b/src/parser/parser_support.c index e8b6550cd00..e7b8cbe36e3 100644 --- a/src/parser/parser_support.c +++ b/src/parser/parser_support.c @@ -675,7 +675,8 @@ pt_is_aggregate_function (PARSER_CONTEXT * parser, const PT_NODE * node) || function_type == PT_AGG_BIT_AND || function_type == PT_AGG_BIT_OR || function_type == PT_AGG_BIT_XOR || function_type == PT_GROUP_CONCAT || 
function_type == PT_MEDIAN || function_type == PT_PERCENTILE_CONT || function_type == PT_PERCENTILE_DISC || function_type == PT_CUME_DIST - || function_type == PT_PERCENT_RANK)) + || function_type == PT_PERCENT_RANK || function_type == PT_JSON_ARRAYAGG + || function_type == PT_JSON_OBJECTAGG)) { return true; } @@ -720,9 +721,11 @@ pt_is_expr_wrapped_function (PARSER_CONTEXT * parser, const PT_NODE * node) function_type = node->info.function.function_type; if (function_type == F_INSERT_SUBSTRING || function_type == F_ELT || function_type == F_JSON_OBJECT || function_type == F_JSON_ARRAY || function_type == F_JSON_INSERT || function_type == F_JSON_REMOVE - || function_type == F_JSON_MERGE || function_type == F_JSON_ARRAY_APPEND - || function_type == F_JSON_GET_ALL_PATHS || function_type == F_JSON_REPLACE || function_type == F_JSON_SET - || function_type == F_JSON_KEYS) + || function_type == F_JSON_MERGE || function_type == F_JSON_MERGE_PATCH + || function_type == F_JSON_ARRAY_APPEND || function_type == F_JSON_ARRAY_INSERT + || function_type == F_JSON_CONTAINS_PATH || function_type == F_JSON_GET_ALL_PATHS + || function_type == F_JSON_REPLACE || function_type == F_JSON_SET + || function_type == F_JSON_KEYS || function_type == F_JSON_SEARCH) { return true; } @@ -4469,7 +4472,7 @@ regu_agg_init (AGGREGATE_TYPE * ptr) ptr->accumulator.curr_cnt = 0; ptr->function = (FUNC_TYPE) 0; ptr->option = (QUERY_OPTIONS) 0; - regu_var_init (&ptr->operand); + ptr->operands = NULL; ptr->list_id = NULL; ptr->sort_list = NULL; memset (&ptr->info, 0, sizeof (AGGREGATE_SPECIFIC_FUNCTION_INFO)); @@ -4810,6 +4813,12 @@ regu_spec_init (ACCESS_SPEC_TYPE * ptr, TARGET_TYPE type) ACCESS_SPEC_XASL_NODE (ptr) = NULL; ACCESS_SPEC_METHOD_SIG_LIST (ptr) = NULL; } + else if (type == TARGET_JSON_TABLE) + { + ACCESS_SPEC_JSON_TABLE_REGU_VAR (ptr) = NULL; + ACCESS_SPEC_JSON_TABLE_ROOT_NODE (ptr) = NULL; + ACCESS_SPEC_JSON_TABLE_M_NODE_COUNT (ptr) = 0; + } ptr->single_fetch = (QPROC_SINGLE_FETCH) false; 
ptr->s_dbval = NULL; ptr->next = NULL; @@ -9096,7 +9105,7 @@ pt_make_query_show_create_table (PARSER_CONTEXT * parser, PT_NODE * table_name) for (; dim < block.dim + len; dim *= 2) // calc next power of 2 >= b.dim+len ; - mem::block b{ dim, (char *) parser_alloc (parser, dim) }; + mem::block b{ dim, (char *) parser_alloc (parser, (const int) dim) }; memcpy (b.ptr, block.ptr, block.dim); // copy old content block = std::move (b); }, diff --git a/src/parser/scanner_support.c b/src/parser/scanner_support.c index 406f3c746e6..9d24b36b41f 100644 --- a/src/parser/scanner_support.c +++ b/src/parser/scanner_support.c @@ -33,7 +33,6 @@ #define JP_MAXNAME 256 #include "parser.h" -#include "dbdef.h" #include "chartype.h" #include "language_support.h" #include "intl_support.h" diff --git a/src/parser/semantic_check.c b/src/parser/semantic_check.c index bd8b25a5210..00c25909456 100644 --- a/src/parser/semantic_check.c +++ b/src/parser/semantic_check.c @@ -5217,7 +5217,11 @@ pt_find_partition_column_count_func (PT_NODE * func, PT_NODE ** name_node) case F_JSON_KEYS: case F_JSON_REMOVE: case F_JSON_ARRAY_APPEND: + case F_JSON_ARRAY_INSERT: + case F_JSON_CONTAINS_PATH: + case F_JSON_SEARCH: case F_JSON_MERGE: + case F_JSON_MERGE_PATCH: case F_JSON_GET_ALL_PATHS: break; default: @@ -9791,7 +9795,7 @@ pt_semantic_check_local (PARSER_CONTEXT * parser, PT_NODE * node, void *arg, int break; } - if (entity->info.spec.derived_table != NULL) + if (entity->info.spec.derived_table != NULL || PT_SPEC_IS_CTE (entity)) { PT_ERRORm (parser, node, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_UPDATE_DERIVED_TABLE); break; @@ -10009,6 +10013,17 @@ pt_semantic_check_local (PARSER_CONTEXT * parser, PT_NODE * node, void *arg, int } break; + case PT_JSON_TABLE: + if (node->info.json_table_info.expr->type_enum != PT_TYPE_JSON + && node->info.json_table_info.expr->type_enum != PT_TYPE_CHAR + && node->info.json_table_info.expr->type_enum != PT_TYPE_MAYBE) + { + // todo: can this be improved to hint that 
we are talking about json_table's expression + PT_ERRORmf (parser, node, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_WANT_TYPE, + pt_show_type_enum (PT_TYPE_JSON)); + } + break; + default: /* other node types */ break; } @@ -15142,7 +15157,11 @@ pt_check_filter_index_expr_pre (PARSER_CONTEXT * parser, PT_NODE * node, void *a case F_JSON_KEYS: case F_JSON_REMOVE: case F_JSON_ARRAY_APPEND: + case F_JSON_ARRAY_INSERT: + case F_JSON_SEARCH: + case F_JSON_CONTAINS_PATH: case F_JSON_MERGE: + case F_JSON_MERGE_PATCH: case F_JSON_GET_ALL_PATHS: /* valid expression, nothing to do */ break; @@ -15896,16 +15915,13 @@ pt_check_union_is_foldable (PARSER_CONTEXT * parser, PT_NODE * union_node) PT_NODE * pt_fold_union (PARSER_CONTEXT * parser, PT_NODE * union_node, STATEMENT_SET_FOLD fold_as) { - PT_NODE *arg1, *arg2, *new_node, *next; + PT_NODE *new_node, *next; int line, column; const char *alias_print; assert (union_node->node_type == PT_UNION || union_node->node_type == PT_INTERSECTION || union_node->node_type == PT_DIFFERENCE); - arg1 = union_node->info.query.q.union_.arg1; - arg2 = union_node->info.query.q.union_.arg2; - line = union_node->line_number; column = union_node->column_number; alias_print = union_node->alias_print; @@ -15927,19 +15943,19 @@ pt_fold_union (PARSER_CONTEXT * parser, PT_NODE * union_node, STATEMENT_SET_FOLD if (fold_as == STATEMENT_SET_FOLD_AS_ARG1) { - active = arg1; + pt_move_node (active, union_node->info.query.q.union_.arg1); } else { - active = arg2; + pt_move_node (active, union_node->info.query.q.union_.arg2); } /* to save union's orderby or limit clause to arg1 or arg2 */ - union_orderby = union_node->info.query.order_by; - union_orderby_for = union_node->info.query.orderby_for; - union_limit = union_node->info.query.limit; + pt_move_node (union_orderby, union_node->info.query.order_by); + pt_move_node (union_orderby_for, union_node->info.query.orderby_for); + pt_move_node (union_limit, union_node->info.query.limit); union_rewrite_limit = 
union_node->info.query.rewrite_limit; - union_with_clause = union_node->info.query.with; + pt_move_node (union_with_clause, union_node->info.query.with); /* When active node has a limit or orderby_for clause and union node has a limit or ORDERBY clause, need a * derived table to keep both conflicting clauses. When a subquery has orderby clause without @@ -15963,20 +15979,7 @@ pt_fold_union (PARSER_CONTEXT * parser, PT_NODE * union_node, STATEMENT_SET_FOLD new_node = active; } - /* unlink and free union node */ - union_node->info.query.order_by = NULL; - union_node->info.query.orderby_for = NULL; - union_node->info.query.limit = NULL; - if (fold_as == STATEMENT_SET_FOLD_AS_ARG1) - { - union_node->info.query.q.union_.arg1 = NULL; /* to save arg1 to fold */ - } - else - { - union_node->info.query.q.union_.arg2 = NULL; /* to save arg2 to fold */ - } - union_node->info.query.with = NULL; - + /* free union node */ parser_free_tree (parser, union_node); /* to fold the query with remaining parts */ diff --git a/src/parser/show_meta.c b/src/parser/show_meta.c old mode 100755 new mode 100644 diff --git a/src/parser/show_meta.h b/src/parser/show_meta.h old mode 100755 new mode 100644 diff --git a/src/parser/type_checking.c b/src/parser/type_checking.c index 2af588192e9..a973e56d0ab 100644 --- a/src/parser/type_checking.c +++ b/src/parser/type_checking.c @@ -54,6 +54,7 @@ #include "network_interface_cl.h" #include "object_template.h" #include "db.h" +#include "tz_support.h" #include "dbtype.h" @@ -321,6 +322,7 @@ static PT_NODE *pt_check_function_collation (PARSER_CONTEXT * parser, PT_NODE * static void pt_hv_consistent_data_type_with_domain (PARSER_CONTEXT * parser, PT_NODE * node); static void pt_update_host_var_data_type (PARSER_CONTEXT * parser, PT_NODE * hv_node); static bool pt_cast_needs_wrap_for_collation (PT_NODE * node, const INTL_CODESET codeset); +static PT_TYPE_ENUM pt_to_variable_size_type (PT_TYPE_ENUM type_enum); /* * pt_get_expression_definition () - get the 
expression definition for the @@ -4823,7 +4825,7 @@ pt_get_expression_definition (const PT_OP_TYPE op, EXPRESSION_DEFINITION * def) /* return type */ sig.return_type.is_generic = false; - sig.return_type.val.type = PT_TYPE_CHAR; + sig.return_type.val.type = PT_TYPE_VARCHAR; def->overloads[num++] = sig; def->overloads_count = num; @@ -4858,6 +4860,38 @@ pt_get_expression_definition (const PT_OP_TYPE op, EXPRESSION_DEFINITION * def) sig.return_type.val.type = PT_TYPE_INTEGER; def->overloads[num++] = sig; + def->overloads_count = num; + break; + case PT_JSON_QUOTE: + num = 0; + + /* one overload */ + + /* arg1 */ + sig.arg1_type.is_generic = false; + sig.arg1_type.val.type = PT_TYPE_CHAR; + + /* return type */ + sig.return_type.is_generic = false; + sig.return_type.val.type = PT_TYPE_CHAR; + def->overloads[num++] = sig; + + def->overloads_count = num; + break; + case PT_JSON_UNQUOTE: + num = 0; + + /* one overload */ + + /* arg1 */ + sig.arg1_type.is_generic = false; + sig.arg1_type.val.type = PT_TYPE_JSON; + + /* return type */ + sig.return_type.is_generic = false; + sig.return_type.val.type = PT_TYPE_VARCHAR; + def->overloads[num++] = sig; + def->overloads_count = num; break; case PT_JSON_LENGTH: @@ -4912,22 +4946,18 @@ pt_get_expression_definition (const PT_OP_TYPE op, EXPRESSION_DEFINITION * def) def->overloads_count = num; break; - case PT_JSON_SEARCH: + case PT_JSON_PRETTY: num = 0; + /* one overload */ + /* arg1 */ sig.arg1_type.is_generic = false; sig.arg1_type.val.type = PT_TYPE_JSON; - /* arg2 */ - sig.arg2_type.is_generic = false; - sig.arg2_type.val.type = PT_TYPE_CHAR; - /* arg3 */ - sig.arg3_type.is_generic = false; - sig.arg3_type.val.type = PT_TYPE_CHAR; /* return type */ sig.return_type.is_generic = false; - sig.return_type.val.type = PT_TYPE_JSON; + sig.return_type.val.type = PT_TYPE_VARCHAR; def->overloads[num++] = sig; def->overloads_count = num; @@ -6920,7 +6950,9 @@ pt_is_symmetric_op (const PT_OP_TYPE op) case PT_JSON_VALID: case PT_JSON_LENGTH: 
case PT_JSON_DEPTH: - case PT_JSON_SEARCH: + case PT_JSON_QUOTE: + case PT_JSON_UNQUOTE: + case PT_JSON_PRETTY: return false; default: @@ -7711,8 +7743,14 @@ pt_eval_type_pre (PARSER_CONTEXT * parser, PT_NODE * node, void *arg, int *conti /* propagate to children */ arg1 = node->info.query.q.union_.arg1; arg2 = node->info.query.q.union_.arg2; - arg1->info.query.has_outer_spec = node->info.query.has_outer_spec; - arg2->info.query.has_outer_spec = node->info.query.has_outer_spec; + if (arg1 != NULL) + { + arg1->info.query.has_outer_spec = node->info.query.has_outer_spec; + } + if (arg2 != NULL) + { + arg2->info.query.has_outer_spec = node->info.query.has_outer_spec; + } /* rewrite limit clause as numbering expression and add it to the corresponding predicate */ if (node->info.query.limit && node->info.query.rewrite_limit) @@ -10058,6 +10096,13 @@ pt_eval_expr_type (PARSER_CONTEXT * parser, PT_NODE * node) /* we will end up with logical here if arg3 and arg2 are logical */ common_type = PT_TYPE_INTEGER; } + // CBRD-22431 hack: + // we have an issue with different precision domains when value is packed into a list file and then used by a + // list scan. in the issue, the value is folded and packed with fixed precision, but the unpacking expects + // no precision, corrupting the read. 
+ // next line is a quick fix to force no precision domain; however, we should consider a more robus list scan + // implementation that always matches domains used to generate the list file + common_type = pt_to_variable_size_type (common_type); if (pt_coerce_expression_argument (parser, node, &arg2, common_type, NULL) != NO_ERROR) { node->type_enum = PT_TYPE_NONE; @@ -12171,10 +12216,13 @@ pt_upd_domain_info (PARSER_CONTEXT * parser, PT_NODE * arg1, PT_NODE * arg2, PT_ || node->info.function.function_type == F_JSON_OBJECT || node->info.function.function_type == F_JSON_ARRAY || node->info.function.function_type == F_JSON_INSERT || node->info.function.function_type == F_JSON_REMOVE || node->info.function.function_type == F_JSON_MERGE + || node->info.function.function_type == F_JSON_MERGE_PATCH || node->info.function.function_type == F_JSON_ARRAY_APPEND + || node->info.function.function_type == F_JSON_ARRAY_INSERT + || node->info.function.function_type == F_JSON_CONTAINS_PATH || node->info.function.function_type == F_JSON_GET_ALL_PATHS || node->info.function.function_type == F_JSON_REPLACE || node->info.function.function_type == F_JSON_SET - || node->info.function.function_type == F_JSON_KEYS) + || node->info.function.function_type == F_JSON_SEARCH || node->info.function.function_type == F_JSON_KEYS) { assert (dt == NULL); dt = pt_make_prim_data_type (parser, node->type_enum); @@ -12953,6 +13001,71 @@ pt_eval_function_type (PARSER_CONTEXT * parser, PT_NODE * node) } break; + case PT_JSON_ARRAYAGG: + { + bool is_supported = pt_is_json_value_type (arg_list->type_enum); + + if (!is_supported) + { + arg_type = PT_TYPE_NONE; + PT_ERRORmf2 (parser, node, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_FUNC_NOT_DEFINED_ON, + pt_show_function (fcode), pt_show_type_enum (arg_list->type_enum)); + break; + } + + /* cast arg_list to json */ + arg_list = pt_wrap_with_cast_op (parser, arg_list, PT_TYPE_JSON, 0, 0, NULL); + if (arg_list == NULL) + { + return node; + } + + arg_type = 
PT_TYPE_JSON; + node->info.function.arg_list = arg_list; + } + break; + + case PT_JSON_OBJECTAGG: + { + // we will have 2 arguments (key, value) + // the key needs to be STRING type and the value can be any type compatible with JSON type + + // check key + PT_NODE *key = arg_list; + PT_NODE *value = arg_list->next; + + if (!PT_IS_STRING_TYPE (key->type_enum)) + { + arg_type = PT_TYPE_NONE; + PT_ERRORmf2 (parser, node, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_FUNC_NOT_DEFINED_ON, + pt_show_function (fcode), pt_show_type_enum (key->type_enum)); + break; + } + + // check value + bool is_supported = pt_is_json_value_type (value->type_enum); + if (!is_supported) + { + arg_type = PT_TYPE_NONE; + PT_ERRORmf2 (parser, node, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_FUNC_NOT_DEFINED_ON, + pt_show_function (fcode), pt_show_type_enum (value->type_enum)); + break; + } + + /* cast value to json */ + arg_list->next = pt_wrap_with_cast_op (parser, value, PT_TYPE_JSON, 0, 0, NULL); + if (arg_list->next == NULL) + { + return node; + } + + arg_type = PT_TYPE_JSON; + node->info.function.arg_list = arg_list; + // JSON_OBJECTAGG requires 2 arguments + check_agg_single_arg = false; + } + break; + case F_JSON_OBJECT: { PT_TYPE_ENUM unsupported_type; @@ -13028,6 +13141,7 @@ pt_eval_function_type (PARSER_CONTEXT * parser, PT_NODE * node) break; case F_JSON_MERGE: + case F_JSON_MERGE_PATCH: { PT_TYPE_ENUM unsupported_type; bool is_supported; @@ -13063,6 +13177,7 @@ pt_eval_function_type (PARSER_CONTEXT * parser, PT_NODE * node) case F_JSON_REPLACE: case F_JSON_SET: case F_JSON_ARRAY_APPEND: + case F_JSON_ARRAY_INSERT: { PT_TYPE_ENUM unsupported_type; unsigned int index = 0; @@ -13108,6 +13223,52 @@ pt_eval_function_type (PARSER_CONTEXT * parser, PT_NODE * node) } break; + case F_JSON_CONTAINS_PATH: + { + PT_TYPE_ENUM unsupported_type; + unsigned int index = 1; + bool is_supported = false; + PT_NODE *arg = arg_list; + + is_supported = pt_is_json_doc_type (arg->type_enum); + if 
(!is_supported) + { + arg_type = PT_TYPE_NONE; + PT_ERRORmf2 (parser, node, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_FUNC_NOT_DEFINED_ON, + pt_show_function (fcode), pt_show_type_enum (arg->type_enum)); + break; + } + + arg = arg->next; + while (arg) + { + if (index > 1) + { + is_supported = pt_is_json_path (arg->type_enum); + } + else + { + is_supported = pt_is_json_doc_type (arg->type_enum); + } + + if (!is_supported) + { + unsupported_type = arg->type_enum; + break; + } + + arg = arg->next; + index++; + } + if (!is_supported) + { + arg_type = PT_TYPE_NONE; + PT_ERRORmf2 (parser, node, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_FUNC_NOT_DEFINED_ON, + pt_show_function (fcode), pt_show_type_enum (unsupported_type)); + } + } + break; + case F_JSON_REMOVE: { PT_TYPE_ENUM unsupported_type; @@ -13162,6 +13323,51 @@ pt_eval_function_type (PARSER_CONTEXT * parser, PT_NODE * node) } break; + case F_JSON_SEARCH: + { + // JSON_SEARCH (json_doc, one_or_all, pattern, [esc_charr, path_1, ... path_n]) + PT_TYPE_ENUM unsupported_type; + PT_NODE *arg = arg_list; + bool is_supported = false; + is_supported = pt_is_json_doc_type (arg->type_enum); + if (!is_supported) + { + arg_type = PT_TYPE_NONE; + PT_ERRORmf2 (parser, node, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_FUNC_NOT_DEFINED_ON, + pt_show_function (fcode), pt_show_type_enum (arg->type_enum)); + break; + } + + arg = arg->next; + unsigned int index = 1; + while (arg) + { + if (index < 4) + { + is_supported = (PT_IS_STRING_TYPE (arg->type_enum) || arg->type_enum == PT_TYPE_MAYBE + || arg->type_enum == PT_TYPE_NULL || arg->type_enum == PT_TYPE_NA); + } + else + { + // args[4+] can be only paths + is_supported = pt_is_json_path (arg->type_enum); + } + + if (!is_supported) + { + unsupported_type = arg->type_enum; + arg_type = PT_TYPE_NONE; + PT_ERRORmf2 (parser, node, MSGCAT_SET_PARSER_SEMANTIC, MSGCAT_SEMANTIC_FUNC_NOT_DEFINED_ON, + pt_show_function (fcode), pt_show_type_enum (unsupported_type)); + break; + } + + ++index; 
+ arg = arg->next; + } + } + break; + case F_JSON_KEYS: { // should have maximum 2 parameters @@ -13178,7 +13384,7 @@ pt_eval_function_type (PARSER_CONTEXT * parser, PT_NODE * node) is_supported = pt_is_json_doc_type (arg->type_enum); break; case 1: - is_supported = pt_is_json_path (arg->type_enum);; + is_supported = pt_is_json_path (arg->type_enum); break; default: /* Should not happen */ @@ -13526,6 +13732,9 @@ pt_eval_function_type (PARSER_CONTEXT * parser, PT_NODE * node) node->data_type = NULL; break; + + case PT_JSON_ARRAYAGG: + case PT_JSON_OBJECTAGG: case F_JSON_OBJECT: case F_JSON_ARRAY: case F_JSON_INSERT: @@ -13534,10 +13743,16 @@ pt_eval_function_type (PARSER_CONTEXT * parser, PT_NODE * node) case F_JSON_KEYS: case F_JSON_REMOVE: case F_JSON_ARRAY_APPEND: + case F_JSON_ARRAY_INSERT: case F_JSON_MERGE: + case F_JSON_MERGE_PATCH: case F_JSON_GET_ALL_PATHS: + case F_JSON_SEARCH: node->type_enum = PT_TYPE_JSON; break; + case F_JSON_CONTAINS_PATH: + node->type_enum = PT_TYPE_INTEGER; + break; case PT_MEDIAN: case PT_PERCENTILE_CONT: case PT_PERCENTILE_DISC: @@ -16945,11 +17160,30 @@ pt_evaluate_db_value_expr (PARSER_CONTEXT * parser, PT_NODE * expr, PT_OP_TYPE o return 0; } break; - case PT_JSON_SEARCH: - error = ER_DB_UNIMPLEMENTED; - er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_DB_UNIMPLEMENTED, 1, "JSON_SEARCH"); - PT_ERRORc (parser, o1, er_msg ()); - return 0; + case PT_JSON_QUOTE: + error = db_string_quote (arg1, result); + if (error != NO_ERROR) + { + PT_ERRORc (parser, o1, er_msg ()); + return 0; + } + break; + case PT_JSON_UNQUOTE: + error = db_json_unquote_dbval (arg1, result); + if (error != NO_ERROR) + { + PT_ERRORc (parser, o1, er_msg ()); + return 0; + } + break; + case PT_JSON_PRETTY: + error = db_json_pretty_dbval (arg1, result); + if (error != NO_ERROR) + { + PT_ERRORc (parser, o1, er_msg ()); + return 0; + } + break; case PT_POWER: error = db_power_dbval (result, arg1, arg2); if (error != NO_ERROR) @@ -20260,6 +20494,7 @@ 
pt_evaluate_function_w_args (PARSER_CONTEXT * parser, FUNC_TYPE fcode, DB_VALUE return 0; } break; + case F_ELT: error = db_string_elt (result, args, num_args); if (error != NO_ERROR) @@ -20267,91 +20502,75 @@ pt_evaluate_function_w_args (PARSER_CONTEXT * parser, FUNC_TYPE fcode, DB_VALUE return 0; } break; + case F_JSON_OBJECT: error = db_json_object (result, args, num_args); - if (error != NO_ERROR) - { - PT_ERRORc (parser, NULL, er_msg ()); - return 0; - } break; + case F_JSON_ARRAY: error = db_json_array (result, args, num_args); - if (error != NO_ERROR) - { - PT_ERRORc (parser, NULL, er_msg ()); - return 0; - } break; + case F_JSON_INSERT: error = db_json_insert (result, args, num_args); - if (error != NO_ERROR) - { - PT_ERRORc (parser, NULL, er_msg ()); - return 0; - } break; + case F_JSON_REPLACE: error = db_json_replace (result, args, num_args); - if (error != NO_ERROR) - { - PT_ERRORc (parser, NULL, er_msg ()); - return 0; - } break; + case F_JSON_SET: error = db_json_set (result, args, num_args); - if (error != NO_ERROR) - { - PT_ERRORc (parser, NULL, er_msg ()); - return 0; - } break; + case F_JSON_KEYS: error = db_json_keys (result, args, num_args); - if (error != NO_ERROR) - { - PT_ERRORc (parser, NULL, er_msg ()); - return 0; - } break; + case F_JSON_REMOVE: error = db_json_remove (result, args, num_args); - if (error != NO_ERROR) - { - PT_ERRORc (parser, NULL, er_msg ()); - return 0; - } break; + case F_JSON_ARRAY_APPEND: error = db_json_array_append (result, args, num_args); - if (error != NO_ERROR) - { - PT_ERRORc (parser, NULL, er_msg ()); - return 0; - } break; + + case F_JSON_ARRAY_INSERT: + error = db_json_array_insert (result, args, num_args); + break; + + case F_JSON_CONTAINS_PATH: + error = db_json_contains_path (result, args, num_args); + break; + case F_JSON_MERGE: error = db_json_merge (result, args, num_args); - if (error != NO_ERROR) - { - PT_ERRORc (parser, NULL, er_msg ()); - return 0; - } break; + + case F_JSON_MERGE_PATCH: + error = 
db_json_merge_patch (result, args, num_args); + break; + + case F_JSON_SEARCH: + error = db_json_search_dbval (result, args, num_args); + break; + case F_JSON_GET_ALL_PATHS: error = db_json_get_all_paths (result, args, num_args); - if (error != NO_ERROR) - { - PT_ERRORc (parser, NULL, er_msg ()); - return 0; - } break; + default: /* a supported function doesn't have const folding code */ assert (false); break; } + + if (error != NO_ERROR) + { + PT_ERRORc (parser, NULL, er_msg ()); + return 0; + } + return 1; } @@ -25308,3 +25527,25 @@ pt_cast_needs_wrap_for_collation (PT_NODE * node, const INTL_CODESET codeset) return false; } + +// +// pt_to_variable_size_type () - convert fixed size types to the equivalent variable size types +// +// return : if input type can have variable size, it returns the variable size. otherwise, returns input type +// type_enum (in) : any type +// +static PT_TYPE_ENUM +pt_to_variable_size_type (PT_TYPE_ENUM type_enum) +{ + switch (type_enum) + { + case PT_TYPE_CHAR: + return PT_TYPE_VARCHAR; + case PT_TYPE_NCHAR: + return PT_TYPE_VARNCHAR; + case PT_TYPE_BIT: + return PT_TYPE_VARBIT; + default: + return type_enum; + } +} diff --git a/src/parser/view_transform.c b/src/parser/view_transform.c index c3a7387e0a4..fdad31e7297 100644 --- a/src/parser/view_transform.c +++ b/src/parser/view_transform.c @@ -8854,14 +8854,24 @@ mq_class_lambda (PARSER_CONTEXT * parser, PT_NODE * statement, PT_NODE * class_, spec->info.spec.path_entities = NULL; if (newspec) { - if (newspec->info.spec.entity_name == NULL) + if (newspec->info.spec.derived_table_type == PT_DERIVED_JSON_TABLE) { - newspec->info.spec.entity_name = spec->info.spec.entity_name; - /* spec will be free later, we don't want the entity_name will be freed */ - spec->info.spec.entity_name = NULL; + /* flat_entity_list is needed to gather referenced oids in xasl_generation + * in pt_spec_to_xasl_class_oid_list */ + newspec->info.spec.flat_entity_list = spec->info.spec.flat_entity_list; + 
spec->info.spec.flat_entity_list = NULL; + } + else + { + if (newspec->info.spec.entity_name == NULL) + { + newspec->info.spec.entity_name = spec->info.spec.entity_name; + /* spec will be free later, we don't want the entity_name will be freed */ + spec->info.spec.entity_name = NULL; + } + newspec->info.spec.range_var->info.name.original = spec->info.spec.range_var->info.name.original; } - newspec->info.spec.range_var->info.name.original = spec->info.spec.range_var->info.name.original; newspec->info.spec.location = spec->info.spec.location; /* move join info */ if (spec->info.spec.join_type != PT_JOIN_NONE) diff --git a/src/parser/xasl_generation.c b/src/parser/xasl_generation.c index 723c400f4c7..42e71ae8adf 100644 --- a/src/parser/xasl_generation.c +++ b/src/parser/xasl_generation.c @@ -57,6 +57,7 @@ #include "semantic_check.h" #include "query_dump.h" #include "parser_support.h" +#include "compile_context.h" #if defined(WINDOWS) #include "wintcp.h" @@ -238,12 +239,26 @@ static REGU_VARIABLE *pt_to_regu_reserved_name (PARSER_CONTEXT * parser, PT_NODE static int pt_reserved_id_to_valuelist_index (PARSER_CONTEXT * parser, PT_RESERVED_NAME_ID reserved_id); static void pt_mark_spec_list_for_update_clause (PARSER_CONTEXT * parser, PT_NODE * statement, PT_SPEC_FLAG spec_flag); -static void update_value_list_out_list_regu_list (AGGREGATE_INFO * info, VAL_LIST * value_list, - REGU_VARIABLE_LIST out_list, REGU_VARIABLE_LIST regu_list, - REGU_VARIABLE * regu); +static void pt_aggregate_info_append_value_list (AGGREGATE_INFO * info, VAL_LIST * value_list); -static PT_NODE *pt_alloc_value_list_out_list_regu_list (PARSER_CONTEXT * parser, PT_NODE * node, VAL_LIST ** value_list, - REGU_VARIABLE_LIST * out_list, REGU_VARIABLE_LIST * regu_list); +static void pt_aggregate_info_update_value_and_reguvar_lists (AGGREGATE_INFO * info, VAL_LIST * value_list, + REGU_VARIABLE_LIST regu_position_list, + REGU_VARIABLE_LIST regu_constant_list); + +static void 
pt_aggregate_info_update_scan_regu_list (AGGREGATE_INFO * info, REGU_VARIABLE_LIST scan_regu_list); + +static PT_NODE *pt_node_list_to_value_and_reguvar_list (PARSER_CONTEXT * parser, PT_NODE * node, VAL_LIST ** value_list, + REGU_VARIABLE_LIST * regu_position_list); + +static PT_NODE *pt_make_regu_list_from_value_list (PARSER_CONTEXT * parser, PT_NODE * node, VAL_LIST * value_list, + REGU_VARIABLE_LIST * regu_list); + +static int pt_make_constant_regu_list_from_val_list (PARSER_CONTEXT * parser, VAL_LIST * value_list, + REGU_VARIABLE_LIST * regu_list); + +/* *INDENT-OFF* */ +static void pt_set_regu_list_pos_descr_from_idx (REGU_VARIABLE_LIST & regu_list, size_t starting_index); +/* *INDENT-ON* */ static PT_NODE *pt_fix_interpolation_aggregate_function_order_by (PARSER_CONTEXT * parser, PT_NODE * node); static int pt_fix_buildlist_aggregate_cume_dist_percent_rank (PARSER_CONTEXT * parser, PT_NODE * node, @@ -333,6 +348,16 @@ static ACCESS_SPEC_TYPE *pt_to_set_expr_table_spec_list (PARSER_CONTEXT * parser PT_NODE * where_part); static ACCESS_SPEC_TYPE *pt_to_cselect_table_spec_list (PARSER_CONTEXT * parser, PT_NODE * spec, PT_NODE * cselect, PT_NODE * src_derived_tbl); +static ACCESS_SPEC_TYPE *pt_to_json_table_spec_list (PARSER_CONTEXT * parser, PT_NODE * spec, PT_NODE * json_table, + PT_NODE * src_derived_tbl, PT_NODE * where_p); +static ACCESS_SPEC_TYPE *pt_make_json_table_access_spec (PARSER_CONTEXT * parser, REGU_VARIABLE * json_reguvar, + PRED_EXPR * where_pred, PT_JSON_TABLE_INFO * json_table, + TABLE_INFO * tbl_info); +static json_table_node *pt_make_json_table_spec_node (PARSER_CONTEXT * parser, PT_JSON_TABLE_INFO * json_table, + size_t & start_id, TABLE_INFO * tbl_info); +static void pt_make_json_table_spec_node_internal (PARSER_CONTEXT * parser, PT_JSON_TABLE_NODE_INFO * jt_node_info, + size_t & current_id, TABLE_INFO * tbl_info, + json_table_node & result); static XASL_NODE *pt_find_xasl (XASL_NODE * list, XASL_NODE * match); static void pt_set_aptr 
(PARSER_CONTEXT * parser, PT_NODE * select_node, XASL_NODE * xasl); static XASL_NODE *pt_append_scan (const XASL_NODE * to, const XASL_NODE * from); @@ -508,7 +533,8 @@ static int pt_split_pred_regu_list (PARSER_CONTEXT * parser, const VAL_LIST * va REGU_VARIABLE_LIST * prior_regu_list_rest, REGU_VARIABLE_LIST * prior_regu_list_pred, bool split_prior); -static void pt_add_regu_var_to_list (REGU_VARIABLE_LIST * regu_list_dst, REGU_VARIABLE_LIST regu_list_node); +static void pt_add_regu_var_to_list (REGU_VARIABLE_LIST * destination, REGU_VARIABLE_LIST source); +static void pt_merge_regu_var_lists (REGU_VARIABLE_LIST * destination, REGU_VARIABLE_LIST source); static PRED_REGU_VARIABLE_P_LIST pt_get_pred_regu_variable_p_list (const PRED_EXPR * pred, int *err); @@ -3525,12 +3551,13 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c { bool is_agg = 0; REGU_VARIABLE *regu = NULL, *scan_regu = NULL; + REGU_VARIABLE *regu_next = NULL, *scan_regu_next = NULL; REGU_VARIABLE *percentile_regu = NULL; AGGREGATE_TYPE *aggregate_list; AGGREGATE_INFO *info = (AGGREGATE_INFO *) arg; REGU_VARIABLE_LIST scan_regu_list; - REGU_VARIABLE_LIST out_list; - REGU_VARIABLE_LIST regu_list; + REGU_VARIABLE_LIST scan_regu_next_list; + REGU_VARIABLE_LIST out_list = NULL; REGU_VARIABLE_LIST regu_temp; VAL_LIST *value_list; MOP classop; @@ -3539,6 +3566,14 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c PT_NODE *pt_val = NULL; PT_NODE *percentile = NULL; + // it contains a list of positions + REGU_VARIABLE_LIST regu_position_list = NULL; + // it contains a list of constants, which will be used for the operands + REGU_VARIABLE_LIST regu_constant_list = NULL; + + REGU_VARIABLE_LIST scan_regu_constant_list = NULL; + int error_code = NO_ERROR; + *continue_walk = PT_CONTINUE_WALK; is_agg = pt_is_aggregate_function (parser, tree); @@ -3622,11 +3657,13 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c { 
if (aggregate_list->function != PT_CUME_DIST && aggregate_list->function != PT_PERCENT_RANK) { - regu = pt_to_regu_variable (parser, tree->info.function.arg_list, UNBOX_AS_VALUE); + regu_constant_list = pt_to_regu_variable_list (parser, tree->info.function.arg_list, UNBOX_AS_VALUE, + NULL, NULL); - scan_regu = pt_to_regu_variable (parser, tree->info.function.arg_list, UNBOX_AS_VALUE); + scan_regu_constant_list = pt_to_regu_variable_list (parser, tree->info.function.arg_list, UNBOX_AS_VALUE, + NULL, NULL); - if (!regu || !scan_regu) + if (!regu_constant_list || !scan_regu_constant_list) { return NULL; } @@ -3639,6 +3676,12 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c { return NULL; } + + REGU_VARIABLE_LIST to_add = regu_varlist_alloc (); + to_add->value = *regu; + + // insert also in the regu_constant_list to ensure compatibility + pt_add_regu_var_to_list (®u_constant_list, to_add); } aggregate_list->domain = pt_xasl_node_to_domain (parser, tree); @@ -3657,6 +3700,7 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c /* set the next argument pointer (the separator argument) to NULL in order to avoid impacting the * regu vars generation. 
*/ tree->info.function.arg_list->next = NULL; + pt_register_orphan_db_value (parser, aggregate_list->accumulator.value2); } else { @@ -3686,6 +3730,7 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c DB_DEFAULT_PRECISION, buf, 1, TP_DOMAIN_CODESET (aggregate_list->domain), TP_DOMAIN_COLLATION (aggregate_list->domain)); aggregate_list->accumulator.value2->need_clear = true; + pt_register_orphan_db_value (parser, aggregate_list->accumulator.value2); } else { @@ -3716,61 +3761,53 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c pt_val->info.value.data_value.i = 0; parser_append_node (pt_val, info->out_names); - pointer = - pt_alloc_value_list_out_list_regu_list (parser, tree->info.function.arg_list, &value_list, - &out_list, ®u_list); - if (pointer == NULL) + // for each element from arg_list we create a corresponding node in the value_list and regu_list + if (pt_node_list_to_value_and_reguvar_list (parser, tree->info.function.arg_list, + &value_list, ®u_position_list) == NULL) { - PT_ERROR (parser, tree, - msgcat_message (MSGCAT_CATALOG_CUBRID, MSGCAT_SET_PARSER_SEMANTIC, - MSGCAT_SEMANTIC_OUT_OF_MEMORY)); + PT_ERROR (parser, tree, msgcat_message (MSGCAT_CATALOG_CUBRID, MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_OUT_OF_MEMORY)); return NULL; } - aggregate_list->operand.type = TYPE_CONSTANT; - aggregate_list->operand.domain = pt_xasl_node_to_domain (parser, tree->info.function.arg_list); - aggregate_list->operand.value.dbvalptr = value_list->valp->val; - - regu_list->value.value.pos_descr.pos_no = info->out_list->valptr_cnt; - - update_value_list_out_list_regu_list (info, value_list, out_list, regu_list, regu); - - /* append regu to info->scan_regu_list */ - scan_regu_list = regu_varlist_alloc (); - if (!scan_regu_list) + error_code = pt_make_constant_regu_list_from_val_list (parser, value_list, &aggregate_list->operands); + if (error_code != NO_ERROR) { - PT_ERROR (parser, tree, - msgcat_message 
(MSGCAT_CATALOG_CUBRID, MSGCAT_SET_PARSER_SEMANTIC, - MSGCAT_SEMANTIC_OUT_OF_MEMORY)); + PT_ERROR (parser, tree, msgcat_message (MSGCAT_CATALOG_CUBRID, MSGCAT_SET_PARSER_SEMANTIC, + MSGCAT_SEMANTIC_OUT_OF_MEMORY)); return NULL; } - scan_regu->vfetch_to = pt_index_value (info->value_list, info->out_list->valptr_cnt - 1); - scan_regu_list->next = NULL; - scan_regu_list->value = *scan_regu; + // this regu_list has the TYPE_POSITION type so we need to set the corresponding indexes for elements + pt_set_regu_list_pos_descr_from_idx (regu_position_list, info->out_list->valptr_cnt); - regu_temp = info->scan_regu_list; - while (regu_temp->next) - { - regu_temp = regu_temp->next; - } - regu_temp->next = scan_regu_list; + // until now we have constructed the value_list, regu_list and out_list + // they are based on the current aggregate node information and we need to append them to the global + // information, i.e in info + pt_aggregate_info_update_value_and_reguvar_lists (info, value_list, regu_position_list, + regu_constant_list); + + // also we need to update the scan_regu_list from info + pt_aggregate_info_update_scan_regu_list (info, scan_regu_constant_list); } else { + assert (regu_constant_list != NULL && regu_constant_list->next == NULL); + /* for buildlist CUME_DIST/PERCENT_RANK, we have special treatment */ if (pt_fix_buildlist_aggregate_cume_dist_percent_rank (parser, tree->info.function.order_by, info, regu) != NO_ERROR) { return NULL; } - aggregate_list->operand = *regu; + + aggregate_list->operands = regu_constant_list; } } else { - /* handle the buildvalue case, simply uses regu as the operand */ - aggregate_list->operand = *regu; + // handle the buildvalue case, simply uses regu as the operand + aggregate_list->operands = regu_constant_list; } } else @@ -3785,8 +3822,14 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c regu_dbval_type_init (aggregate_list->accumulator.value2, DB_TYPE_INTEGER); aggregate_list->opr_dbtype = 
DB_TYPE_INTEGER; - /* hack. we need to pack some domain even though we don't need one, so we'll pack the int. */ - aggregate_list->operand.domain = &tp_Integer_domain; + aggregate_list->operands = regu_varlist_alloc (); + if (aggregate_list->operands == NULL) + { + PT_INTERNAL_ERROR (parser, "allocate new node"); + return NULL; + } + /* hack. we need to pack some domain even though we don't need one, so we'll pack the int. */ + aggregate_list->operands->value.domain = &tp_Integer_domain; } /* record the value for pt_to_regu_variable to use in "out arith" */ @@ -3809,6 +3852,9 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c return NULL; } + REGU_VARIABLE_LIST to_add = regu_varlist_alloc (); + to_add->value = *regu; + /* build list */ if (!PT_IS_CONST (percentile) && info->out_list != NULL && info->value_list != NULL && info->regu_list != NULL) @@ -3826,8 +3872,7 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c info->out_names = parser_append_node (pointer, info->out_names); /* put percentile in value_list, out_list and regu_list */ - pointer = pt_alloc_value_list_out_list_regu_list (parser, pointer, &value_list, &out_list, ®u_list); - if (pointer == NULL) + if (pt_node_list_to_value_and_reguvar_list (parser, pointer, &value_list, ®u_position_list) == NULL) { PT_ERROR (parser, percentile, msgcat_message (MSGCAT_CATALOG_CUBRID, MSGCAT_SET_PARSER_SEMANTIC, @@ -3849,9 +3894,9 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c aggregate_list->info.percentile.percentile_reguvar = percentile_regu; /* fix count for list position */ - regu_list->value.value.pos_descr.pos_no = info->out_list->valptr_cnt; + regu_position_list->value.value.pos_descr.pos_no = info->out_list->valptr_cnt; - update_value_list_out_list_regu_list (info, value_list, out_list, regu_list, regu); + pt_aggregate_info_update_value_and_reguvar_lists (info, value_list, regu_position_list, to_add); } else { 
@@ -3943,8 +3988,7 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c /* append the name on the out list */ info->out_names = parser_append_node (pointer, info->out_names); - pointer = pt_alloc_value_list_out_list_regu_list (parser, pointer, &value_list, &out_list, ®u_list); - if (pointer == NULL) + if (pt_node_list_to_value_and_reguvar_list (parser, pointer, &value_list, ®u_position_list) == NULL) { PT_ERROR (parser, tree, msgcat_message (MSGCAT_CATALOG_CUBRID, MSGCAT_SET_PARSER_SEMANTIC, @@ -3953,7 +3997,7 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c } /* fix count for list position */ - regu_list->value.value.pos_descr.pos_no = info->out_list->valptr_cnt; + regu_position_list->value.value.pos_descr.pos_no = info->out_list->valptr_cnt; regu = pt_to_regu_variable (parser, tree, UNBOX_AS_VALUE); if (regu == NULL) @@ -3961,7 +4005,13 @@ pt_to_aggregate_node (PARSER_CONTEXT * parser, PT_NODE * tree, void *arg, int *c return NULL; } - update_value_list_out_list_regu_list (info, value_list, out_list, regu_list, regu); + REGU_VARIABLE_LIST to_add = regu_varlist_alloc (); + to_add->value = *regu; + + // insert also in the regu_constant_list to ensure compatibility + pt_add_regu_var_to_list (®u_constant_list, to_add); + + pt_aggregate_info_update_value_and_reguvar_lists (info, value_list, regu_position_list, regu_constant_list); } *continue_walk = PT_LIST_WALK; } @@ -4543,6 +4593,150 @@ pt_make_class_access_spec (PARSER_CONTEXT * parser, PT_NODE * flat, DB_OBJECT * return spec; } +static void +pt_create_json_table_column (PARSER_CONTEXT * parser, PT_NODE * jt_column, TABLE_INFO * tbl_info, + json_table_column & col_result) +{ + col_result.m_function = jt_column->info.json_table_column_info.func; + col_result.m_output_value_pointer = pt_index_value (tbl_info->value_list, + pt_find_attribute (parser, + jt_column->info.json_table_column_info.name, + tbl_info->attribute_list)); + if 
(col_result.m_output_value_pointer == NULL) + { + assert (false); + } + + col_result.m_domain = pt_xasl_node_to_domain (parser, jt_column); + + if (jt_column->info.json_table_column_info.path != NULL) + { + col_result.m_path = jt_column->info.json_table_column_info.path; + } + + col_result.m_column_name = (char *) jt_column->info.json_table_column_info.name->info.name.original; + + col_result.m_on_empty = jt_column->info.json_table_column_info.on_empty; + col_result.m_on_error = jt_column->info.json_table_column_info.on_error; +} + +// +// pt_make_json_table_spec_node_internal () - recursive function to generate json table access tree +// +// parser (in) : parser context +// jt_node_info (in) : json table parser node info +// current_id (in/out) : as input ID for this node, output next ID (after all nested nodes in current branch) +// tbl_info (in) : table info cache +// result (out) : a node in json table access tree based on json table node info +// +static void +pt_make_json_table_spec_node_internal (PARSER_CONTEXT * parser, PT_JSON_TABLE_NODE_INFO * jt_node_info, + size_t & current_id, TABLE_INFO * tbl_info, json_table_node & result) +{ + size_t i = 0; + PT_NODE *itr; + + // copy path + result.m_path = (char *) jt_node_info->path; + + // after set the id, increment + result.m_id = current_id++; + + // by default expand type is none + result.m_expand_type = json_table_expand_type::JSON_TABLE_NO_EXPAND; + + // set the expand type + if (json_table_node::str_ends_with (result.m_path, "[*]")) + { + result.m_expand_type = json_table_expand_type::JSON_TABLE_ARRAY_EXPAND; + } + else if (json_table_node::str_ends_with (result.m_path, ".*")) + { + result.m_expand_type = json_table_expand_type::JSON_TABLE_OBJECT_EXPAND; + } + + if (result.check_need_expand ()) + { + // trim the path to extract directly from this new path + result.set_parent_path (); + } + + // create columns + result.m_output_columns_size = 0; + for (itr = jt_node_info->columns; itr != NULL; itr = 
itr->next, ++result.m_output_columns_size) + ; + + result.m_output_columns = + (json_table_column *) pt_alloc_packing_buf (sizeof (json_table_column) * result.m_output_columns_size); + + for (itr = jt_node_info->columns, i = 0; itr != NULL; itr = itr->next, i++) + { + pt_create_json_table_column (parser, itr, tbl_info, result.m_output_columns[i]); + } + + // create children + result.m_nested_nodes_size = 0; + for (itr = jt_node_info->nested_paths; itr != NULL; itr = itr->next, ++result.m_nested_nodes_size) + ; + + result.m_nested_nodes = + (json_table_node *) pt_alloc_packing_buf (sizeof (json_table_node) * result.m_nested_nodes_size); + + for (itr = jt_node_info->nested_paths, i = 0; itr != NULL; itr = itr->next, i++) + { + pt_make_json_table_spec_node_internal (parser, &itr->info.json_table_node_info, current_id, tbl_info, + result.m_nested_nodes[i]); + } +} + +// +// pt_make_json_table_spec_node () - create json table access tree +// +// return : pointer to generated json_table_node +// parser (in) : parser context +// json_table (in) : json table parser node info +// start_id (in/out) : output total node count (root + nested) +// tbl_info (in) : table info cache +// +static json_table_node * +pt_make_json_table_spec_node (PARSER_CONTEXT * parser, PT_JSON_TABLE_INFO * json_table, size_t & start_id, + TABLE_INFO * tbl_info) +{ + json_table_node *root_node = (json_table_node *) pt_alloc_packing_buf (sizeof (json_table_node)); + pt_make_json_table_spec_node_internal (parser, &json_table->tree->info.json_table_node_info, start_id, tbl_info, + *root_node); + return root_node; +} + +// +// pt_make_json_table_access_spec () - make json access spec +// +// return : pointer to access spec +// parser (in) : parser context +// json_reguvar (in) : reguvar for json table expression +// where_pred (in) : json table scan filter predicate +// json_table (in) : json table parser node info +// tbl_info (in) : table info cache +// +static ACCESS_SPEC_TYPE * 
+pt_make_json_table_access_spec (PARSER_CONTEXT * parser, REGU_VARIABLE * json_reguvar, PRED_EXPR * where_pred, + PT_JSON_TABLE_INFO * json_table, TABLE_INFO * tbl_info) +{ + ACCESS_SPEC_TYPE *spec; + size_t start_id = 0; + + spec = pt_make_access_spec (TARGET_JSON_TABLE, ACCESS_METHOD_JSON_TABLE, NULL, NULL, where_pred, NULL); + + if (spec) + { + spec->s.json_table_node.m_root_node = pt_make_json_table_spec_node (parser, json_table, start_id, tbl_info); + spec->s.json_table_node.m_json_reguvar = json_reguvar; + // each node will have its own incremental id, so we can count the nr of nodes based on this identifier + spec->s.json_table_node.m_node_count = start_id; + } + + return spec; +} /* * pt_make_list_access_spec () - Create an initialized @@ -6136,7 +6330,11 @@ pt_function_to_regu (PARSER_CONTEXT * parser, PT_NODE * function) case F_JSON_KEYS: case F_JSON_REMOVE: case F_JSON_ARRAY_APPEND: + case F_JSON_ARRAY_INSERT: + case F_JSON_CONTAINS_PATH: case F_JSON_MERGE: + case F_JSON_MERGE_PATCH: + case F_JSON_SEARCH: case F_JSON_GET_ALL_PATHS: result_type = pt_node_to_db_type (function); break; @@ -6965,13 +7163,15 @@ pt_to_regu_variable (PARSER_CONTEXT * parser, PT_NODE * node, UNBOX unbox) || node->info.expr.op == PT_ADDTIME || node->info.expr.op == PT_DEFINE_VARIABLE || node->info.expr.op == PT_CHR || node->info.expr.op == PT_CLOB_TO_CHAR || node->info.expr.op == PT_INDEX_PREFIX || node->info.expr.op == PT_FROM_TZ - || node->info.expr.op == PT_JSON_TYPE + || node->info.expr.op == PT_JSON_TYPE || node->info.expr.op == PT_JSON_QUOTE + || node->info.expr.op == PT_JSON_UNQUOTE || node->info.expr.op == PT_JSON_EXTRACT || node->info.expr.op == PT_JSON_VALID || node->info.expr.op == PT_JSON_LENGTH || node->info.expr.op == PT_JSON_DEPTH - || node->info.expr.op == PT_JSON_SEARCH) + || node->info.expr.op == PT_JSON_PRETTY) { r1 = pt_to_regu_variable (parser, node->info.expr.arg1, unbox); - if ((node->info.expr.op == PT_CONCAT || node->info.expr.op == PT_JSON_LENGTH) + if 
((node->info.expr.op == PT_CONCAT || node->info.expr.op == PT_JSON_LENGTH + || node->info.expr.op == PT_JSON_QUOTE || node->info.expr.op == PT_JSON_UNQUOTE) && node->info.expr.arg2 == NULL) { r2 = NULL; @@ -7022,7 +7222,7 @@ pt_to_regu_variable (PARSER_CONTEXT * parser, PT_NODE * node, UNBOX unbox) if (node->info.expr.op == PT_DATE_FORMAT || node->info.expr.op == PT_STR_TO_DATE || node->info.expr.op == PT_TIME_FORMAT || node->info.expr.op == PT_FORMAT - || node->info.expr.op == PT_INDEX_PREFIX || node->info.expr.op == PT_JSON_SEARCH) + || node->info.expr.op == PT_INDEX_PREFIX) { r3 = pt_to_regu_variable (parser, node->info.expr.arg3, unbox); } @@ -7460,8 +7660,14 @@ pt_to_regu_variable (PARSER_CONTEXT * parser, PT_NODE * node, UNBOX unbox) case PT_JSON_DEPTH: regu = pt_make_regu_arith (r1, NULL, NULL, T_JSON_DEPTH, domain); break; - case PT_JSON_SEARCH: - regu = pt_make_regu_arith (r1, r2, r3, T_JSON_SEARCH, domain); + case PT_JSON_QUOTE: + regu = pt_make_regu_arith (r1, NULL, NULL, T_JSON_QUOTE, domain); + break; + case PT_JSON_UNQUOTE: + regu = pt_make_regu_arith (r1, NULL, NULL, T_JSON_UNQUOTE, domain); + break; + case PT_JSON_PRETTY: + regu = pt_make_regu_arith (r1, NULL, NULL, T_JSON_PRETTY, domain); break; case PT_CONCAT_WS: regu = pt_make_regu_arith (r1, r2, r3, T_CONCAT_WS, domain); @@ -11976,6 +12182,24 @@ pt_to_cselect_table_spec_list (PARSER_CONTEXT * parser, PT_NODE * spec, PT_NODE return NULL; } +static ACCESS_SPEC_TYPE * +pt_to_json_table_spec_list (PARSER_CONTEXT * parser, PT_NODE * spec, PT_NODE * json_table, + PT_NODE * src_derived_tbl, PT_NODE * where_p) +{ + ACCESS_SPEC_TYPE *access; + + PRED_EXPR *where = pt_to_pred_expr (parser, where_p); + + TABLE_INFO *tbl_info = pt_find_table_info (spec->info.spec.id, parser->symbols->table_info); + assert (tbl_info != NULL); + + REGU_VARIABLE *regu_var = pt_to_regu_variable (parser, json_table->info.json_table_info.expr, UNBOX_AS_VALUE); + + access = pt_make_json_table_access_spec (parser, regu_var, where, 
&json_table->info.json_table_info, tbl_info); + + return access; +} + /* * pt_to_cte_table_spec_list () - Convert a PT_NODE CTE to an ACCESS_SPEC_LIST of representations of the classes to be selected from @@ -12110,11 +12334,24 @@ pt_to_spec_list (PARSER_CONTEXT * parser, PT_NODE * spec, PT_NODE * where_key_pa { access = pt_to_showstmt_spec_list (parser, spec, where_part); } - else + else if (spec->info.spec.derived_table_type == PT_IS_CSELECT) { /* a CSELECT derived table */ access = pt_to_cselect_table_spec_list (parser, spec, spec->info.spec.derived_table, src_derived_tbl); } + else if (spec->info.spec.derived_table_type == PT_DERIVED_JSON_TABLE) + { + /* PT_JSON_DERIVED_TABLE derived table */ + access = + pt_to_json_table_spec_list (parser, spec, spec->info.spec.derived_table, src_derived_tbl, where_part); + } + else + { + // unrecognized derived table type + assert (false); + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_GENERIC_ERROR, 0); + return NULL; + } } else { @@ -12128,7 +12365,6 @@ pt_to_spec_list (PARSER_CONTEXT * parser, PT_NODE * spec, PT_NODE * where_key_pa return access; } - /* * pt_to_val_list () - * return: val_list corresponding to the entity spec @@ -12503,7 +12739,13 @@ pt_uncorr_post (PARSER_CONTEXT * parser, PT_NODE * node, void *arg, int *continu * After validation, the CTE XASL is added to the list */ PT_NODE *non_recursive_part = node->info.cte.non_recursive_part; - assert (PT_IS_QUERY (non_recursive_part)); + // non_recursive_part can become PT_VALUE during constant folding + assert (PT_IS_QUERY (non_recursive_part) || PT_IS_VALUE_NODE (non_recursive_part)); + if (PT_IS_VALUE_NODE (non_recursive_part)) + { + info->xasl = pt_append_xasl (xasl, info->xasl); + break; + } if (non_recursive_part->info.query.correlation_level == 0) { @@ -17145,6 +17387,7 @@ pt_make_aptr_parent_node (PARSER_CONTEXT * parser, PT_NODE * node, PROC_TYPE typ REGU_VARIABLE_LIST regu_attributes; xasl = regu_xasl_node_alloc (type); + if (xasl != NULL && node != NULL) 
{ if (PT_IS_QUERY_NODE_TYPE (node->node_type)) @@ -18766,6 +19009,7 @@ pt_mark_spec_list_for_update_clause (PARSER_CONTEXT * parser, PT_NODE * statemen * parser(in): * select_list(in): * from(in): + * with(in): * class_specs(in): * where(in): * using_index(in): @@ -18783,7 +19027,7 @@ pt_mark_spec_list_for_update_clause (PARSER_CONTEXT * parser, PT_NODE * statemen */ PT_NODE * pt_to_upd_del_query (PARSER_CONTEXT * parser, PT_NODE * select_names, PT_NODE * select_list, PT_NODE * from, - PT_NODE * class_specs, PT_NODE * where, PT_NODE * using_index, PT_NODE * order_by, + PT_NODE * with, PT_NODE * class_specs, PT_NODE * where, PT_NODE * using_index, PT_NODE * order_by, PT_NODE * orderby_for, int server_op, SCAN_OPERATION_TYPE scan_op_type) { PT_NODE *statement = NULL, *from_temp = NULL, *node = NULL; @@ -18794,6 +19038,8 @@ pt_to_upd_del_query (PARSER_CONTEXT * parser, PT_NODE * select_names, PT_NODE * statement = parser_new_node (parser, PT_SELECT); if (statement != NULL) { + statement->info.query.with = with; + /* this is an internally built query */ PT_SELECT_INFO_SET_FLAG (statement, PT_SELECT_INFO_IS_UPD_DEL_QUERY); @@ -19119,6 +19365,7 @@ pt_to_delete_xasl (PARSER_CONTEXT * parser, PT_NODE * statement) PT_NODE *aptr_statement = NULL; PT_NODE *from; PT_NODE *where; + PT_NODE *with; PT_NODE *using_index; PT_NODE *class_specs; PT_NODE *cl_name_node; @@ -19137,6 +19384,7 @@ pt_to_delete_xasl (PARSER_CONTEXT * parser, PT_NODE * statement) where = statement->info.delete_.search_cond; using_index = statement->info.delete_.using_index; class_specs = statement->info.delete_.class_specs; + with = statement->info.delete_.with; if (from && from->node_type == PT_SPEC && from->info.spec.range_var) { @@ -19226,7 +19474,7 @@ pt_to_delete_xasl (PARSER_CONTEXT * parser, PT_NODE * statement) } if (((aptr_statement = - pt_to_upd_del_query (parser, NULL, select_list, from, class_specs, where, using_index, NULL, NULL, 1, + pt_to_upd_del_query (parser, NULL, select_list, from, with, 
class_specs, where, using_index, NULL, NULL, 1, S_DELETE)) == NULL) || pt_copy_upddel_hints_to_select (parser, statement, aptr_statement) != NO_ERROR || ((aptr_statement = mq_translate (parser, aptr_statement)) == NULL)) @@ -19710,6 +19958,7 @@ pt_to_update_xasl (PARSER_CONTEXT * parser, PT_NODE * statement, PT_NODE ** non_ int num_cond_reev_classes = 0; PT_NODE *from = NULL; PT_NODE *where = NULL; + PT_NODE *with = NULL; PT_NODE *using_index = NULL; PT_NODE *class_specs = NULL; int cl = 0, cls_idx = 0, num_vals = 0, num_consts = 0; @@ -19747,6 +19996,7 @@ pt_to_update_xasl (PARSER_CONTEXT * parser, PT_NODE * statement, PT_NODE ** non_ class_specs = statement->info.update.class_specs; order_by = statement->info.update.order_by; orderby_for = statement->info.update.orderby_for; + with = statement->info.update.with; /* flush all classes */ p = from; @@ -19817,7 +20067,7 @@ pt_to_update_xasl (PARSER_CONTEXT * parser, PT_NODE * statement, PT_NODE ** non_ } aptr_statement = - pt_to_upd_del_query (parser, select_names, select_values, from, class_specs, where, using_index, order_by, + pt_to_upd_del_query (parser, select_names, select_values, from, with, class_specs, where, using_index, order_by, orderby_for, 1, S_UPDATE); /* restore assignment list here because we need to iterate through assignments later */ pt_restore_assignment_links (statement->info.update.assignment, links, -1); @@ -19843,7 +20093,10 @@ pt_to_update_xasl (PARSER_CONTEXT * parser, PT_NODE * statement, PT_NODE ** non_ aptr_statement = mq_translate (parser, aptr_statement); if (aptr_statement == NULL) { - assert (er_errid () != NO_ERROR); + if (pt_has_error (parser)) + { + pt_report_to_ersys_with_statement (parser, PT_SEMANTIC, aptr_statement); + } error = er_errid (); if (error == NO_ERROR) { @@ -21846,35 +22099,46 @@ pt_get_var_regu_variable_p_list (const REGU_VARIABLE * regu, bool is_prior, int /* * pt_add_regu_var_to_list () - adds a regu list node to another regu list - * return: - * 
regu_list_dst(in/out): - * regu_list_node(in/out): + * return: + * destination (in/out) : + * source (in/out) : */ static void -pt_add_regu_var_to_list (REGU_VARIABLE_LIST * regu_list_dst, REGU_VARIABLE_LIST regu_list_node) +pt_add_regu_var_to_list (REGU_VARIABLE_LIST * destination, REGU_VARIABLE_LIST source) { - REGU_VARIABLE_LIST rl; + source->next = NULL; + + pt_merge_regu_var_lists (destination, source); +} - regu_list_node->next = NULL; +/* + * pt_merge_regu_var_lists () - appends the source to the end of the destination regu var list + * return: + * destination (in/out): + * source (in/out): + */ +static void +pt_merge_regu_var_lists (REGU_VARIABLE_LIST * destination, REGU_VARIABLE_LIST source) +{ + REGU_VARIABLE_LIST itr; - if (!*regu_list_dst) + if ((*destination) == NULL) { - *regu_list_dst = regu_list_node; + *destination = source; } else { - rl = *regu_list_dst; - while (rl->next) - { - rl = rl->next; - } - rl->next = regu_list_node; + // get the end of the list + for (itr = *destination; itr->next != NULL; itr = itr->next) + ; + + // append it + itr->next = source; } } /* - * pt_build_do_stmt_aptr_list_pre () - build an XASL list of top level - * queries + * pt_build_do_stmt_aptr_list_pre () - build an XASL list of top level queries * returns: original node * node(in): node to check * arg(out): first node in list @@ -24273,11 +24537,12 @@ pt_to_merge_update_xasl (PARSER_CONTEXT * parser, PT_NODE * statement, PT_NODE * aptr_statement = mq_translate (parser, aptr_statement); if (aptr_statement == NULL) { -#if 0 /* TODO */ - assert (er_errid () != NO_ERROR); -#endif + if (pt_has_error (parser)) + { + pt_report_to_ersys_with_statement (parser, PT_SEMANTIC, aptr_statement); + } error = er_errid (); - if (error == NO_ERROR && !pt_has_error (parser)) + if (error == NO_ERROR) { error = ER_GENERIC_ERROR; er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 0); @@ -24718,11 +24983,12 @@ pt_to_merge_insert_xasl (PARSER_CONTEXT * parser, PT_NODE * statement, PT_NODE * 
aptr_statement = mq_translate (parser, aptr_statement); if (aptr_statement == NULL) { -#if 0 /* TODO */ - assert (er_errid () != NO_ERROR); -#endif + if (pt_has_error (parser)) + { + pt_report_to_ersys_with_statement (parser, PT_SEMANTIC, aptr_statement); + } error = er_errid (); - if (error == NO_ERROR && !pt_has_error (parser)) + if (error == NO_ERROR) { error = ER_GENERIC_ERROR; er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 0); @@ -25391,75 +25657,131 @@ pt_set_limit_optimization_flags (PARSER_CONTEXT * parser, QO_PLAN * qo_plan, XAS } /* - * update_value_list_out_list_regu_list () - - * update the related lists for pt_to_aggregate_node - * - * return : - * info (in/out) : - * value_list (in) : - * out_list (in) : - * regu_list (in) : - * regu (in) : + * pt_aggregate_info_append_value_list () - Appends the value_list in the aggregate info->value_list, increasing also + * the val_cnt + * info (in/out) : + * value_list (in) : */ static void -update_value_list_out_list_regu_list (AGGREGATE_INFO * info, VAL_LIST * value_list, REGU_VARIABLE_LIST out_list, - REGU_VARIABLE_LIST regu_list, REGU_VARIABLE * regu) +pt_aggregate_info_append_value_list (AGGREGATE_INFO * info, VAL_LIST * value_list) { + assert (info != NULL && info->value_list != NULL && value_list != NULL); + + // increase the size with the number of elements in the value_list + info->value_list->val_cnt += value_list->val_cnt; + QPROC_DB_VALUE_LIST value_temp = NULL; - REGU_VARIABLE_LIST regu_temp = NULL; - assert (info != NULL && info->value_list != NULL && info->out_list != NULL && info->regu_list != NULL - && value_list != NULL && out_list != NULL && regu_list != NULL && regu != NULL); + // get the end of the list + for (value_temp = info->value_list->valp; value_temp->next != NULL; value_temp = value_temp->next) + ; + + assert (value_temp != NULL); + + // append to the end + value_temp->next = value_list->valp; +} + +/* + * pt_aggregate_info_update_value_and_reguvar_lists () - Merges the arguments in 
the aggregate info corresponding lists + * info (in/out) : + * value_list (in) : + * regu_position_list (in) : + * regu_constant_list (in) : + */ +static void +pt_aggregate_info_update_value_and_reguvar_lists (AGGREGATE_INFO * info, VAL_LIST * value_list, + REGU_VARIABLE_LIST regu_position_list, + REGU_VARIABLE_LIST regu_constant_list) +{ + pt_aggregate_info_append_value_list (info, value_list); + + pt_merge_regu_var_lists (&info->regu_list, regu_position_list); - /* append value holder to value_list */ - info->value_list->val_cnt++; + pt_merge_regu_var_lists (&info->out_list->valptrp, regu_constant_list); - value_temp = info->value_list->valp; - while (value_temp->next) + // also increment list count + size_t regu_constant_list_size = 0; + + for (REGU_VARIABLE_LIST ptr = regu_constant_list; ptr != NULL; ptr = ptr->next, regu_constant_list_size++) + ; + + info->out_list->valptr_cnt += regu_constant_list_size; +} + +/* + * pt_aggregate_info_update_scan_regu_list () - Merges scan_regu_list in the aggregate info->scan_regu_list + * info (in/out) : + * scan_regu_list (in) : + */ +static void +pt_aggregate_info_update_scan_regu_list (AGGREGATE_INFO * info, REGU_VARIABLE_LIST scan_regu_list) +{ + REGU_VARIABLE_LIST tail = NULL; + size_t scan_regu_list_size = 0; + size_t index = 0; + + // calculate the size of scan_regu_var_list + for (tail = scan_regu_list; tail != NULL; tail = tail->next, scan_regu_list_size++) + ; + + // start fetching for the last scan_regu_var_list_size elements + index = info->value_list->val_cnt - scan_regu_list_size; + + for (REGU_VARIABLE_LIST itr = scan_regu_list; itr != NULL; itr = itr->next) { - value_temp = value_temp->next; + // get the value from the value_list + itr->value.vfetch_to = pt_index_value (info->value_list, index++); } - value_temp->next = value_list->valp; - /* append out_list to info->out_list */ - info->out_list->valptr_cnt++; - out_list->next = NULL; - out_list->value = *regu; + // append scan_regu_list to info + 
pt_merge_regu_var_lists (&info->scan_regu_list, scan_regu_list); +} - regu_temp = info->out_list->valptrp; - while (regu_temp->next) +/* + * pt_node_list_to_value_and_reguvar_list () - Constructs the value_list and regu_position_list from node + * parser (in) : + * node (in) : + * value_list (in/out) : + * regu_position_list (in/out) : + */ +static PT_NODE * +pt_node_list_to_value_and_reguvar_list (PARSER_CONTEXT * parser, PT_NODE * node, VAL_LIST ** value_list, + REGU_VARIABLE_LIST * regu_position_list) +{ + assert (node != NULL && value_list != NULL); + + *value_list = pt_make_val_list (parser, node); + + if (*value_list == NULL) { - regu_temp = regu_temp->next; + return NULL; } - regu_temp->next = out_list; - /* append regu to info->regu_list */ - regu_temp = info->regu_list; - while (regu_temp->next) + if (pt_make_regu_list_from_value_list (parser, node, *value_list, regu_position_list) == NULL) { - regu_temp = regu_temp->next; + return NULL; } - regu_temp->next = regu_list; + + return node; } /* - * pt_alloc_value_list_out_list_regu_list () - - * parser (in) : - * node (in) : - * value_list (in/out) : - * out_list (in/out) : - * regu_list (in/out) : + * pt_make_regu_list_from_value_list () - creates a regu_list from value_list with TYPE POSITION + * parser (in) : + * node (in) : + * value_list (in) : + * regu_list (in/out) : */ static PT_NODE * -pt_alloc_value_list_out_list_regu_list (PARSER_CONTEXT * parser, PT_NODE * node, VAL_LIST ** value_list, - REGU_VARIABLE_LIST * out_list, REGU_VARIABLE_LIST * regu_list) +pt_make_regu_list_from_value_list (PARSER_CONTEXT * parser, PT_NODE * node, VAL_LIST * value_list, + REGU_VARIABLE_LIST * regu_list) { + assert (node != NULL && value_list != NULL && regu_list != NULL); + int *attr_offsets = NULL; bool out_of_memory = false; - assert (node != NULL && value_list != NULL && out_list != NULL && regu_list != NULL); - - /* begin alloc */ attr_offsets = pt_make_identity_offsets (node); if (attr_offsets == NULL) { @@ 
-25467,29 +25789,14 @@ pt_alloc_value_list_out_list_regu_list (PARSER_CONTEXT * parser, PT_NODE * node, goto end; } - *value_list = pt_make_val_list (parser, node); - if (*value_list == NULL) - { - out_of_memory = true; - goto end; - } - - *regu_list = pt_to_position_regu_variable_list (parser, node, *value_list, attr_offsets); + *regu_list = pt_to_position_regu_variable_list (parser, node, value_list, attr_offsets); if (*regu_list == NULL) { out_of_memory = true; goto end; } - *out_list = regu_varlist_alloc (); - if (*out_list == NULL) - { - out_of_memory = true; - goto end; - } - end: - if (attr_offsets != NULL) { free_and_init (attr_offsets); @@ -25504,6 +25811,61 @@ pt_alloc_value_list_out_list_regu_list (PARSER_CONTEXT * parser, PT_NODE * node, return node; } +/* + * pt_make_constant_regu_list_from_val_list () - creates a regu list with constant type from value_list + * parser (in) : + * value_list (in) : + * regu_list (in/out) : + */ +static int +pt_make_constant_regu_list_from_val_list (PARSER_CONTEXT * parser, VAL_LIST * value_list, + REGU_VARIABLE_LIST * regu_list) +{ + assert (*regu_list == NULL); + + size_t value_list_size = value_list->val_cnt; + QPROC_DB_VALUE_LIST crt_val = value_list->valp; + REGU_VARIABLE_LIST last = NULL; + + for (size_t i = 0; i < value_list_size; i++, crt_val = crt_val->next) + { + REGU_VARIABLE_LIST crt_regu = regu_varlist_alloc (); + if (crt_regu == NULL) + { + return ER_OUT_OF_VIRTUAL_MEMORY; + } + + crt_regu->value.type = TYPE_CONSTANT; + crt_regu->value.domain = crt_val->dom; + crt_regu->value.value.dbvalptr = crt_val->val; + + // set head + if (*regu_list == NULL) + { + *regu_list = crt_regu; + last = *regu_list; + } + // append + else + { + last->next = crt_regu; + last = last->next; + } + } + + return NO_ERROR; +} + +static void +pt_set_regu_list_pos_descr_from_idx (REGU_VARIABLE_LIST & regu_list, size_t starting_index) +{ + for (REGU_VARIABLE_LIST crt_regu = regu_list; crt_regu != NULL; crt_regu = crt_regu->next) + { + 
assert (crt_regu->value.type == TYPE_POSITION); + crt_regu->value.value.pos_descr.pos_no = starting_index++; + } +} + /* * pt_fix_interpolation_aggregate_function_order_by () - * diff --git a/src/parser/xasl_generation.h b/src/parser/xasl_generation.h index c003edf85fd..494b85deb11 100644 --- a/src/parser/xasl_generation.h +++ b/src/parser/xasl_generation.h @@ -132,8 +132,8 @@ extern REGU_VARIABLE *pt_make_regu_arith (const REGU_VARIABLE * arg1, const REGU extern TP_DOMAIN *pt_xasl_type_enum_to_domain (const PT_TYPE_ENUM type); extern TP_DOMAIN *pt_xasl_node_to_domain (PARSER_CONTEXT * parser, const PT_NODE * node); extern PT_NODE *pt_to_upd_del_query (PARSER_CONTEXT * parser, PT_NODE * select_names, PT_NODE * select_list, - PT_NODE * from, PT_NODE * class_specs, PT_NODE * where, PT_NODE * using_index, - PT_NODE * order_by, PT_NODE * orderby_for, int server_op, + PT_NODE * from, PT_NODE * with, PT_NODE * class_specs, PT_NODE * where, + PT_NODE * using_index, PT_NODE * order_by, PT_NODE * orderby_for, int server_op, SCAN_OPERATION_TYPE scan_op_type); extern XASL_NODE *pt_to_insert_xasl (PARSER_CONTEXT * parser, PT_NODE * node); extern PRED_EXPR_WITH_CONTEXT *pt_to_pred_with_context (PARSER_CONTEXT * parser, PT_NODE * filter_pred, PT_NODE * spec); diff --git a/src/query/arithmetic.c b/src/query/arithmetic.c index 5cfc3693bd1..a694804f418 100644 --- a/src/query/arithmetic.c +++ b/src/query/arithmetic.c @@ -41,6 +41,7 @@ #include "numeric_opfunc.h" #include "crypt_opfunc.h" #include "string_opfunc.h" +#include "tz_support.h" #include "db_date.h" #include "db_json.hpp" @@ -1978,7 +1979,7 @@ db_mod_dbval (DB_VALUE * result, DB_VALUE * value1, DB_VALUE * value2) static double round_double (double num, double integer) { - /* + /* * Under high optimization level, some optimizers (e.g, gcc -O3 on linux) * generates a wrong result without "volatile". 
*/ @@ -3139,7 +3140,7 @@ db_log_dbval (DB_VALUE * result, DB_VALUE * value1, DB_VALUE * value2) static double truncate_double (double num, double integer) { - /* + /* * Under high optimization level, some optimizers (e.g, gcc -O3 on linux) * generates a wrong result without "volatile". */ @@ -4828,7 +4829,7 @@ db_width_bucket (DB_VALUE * result, const DB_VALUE * value1, const DB_VALUE * va if (type == DB_TYPE_BIGINT) { - /* cast bigint to numeric Compiler doesn't support long double (80 or 128bits), so we use numeric instead. If + /* cast bigint to numeric Compiler doesn't support long double (80 or 128bits), so we use numeric instead. If * a high precision lib is introduced or long double is full supported, remove this part and use the lib or * long double to calculate. */ /* convert value1 */ @@ -5141,7 +5142,7 @@ db_json_type_dbval (const DB_VALUE * json, DB_VALUE * type_res) type = db_json_get_type_as_str (db_get_json_document (json)); length = strlen (type); - return db_make_char (type_res, length, (DB_C_CHAR) type, length, LANG_COERCIBLE_CODESET, LANG_COERCIBLE_COLL); + return db_make_varchar (type_res, length, (DB_C_CHAR) type, length, LANG_COERCIBLE_CODESET, LANG_COERCIBLE_COLL); } } @@ -5224,6 +5225,198 @@ db_json_depth_dbval (DB_VALUE * json, DB_VALUE * res) } } +int +db_json_unquote_dbval (DB_VALUE * json, DB_VALUE * res) +{ + int error_code; + + if (DB_IS_NULL (json)) + { + error_code = db_make_null (res); + } + else + { + char *str = NULL; + + error_code = db_json_unquote (*db_get_json_document (json), str); + if (error_code != NO_ERROR) + { + return error_code; + } + + error_code = db_make_string (res, str); + if (error_code != NO_ERROR) + { + return error_code; + } + + // db_json_unquote uses strdup, therefore set need_clear flag + res->need_clear = true; + } + + return error_code; +} + +int +db_json_pretty_dbval (DB_VALUE * json, DB_VALUE * res) +{ + int error_code; + + if (DB_IS_NULL (json)) + { + error_code = db_make_null (res); + } + else + { + 
char *str = NULL; + + db_json_pretty_func (*db_get_json_document (json), str); + + error_code = db_make_string (res, str); + if (error_code != NO_ERROR) + { + return error_code; + } + + + // db_json_pretty_func uses strdup, therefore set need_clear flag + res->need_clear = true; + } + + return error_code; +} + +int +db_json_arrayagg_dbval_accumulate (DB_VALUE * json, DB_VALUE * json_res) +{ + JSON_DOC *this_doc; + JSON_DOC *result_doc = NULL; + int error_code = NO_ERROR; + + if (DB_IS_NULL (json)) + { + // this case should not be possible because we already wrapped a NULL value into a JSON with type DB_JSON_NULL + assert (false); + db_make_null (json_res); + return ER_FAILED; + } + + // get the current value + this_doc = db_get_json_document (json); + + // append to existing document + // allocate only first time + if (DB_IS_NULL (json_res)) + { + result_doc = db_json_allocate_doc (); + db_make_json (json_res, result_doc, true); + } + else + { + result_doc = db_get_json_document (json_res); + } + + db_json_arrayagg_func_accumulate (this_doc, *result_doc); + + if (result_doc == NULL) + { + db_make_null (json_res); + return ER_FAILED; + } + + return error_code; +} + +/* + * db_json_objectagg_dbval_accumulate () - Construct a Member (key-value pair) and add it in the result_json + * + * return : error_code + * json_key (in) : the key of the pair + * json_val (in) : the value of the pair + * json_res (in) : the DB_VALUE that contains the document where we want to insert + */ +int +db_json_objectagg_dbval_accumulate (DB_VALUE * json_key, DB_VALUE * json_val, DB_VALUE * json_res) +{ + JSON_DOC *val_doc; + const char *key_str = NULL; + JSON_DOC *result_doc = NULL; + int error_code = NO_ERROR; + + // this case should not be possible because we checked before if the key is NULL + // and wrapped the value with a JSON with DB_JSON_NULL type + if (DB_IS_NULL (json_key) || DB_IS_NULL (json_val)) + { + assert (false); + db_make_null (json_res); + return ER_FAILED; + } + + // get 
the current key + key_str = db_get_string (json_key); + + // get the current value + val_doc = db_get_json_document (json_val); + + // append to existing document + // allocate only first time + if (DB_IS_NULL (json_res)) + { + result_doc = db_json_allocate_doc (); + db_make_json (json_res, result_doc, true); + } + else + { + result_doc = db_get_json_document (json_res); + } + + db_json_objectagg_func_accumulate (key_str, val_doc, *result_doc); + + if (result_doc == NULL) + { + db_make_null (json_res); + return ER_FAILED; + } + + return NO_ERROR; +} + +/* + * db_json_merge () - Inserts a JSON_OBJECT/JSON_ARRAY with possibly multiple members in the result_json + * + * return : error_code + * json (in) : the JSON_OBJECT/JSON_ARRAY that we want to insert + * json_res (in) : the DB_VALUE that contains the document where we want to insert + */ +int +db_json_merge (DB_VALUE * json, DB_VALUE * json_res) +{ + // this case should not be possible because we did the checking before + // also the method should be called after we already created the json_res (in the first iteration) + if (DB_IS_NULL (json) || DB_IS_NULL (json_res)) + { + assert (false); + db_make_null (json_res); + return ER_FAILED; + } + + JSON_DOC *current_doc = NULL; + JSON_DOC *result_doc = NULL; + + // get the current document that we want to insert + current_doc = db_get_json_document (json); + + assert (db_value_domain_type (json_res) == DB_TYPE_JSON); + + // get the resulting json document + result_doc = db_get_json_document (json_res); + + // merge the two jsons (preserve, not patch) + db_json_merge_func (current_doc, result_doc, false); + + return NO_ERROR; +} + int db_json_extract_dbval (const DB_VALUE * json, const DB_VALUE * path, DB_VALUE * json_res) { diff --git a/src/query/arithmetic.h b/src/query/arithmetic.h index bd110919228..00cc713e36e 100644 --- a/src/query/arithmetic.h +++ b/src/query/arithmetic.h @@ -74,5 +74,10 @@ extern int db_json_extract_dbval (const DB_VALUE * json, const DB_VALUE * 
path, extern int db_json_valid_dbval (const DB_VALUE * json, DB_VALUE * type_res); extern int db_json_length_dbval (const DB_VALUE * json, const DB_VALUE * path, DB_VALUE * res); extern int db_json_depth_dbval (DB_VALUE * json, DB_VALUE * res); +extern int db_json_unquote_dbval (DB_VALUE * json, DB_VALUE * res); +extern int db_json_pretty_dbval (DB_VALUE * json, DB_VALUE * res); +extern int db_json_arrayagg_dbval_accumulate (DB_VALUE * dbval, DB_VALUE * res); +extern int db_json_objectagg_dbval_accumulate (DB_VALUE * json_key, DB_VALUE * json_val, DB_VALUE * json_res); +extern int db_json_merge (DB_VALUE * json, DB_VALUE * json_res); extern int db_least_or_greatest (DB_VALUE * arg1, DB_VALUE * arg2, DB_VALUE * result, bool least); #endif /* _ARITHMETIC_H_ */ diff --git a/src/query/cursor.h b/src/query/cursor.h index 536de697a30..aa837c08442 100644 --- a/src/query/cursor.h +++ b/src/query/cursor.h @@ -29,7 +29,6 @@ #include "config.h" #include "error_manager.h" -#include "dbdef.h" #include "query_list.h" #include "storage_common.h" #include "object_primitive.h" diff --git a/src/query/execute_statement.c b/src/query/execute_statement.c index 28e5bb28cd6..1e0204a1849 100644 --- a/src/query/execute_statement.c +++ b/src/query/execute_statement.c @@ -46,7 +46,6 @@ #include "error_manager.h" #include "db.h" #include "dbi.h" -#include "dbdef.h" #include "dbtype.h" #include "parser.h" #include "porting.h" @@ -86,6 +85,7 @@ #include "xasl_to_stream.h" #include "query_cl.h" #include "parser_support.h" +#include "tz_support.h" #include "dbtype.h" #if defined (SUPPRESS_STRLEN_WARNING) @@ -6818,9 +6818,9 @@ static int update_savepoint_number = 0; static void unlink_list (PT_NODE * list); static QFILE_LIST_ID *get_select_list_to_update (PARSER_CONTEXT * parser, PT_NODE * from, PT_NODE * column_names, - PT_NODE * column_values, PT_NODE * where, PT_NODE * order_by, - PT_NODE * orderby_for, PT_NODE * using_index, PT_NODE * class_specs, - PT_NODE * update_stmt); + PT_NODE * 
column_values, PT_NODE * with, PT_NODE * where, + PT_NODE * order_by, PT_NODE * orderby_for, PT_NODE * using_index, + PT_NODE * class_specs, PT_NODE * update_stmt); static int update_object_attribute (PARSER_CONTEXT * parser, DB_OTMPL * otemplate, PT_NODE * name, DB_ATTDESC * attr_desc, DB_VALUE * value); static int update_object_tuple (PARSER_CONTEXT * parser, CLIENT_UPDATE_INFO * assigns, int assigns_count, @@ -6873,6 +6873,7 @@ unlink_list (PT_NODE * list) * parser(in): Parser context * from(in): Parse tree of an FROM class * column_values(in): Column list in SELECT clause + * with(in): WITH clause * where(in): WHERE clause * order_by(in): ORDER BY clause * orderby_num(in): converted from ORDER BY with LIMIT @@ -6883,8 +6884,8 @@ unlink_list (PT_NODE * list) */ static QFILE_LIST_ID * get_select_list_to_update (PARSER_CONTEXT * parser, PT_NODE * from, PT_NODE * column_names, PT_NODE * column_values, - PT_NODE * where, PT_NODE * order_by, PT_NODE * orderby_for, PT_NODE * using_index, - PT_NODE * class_specs, PT_NODE * update_stmt) + PT_NODE * with, PT_NODE * where, PT_NODE * order_by, PT_NODE * orderby_for, + PT_NODE * using_index, PT_NODE * class_specs, PT_NODE * update_stmt) { PT_NODE *statement = NULL; QFILE_LIST_ID *result = NULL; @@ -6893,8 +6894,9 @@ get_select_list_to_update (PARSER_CONTEXT * parser, PT_NODE * from, PT_NODE * co assert (parser->query_id == NULL_QUERY_ID); if (from && (from->node_type == PT_SPEC) && from->info.spec.range_var - && ((statement = pt_to_upd_del_query (parser, column_names, column_values, from, class_specs, where, using_index, - order_by, orderby_for, 0 /* not server update */ , S_UPDATE)) != NULL)) + && ((statement = + pt_to_upd_del_query (parser, column_names, column_values, from, with, class_specs, where, using_index, + order_by, orderby_for, 0 /* not server update */ , S_UPDATE)) != NULL)) { err = pt_copy_upddel_hints_to_select (parser, update_stmt, statement); if (err != NO_ERROR) @@ -8493,9 +8495,10 @@ update_real_class 
(PARSER_CONTEXT * parser, PT_NODE * statement, bool savepoint_ /* get the oid's and new values */ oid_list = get_select_list_to_update (parser, statement->info.update.spec, select_names, select_values, - statement->info.update.search_cond, statement->info.update.order_by, - statement->info.update.orderby_for, statement->info.update.using_index, - statement->info.update.class_specs, statement); + statement->info.update.with, statement->info.update.search_cond, + statement->info.update.order_by, statement->info.update.orderby_for, + statement->info.update.using_index, statement->info.update.class_specs, + statement); /* restore tree structure */ pt_restore_assignment_links (statement->info.update.assignment, links, -1); @@ -9055,9 +9058,9 @@ do_prepare_update (PARSER_CONTEXT * parser, PT_NODE * statement) /* make sure that lhs->info.name.meta_class != PT_META_ATTR */ select_statement = pt_to_upd_del_query (parser, select_names, select_values, statement->info.update.spec, - statement->info.update.class_specs, statement->info.update.search_cond, - statement->info.update.using_index, statement->info.update.order_by, - statement->info.update.orderby_for, 0, S_UPDATE); + statement->info.update.with, statement->info.update.class_specs, + statement->info.update.search_cond, statement->info.update.using_index, + statement->info.update.order_by, statement->info.update.orderby_for, 0, S_UPDATE); /* restore tree structure; pt_get_assignment_lists() */ pt_restore_assignment_links (statement->info.update.assignment, links, -1); @@ -9417,12 +9420,12 @@ select_delete_list (PARSER_CONTEXT * parser, QFILE_LIST_ID ** result_p, PT_NODE int ret = NO_ERROR; assert (parser->query_id == NULL_QUERY_ID); + assert (delete_stmt->info.delete_.with == NULL); - statement = pt_to_upd_del_query (parser, NULL, NULL, delete_stmt->info.delete_.spec, delete_stmt->info.delete_.class_specs, delete_stmt->info.delete_.search_cond, delete_stmt->info.delete_.using_index, NULL, NULL, 0 /* not - * server - * 
update - */ , - S_DELETE); + statement = pt_to_upd_del_query (parser, NULL, NULL, delete_stmt->info.delete_.spec, delete_stmt->info.delete_.with, + delete_stmt->info.delete_.class_specs, delete_stmt->info.delete_.search_cond, + delete_stmt->info.delete_.using_index, NULL, NULL, + 0 /* not server update */ , S_DELETE); if (statement != NULL) { ret = pt_copy_upddel_hints_to_select (parser, delete_stmt, statement); @@ -10343,8 +10346,9 @@ do_prepare_delete (PARSER_CONTEXT * parser, PT_NODE * statement, PT_NODE * paren PT_DELETE_INFO *delete_info; delete_info = &statement->info.delete_; + select_statement = - pt_to_upd_del_query (parser, NULL, NULL, delete_info->spec, delete_info->class_specs, + pt_to_upd_del_query (parser, NULL, NULL, delete_info->spec, delete_info->with, delete_info->class_specs, delete_info->search_cond, delete_info->using_index, NULL, NULL, 0, S_DELETE); err = pt_copy_upddel_hints_to_select (parser, statement, select_statement); if (err != NO_ERROR) @@ -13405,6 +13409,7 @@ do_prepare_insert (PARSER_CONTEXT * parser, PT_NODE * statement) PT_NODE *values = NULL; PT_NODE *attr_list; PT_NODE *update = NULL; + PT_NODE *with = NULL; int save_au; if (statement == NULL || statement->node_type != PT_INSERT || statement->info.insert.spec == NULL @@ -17497,8 +17502,7 @@ do_insert_checks (PARSER_CONTEXT * parser, PT_NODE * statement, PT_NODE ** class } /* Check whether the statement can be executed with commit. */ - if (statement->info.insert.server_allowed == SERVER_INSERT_IS_ALLOWED - && statement->info.insert.odku_assignments == NULL) + if (statement->info.insert.server_allowed == SERVER_INSERT_IS_ALLOWED) { /* Check statement insert trigger. 
*/ error = sm_class_has_triggers ((*class_)->info.name.db_object, &trigger_involved, TR_EVENT_STATEMENT_INSERT); diff --git a/src/query/fetch.c b/src/query/fetch.c index 478bd12400e..f6b939d453a 100644 --- a/src/query/fetch.c +++ b/src/query/fetch.c @@ -46,6 +46,7 @@ #include "string_opfunc.h" #include "server_interface.h" #include "query_opfunc.h" +#include "tz_support.h" #include "db_date.h" #include "xasl.h" #include "query_executor.h" @@ -649,19 +650,18 @@ fetch_peek_arith (THREAD_ENTRY * thread_p, REGU_VARIABLE * regu_var, VAL_DESCR * } break; + case T_JSON_QUOTE: + case T_JSON_UNQUOTE: case T_JSON_TYPE: case T_JSON_VALID: case T_JSON_DEPTH: + case T_JSON_PRETTY: if (fetch_peek_dbval (thread_p, arithptr->leftptr, vd, NULL, obj_oid, tpl, &peek_left) != NO_ERROR) { goto error; } break; - case T_JSON_SEARCH: - er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_DB_UNIMPLEMENTED, 1, "JSON_SEARCH"); - goto error; - case T_JSON_CONTAINS: if (fetch_peek_dbval (thread_p, arithptr->leftptr, vd, NULL, obj_oid, tpl, &peek_left) != NO_ERROR) { @@ -2644,6 +2644,13 @@ fetch_peek_arith (THREAD_ENTRY * thread_p, REGU_VARIABLE * regu_var, VAL_DESCR * } break; + case T_JSON_PRETTY: + if (qdata_json_pretty_dbval (peek_left, arithptr->value, regu_var->domain) != NO_ERROR) + { + goto error; + } + break; + case T_JSON_EXTRACT: if (qdata_json_extract_dbval (peek_left, peek_right, arithptr->value, regu_var->domain) != NO_ERROR) { @@ -2673,6 +2680,20 @@ fetch_peek_arith (THREAD_ENTRY * thread_p, REGU_VARIABLE * regu_var, VAL_DESCR * } break; + case T_JSON_QUOTE: + if (qdata_json_quote_dbval (peek_left, arithptr->value, regu_var->domain) != NO_ERROR) + { + goto error; + } + break; + + case T_JSON_UNQUOTE: + if (qdata_json_unquote_dbval (peek_left, arithptr->value, regu_var->domain) != NO_ERROR) + { + goto error; + } + break; + case T_CONCAT: if (arithptr->rightptr != NULL) { @@ -4040,8 +4061,12 @@ fetch_peek_dbval (THREAD_ENTRY * thread_p, REGU_VARIABLE * regu_var, VAL_DESCR * case 
F_JSON_KEYS: case F_JSON_REMOVE: case F_JSON_ARRAY_APPEND: + case F_JSON_ARRAY_INSERT: + case F_JSON_CONTAINS_PATH: case F_JSON_MERGE: + case F_JSON_MERGE_PATCH: case F_JSON_GET_ALL_PATHS: + case F_JSON_SEARCH: { REGU_VARIABLE_LIST operand; @@ -4232,7 +4257,11 @@ fetch_peek_dbval (THREAD_ENTRY * thread_p, REGU_VARIABLE * regu_var, VAL_DESCR * case F_JSON_KEYS: case F_JSON_REMOVE: case F_JSON_ARRAY_APPEND: + case F_JSON_ARRAY_INSERT: + case F_JSON_CONTAINS_PATH: + case F_JSON_SEARCH: case F_JSON_MERGE: + case F_JSON_MERGE_PATCH: case F_JSON_GET_ALL_PATHS: break; diff --git a/src/query/numeric_opfunc.c b/src/query/numeric_opfunc.c index 64c776789c0..ffd53addc0a 100644 --- a/src/query/numeric_opfunc.c +++ b/src/query/numeric_opfunc.c @@ -35,6 +35,7 @@ #include "mprec.h" #include "numeric_opfunc.h" +#include "tz_support.h" #include "db_date.h" #include "memory_alloc.h" #include "system_parameter.h" diff --git a/src/query/partition_sr.h b/src/query/partition_sr.h index 8dd6925a287..eb56ea4a519 100644 --- a/src/query/partition_sr.h +++ b/src/query/partition_sr.h @@ -32,7 +32,6 @@ #include "thread_compat.hpp" #include "xasl.h" - /* object for caching objects used in multi row modify statements for each partition */ typedef struct pruning_scan_cache PRUNING_SCAN_CACHE; struct pruning_scan_cache diff --git a/src/query/query_cl.c b/src/query/query_cl.c index a19bbcdfe9a..0661ce13410 100644 --- a/src/query/query_cl.c +++ b/src/query/query_cl.c @@ -31,9 +31,12 @@ #include #include "query_cl.h" + +#include "compile_context.h" #include "optimizer.h" #include "network_interface_cl.h" #include "transaction_cl.h" +#include "xasl.h" /* * prepare_query () - Prepares a query for later (and repetitive) diff --git a/src/query/query_cl.h b/src/query/query_cl.h index c5deb9cd5b4..76baaa56307 100644 --- a/src/query/query_cl.h +++ b/src/query/query_cl.h @@ -30,7 +30,11 @@ #error Does not belong to server module #endif /* defined (SERVER_MODE) */ -extern int prepare_query (COMPILE_CONTEXT * 
context, XASL_STREAM * stream); +// forward definition +struct compile_context; +struct xasl_stream; + +extern int prepare_query (compile_context * context, xasl_stream * stream); extern int execute_query (const XASL_ID * xasl_id, QUERY_ID * query_idp, int var_cnt, const DB_VALUE * varptr, QFILE_LIST_ID ** list_idp, QUERY_FLAG flag, CACHE_TIME * clt_cache_time, CACHE_TIME * srv_cache_time); diff --git a/src/query/query_dump.c b/src/query/query_dump.c index 774b83ba9c6..def61cbe55b 100644 --- a/src/query/query_dump.c +++ b/src/query/query_dump.c @@ -1343,6 +1343,10 @@ qdump_function_type_string (FUNC_TYPE ftype) return "PERCENTILE_CONT"; case PT_PERCENTILE_DISC: return "PERCENTILE_DISC"; + case PT_JSON_ARRAYAGG: + return "JSON_ARRAYAGG"; + case PT_JSON_OBJECTAGG: + return "JSON_OBJECTAGG"; case F_TABLE_SET: return "F_TABLE_SET"; case F_TABLE_MULTISET: @@ -1385,8 +1389,16 @@ qdump_function_type_string (FUNC_TYPE ftype) return "JSON_REMOVE"; case F_JSON_ARRAY_APPEND: return "JSON_ARRAY_APPEND"; + case F_JSON_ARRAY_INSERT: + return "JSON_ARRAY_INSERT"; + case F_JSON_CONTAINS_PATH: + return "JSON_CONTAINS_PATH"; + case F_JSON_SEARCH: + return "JSON_SEARCH"; case F_JSON_MERGE: return "JSON_MERGE"; + case F_JSON_MERGE_PATCH: + return "JSON_MERGE_PATCH"; case F_JSON_GET_ALL_PATHS: return "JSON_GET_ALL_PATHS"; default: @@ -1889,9 +1901,13 @@ qdump_print_aggregate_expression (AGGREGATE_TYPE * aggptr) fprintf (foutput, "%s ", qdump_option_string (aggptr->option)); - if (!qdump_print_value (&aggptr->operand)) + REGU_VARIABLE_LIST operand = NULL; + for (operand = aggptr->operands; operand != NULL; operand = operand->next) { - return false; + if (!qdump_print_value (&operand->value)) + { + return false; + } } if (!qdump_print_list_id (aggptr->list_id)) diff --git a/src/query/query_evaluator.c b/src/query/query_evaluator.c index 711251b8cef..a01226c3342 100644 --- a/src/query/query_evaluator.c +++ b/src/query/query_evaluator.c @@ -2537,6 +2537,8 @@ eval_pred_rlike7 (THREAD_ENTRY 
* thread_p, PRED_EXPR * pr, VAL_DESCR * vd, OID * PR_EVAL_FNC eval_fnc (THREAD_ENTRY * thread_p, PRED_EXPR * pr, DB_TYPE * single_node_type) { + // todo - thread_p is never used + COMP_EVAL_TERM *et_comp; ALSM_EVAL_TERM *et_alsm; diff --git a/src/query/query_executor.c b/src/query/query_executor.c index 51f1b58ae86..7440191ee5c 100644 --- a/src/query/query_executor.c +++ b/src/query/query_executor.c @@ -49,11 +49,13 @@ #include "query_manager.h" #include "extendible_hash.h" #include "replication.h" +#include "elo.h" #include "db_elo.h" #include "locator_sr.h" #include "xserver_interface.h" #include "tz_support.h" #include "session.h" +#include "tz_support.h" #include "db_date.h" #include "btree_load.h" #include "query_dump.h" @@ -1099,16 +1101,21 @@ qexec_end_one_iteration (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * ret = NO_ERROR; #if defined (ENABLE_COMPOSITE_LOCK) - /* At this moment composite locking is not used, but it can be activated at some point in the future. So we leave + /* At this moment composite locking is not used, but it can be activated at some point in the future. So we leave * it as it is. 
*/ if (false) { OID *class_oid = NULL; - if (xasl->aptr_list && xasl->aptr_list->type == BUILDLIST_PROC - && xasl->aptr_list->proc.buildlist.push_list_id) + XASL_NODE *aptr = xasl->aptr_list; + if (aptr) + { + for (XASL_NODE * crt = aptr->next; crt; crt = crt->next, aptr = aptr->next) + ; + } + if (aptr && aptr->type == BUILDLIST_PROC && aptr->proc.buildlist.push_list_id) { - class_oid = &ACCESS_SPEC_CLS_OID (xasl->aptr_list->spec_list); + class_oid = &ACCESS_SPEC_CLS_OID (aptr->spec_list); } ret = @@ -1884,6 +1891,13 @@ qexec_clear_access_spec_list (XASL_NODE * xasl_p, THREAD_ENTRY * thread_p, ACCES case S_SET_SCAN: pg_cnt += qexec_clear_regu_list (xasl_p, p->s_id.s.ssid.scan_pred.regu_list, is_final); break; + case S_JSON_TABLE_SCAN: + { + bool jt_clear_default_values = + XASL_IS_FLAGED (xasl_p, XASL_DECACHE_CLONE) == p->clear_value_at_clone_decache; + p->s_id.s.jtid.clear (xasl_p, is_final, jt_clear_default_values); + } + break; case S_SHOWSTMT_SCAN: break; case S_METHOD_SCAN: @@ -1949,6 +1963,9 @@ qexec_clear_access_spec_list (XASL_NODE * xasl_p, THREAD_ENTRY * thread_p, ACCES pg_cnt += qexec_clear_regu_var (xasl_p, p->s_id.s.ssid.set_ptr, is_final); pr_clear_value (&p->s_id.s.ssid.set); break; + case TARGET_JSON_TABLE: + pg_cnt += qexec_clear_regu_var (xasl_p, p->s.json_table_node.m_json_reguvar, is_final); + break; case TARGET_METHOD: pg_cnt += qexec_clear_regu_list (xasl_p, p->s.method_node.method_regu_list, is_final); break; @@ -2037,7 +2054,7 @@ qexec_clear_agg_list (XASL_NODE * xasl_p, AGGREGATE_TYPE * list, bool is_final) } } - pg_cnt += qexec_clear_regu_var (xasl_p, &p->operand, is_final); + pg_cnt += qexec_clear_regu_variable_list (xasl_p, p->operands, is_final); p->domain = p->original_domain; p->opr_dbtype = p->original_opr_dbtype; } @@ -2070,7 +2087,7 @@ qexec_clear_xasl (THREAD_ENTRY * thread_p, XASL_NODE * xasl, bool is_final) decache_clone_flag = xasl->flag & XASL_DECACHE_CLONE; - /* + /* ** We set this because in some M paths (e.g. 
when a driver crashes) ** the function qexec_clear_xasl() can be called recursively. By setting ** the query_in_progress flag, we prevent qmgr_clear_trans_wakeup() from @@ -2611,7 +2628,7 @@ qexec_clear_all_lists (THREAD_ENTRY * thread_p, XASL_NODE * xasl_list) qexec_clear_all_lists (thread_p, xasl->fptr_list); } - /* Note: Dptr lists are only procedure blocks (other than aptr_list) which can produce a LIST FILE. Therefore, we + /* Note: Dptr lists are only procedure blocks (other than aptr_list) which can produce a LIST FILE. Therefore, we * are trying to clear all the dptr_list result LIST FILES in the XASL tree per iteration. */ if (xasl->dptr_list) { @@ -2730,7 +2747,7 @@ qexec_eval_ordbynum_pred (THREAD_ENTRY * thread_p, ORDBYNUM_INFO * ordby_info) if (ordby_info->ordbynum_pred) { - /* + /* * Evaluate the predicate. * CUBRID does not currently support such predicates in WHERE condition * lists but might support them in future versions (see the usage of @@ -2886,7 +2903,7 @@ qexec_ordby_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *arg) if (ovfl_vpid.pageid == NULL_PAGEID) { - /* This is the normal case of a non-overflow tuple. We can use the page image directly, since we know + /* This is the normal case of a non-overflow tuple. We can use the page image directly, since we know * that the tuple resides entirely on that page. */ data = page + key->s.original.offset; @@ -3015,7 +3032,7 @@ qexec_fill_sort_limit (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * x { if (dom_status == DOMAIN_OVERFLOW) { - /* The limit is too bog to fit an integer. However, since this limit is used to keep the sort run flushes + /* The limit is too bog to fit an integer. 
However, since this limit is used to keep the sort run flushes * small (for instance only keep the first 10 elements of each run if ORDER BY LIMIT 10 is specified), * there is no conceivable way this limit would be useful if it is larger than 2.147 billion: such a * large run is infeasible anyway. So if it does not fit into an integer, discard it. */ @@ -3551,7 +3568,7 @@ qexec_clear_groupby_state (THREAD_ENTRY * thread_p, GROUPBY_STATE * gbstate) gbstate->gby_rec.area_size = 0; } gbstate->output_tplrec = NULL; - /* + /* * Don't cleanup gbstate->input_tpl; the memory it points to was * managed by the listfile manager (via input_scan), and it's not * ours to free. @@ -3895,7 +3912,7 @@ qexec_hash_gby_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *a data = page + key->s.original.offset; if (vpid.pageid != NULL_PAGEID) { - /* + /* * This sucks; why do we need two different structures to * accomplish exactly the same goal? */ @@ -3905,7 +3922,7 @@ qexec_hash_gby_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *a if (dummy.tpl != context->tuple_recdes.data) { - /* + /* * DON'T FREE THE BUFFER! qfile_get_tuple() already did * that, and what you have here in gby_rec is a dangling * pointer. @@ -3929,7 +3946,7 @@ qexec_hash_gby_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *a } else { - /* + /* * sorting over all columns (i.e. no aggregate functions); build * tuple from sort key. */ @@ -4056,7 +4073,7 @@ qexec_gby_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *arg) peek = COPY; /* default */ if (info->key_info.use_original) { /* P_sort_key */ - /* + /* * Retrieve the original tuple. This will be the case if the * original tuple had more fields than we were sorting on. 
*/ @@ -4101,7 +4118,7 @@ qexec_gby_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *arg) data = page + key->s.original.offset; if (vpid.pageid != NULL_PAGEID) { - /* + /* * This sucks; why do we need two different structures to * accomplish exactly the same goal? */ @@ -4111,7 +4128,7 @@ qexec_gby_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *arg) if (dummy.tpl != info->gby_rec.data) { - /* + /* * DON'T FREE THE BUFFER! qfile_get_tuple() already did * that, and what you have here in gby_rec is a dangling * pointer. @@ -4133,7 +4150,7 @@ qexec_gby_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *arg) } else { /* A_sort_key */ - /* + /* * We didn't record the original vpid, and we should just * reconstruct the original record from this sort key (rather * than pressure the page buffer pool by reading in the original @@ -4148,7 +4165,7 @@ qexec_gby_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *arg) if (info->input_recs == 0) { - /* + /* * First record we've seen; put it out and set up the group * comparison key(s). */ @@ -4196,14 +4213,14 @@ qexec_gby_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *arg) } else if ((*info->cmp_fn) (&info->current_key.data, &key, &info->key_info) == 0) { - /* + /* * Still in the same group; accumulate the tuple and proceed, * leaving the group key the same. */ } else { - /* + /* * We got a new group; finalize the group we were accumulating, * and start a new group using the current key as the group key. */ @@ -4376,7 +4393,7 @@ qexec_groupby (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xasl_stat GOTO_EXIT_ON_ERROR; } - /* + /* * Create a new listfile to receive the results. */ { @@ -4388,7 +4405,7 @@ qexec_groupby (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xasl_stat GOTO_EXIT_ON_ERROR; } /* If it does not have 'order by'(xasl->orderby_list), then the list file to be open at here will be the last one. 
- * Otherwise, the last list file will be open at qexec_orderby_distinct(). (Note that only one that can have 'group + * Otherwise, the last list file will be open at qexec_orderby_distinct(). (Note that only one that can have 'group * by' is BUILDLIST_PROC type.) And, the top most XASL is the other condition for the list file to be the last * result file. */ @@ -4585,7 +4602,7 @@ qexec_groupby (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xasl_stat gbstate.agg_hash_context->part_scan_code = S_END; } - /* + /* * Open a scan on the unsorted input file */ if (qfile_open_list_scan (list_id, &input_scan_id) != NO_ERROR) @@ -4594,7 +4611,7 @@ qexec_groupby (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xasl_stat } gbstate.input_scan = &input_scan_id; - /* + /* * Now load up the sort module and set it off... */ gbstate.key_info.use_original = (gbstate.key_info.nkeys != list_id->type_list.type_cnt); @@ -4622,7 +4639,7 @@ qexec_groupby (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xasl_stat GOTO_EXIT_ON_ERROR; } - /* + /* * There may be one unfinished group in the output, since the sort_listfile * interface doesn't include a finalization function. If so, finish * off that group. @@ -5277,7 +5294,7 @@ qexec_merge_list (THREAD_ENTRY * thread_p, QFILE_LIST_ID * outer_list_idp, QFILE } /* When a list file is sorted on a column, all the NULL values appear at the beginning of the list. So, we know that - * all the following values in the inner/outer column are BOUND(not NULL) values. Depending on the join type, we must + * all the following values in the inner/outer column are BOUND(not NULL) values. Depending on the join type, we must * skip or join with a NULL opposite row, when a NULL is encountered. 
*/ /* move the outer(left) scan to the first tuple */ @@ -6277,9 +6294,9 @@ qexec_merge_listfiles (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * x } } - /* If MERGELIST_PROC does not have 'order by' (xasl->orderby_list), then the list file to be open at here will be the + /* If MERGELIST_PROC does not have 'order by' (xasl->orderby_list), then the list file to be open at here will be the * last one. Otherwise, the last list file will be open at qexec_orderby_distinct(). (Note that only one that can - * have 'group by' is BUILDLIST_PROC type.) And, the top most XASL is the other condition for the list file to be the + * have 'group by' is BUILDLIST_PROC type.) And, the top most XASL is the other condition for the list file to be the * last result file. */ QFILE_SET_FLAG (ls_flag, QFILE_FLAG_ALL); @@ -6366,7 +6383,7 @@ qexec_merge_listfiles (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * x * grouped(in) : Grouped scan flag * iscan_oid_order(in) : * s_id(out) : Set to the scan identifier - * p_mvcc_select_lock_needed(out): true, whether instance lock needed at select + * p_mvcc_select_lock_needed(out): true, whether instance lock needed at select * * Note: This routine is used to open a scan on an access specification * node. A scan identifier is created with the given parameters. 
@@ -6453,6 +6470,7 @@ qexec_open_scan (THREAD_ENTRY * thread_p, ACCESS_SPEC_TYPE * curr_spec, VAL_LIST er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_QPROC_INVALID_XASLNODE, 0); return ER_FAILED; } /* if */ + if (scan_type == S_HEAP_SCAN || scan_type == S_HEAP_SCAN_RECORD_INFO) { if (scan_open_heap_scan (thread_p, s_id, mvcc_select_lock_needed, scan_op_type, fixed, grouped, @@ -6581,6 +6599,15 @@ qexec_open_scan (THREAD_ENTRY * thread_p, ACCESS_SPEC_TYPE * curr_spec, VAL_LIST } break; + case TARGET_JSON_TABLE: + /* open a json table based derived table scan */ + if (scan_open_json_table_scan (thread_p, s_id, grouped, curr_spec->single_fetch, curr_spec->s_dbval, val_list, + vd, curr_spec->where_pred) != NO_ERROR) + { + goto exit_on_error; + } + break; + case TARGET_METHOD: if (scan_open_method_scan (thread_p, s_id, grouped, curr_spec->single_fetch, curr_spec->s_dbval, val_list, vd, ACCESS_SPEC_METHOD_LIST_ID (curr_spec), @@ -6628,50 +6655,66 @@ qexec_open_scan (THREAD_ENTRY * thread_p, ACCESS_SPEC_TYPE * curr_spec, VAL_LIST static void qexec_close_scan (THREAD_ENTRY * thread_p, ACCESS_SPEC_TYPE * curr_spec) { - if (curr_spec) + if (curr_spec == NULL) { - /* monitoring */ - switch (curr_spec->type) + return; + } + + /* monitoring */ + switch (curr_spec->type) + { + case TARGET_CLASS: + if (curr_spec->access == ACCESS_METHOD_SEQUENTIAL || curr_spec->access == ACCESS_METHOD_SEQUENTIAL_RECORD_INFO + || curr_spec->access == ACCESS_METHOD_SEQUENTIAL_PAGE_SCAN) { - case TARGET_CLASS: - if (curr_spec->access == ACCESS_METHOD_SEQUENTIAL || curr_spec->access == ACCESS_METHOD_SEQUENTIAL_RECORD_INFO - || curr_spec->access == ACCESS_METHOD_SEQUENTIAL_PAGE_SCAN) - { - perfmon_inc_stat (thread_p, PSTAT_QM_NUM_SSCANS); - } - else if (IS_ANY_INDEX_ACCESS (curr_spec->access)) - { - perfmon_inc_stat (thread_p, PSTAT_QM_NUM_ISCANS); - } - if (curr_spec->parts != NULL) - { - /* reset pruning info */ - db_private_free (thread_p, curr_spec->parts); - curr_spec->parts = NULL; - 
curr_spec->curent = NULL; - curr_spec->pruned = false; - } - break; - case TARGET_CLASS_ATTR: - break; - case TARGET_LIST: - perfmon_inc_stat (thread_p, PSTAT_QM_NUM_LSCANS); - break; - case TARGET_SHOWSTMT: - /* do nothing */ - break; - case TARGET_REGUVAL_LIST: - /* currently do nothing */ - break; - case TARGET_SET: - perfmon_inc_stat (thread_p, PSTAT_QM_NUM_SETSCANS); - break; - case TARGET_METHOD: - perfmon_inc_stat (thread_p, PSTAT_QM_NUM_METHSCANS); - break; + perfmon_inc_stat (thread_p, PSTAT_QM_NUM_SSCANS); + } + else if (IS_ANY_INDEX_ACCESS (curr_spec->access)) + { + perfmon_inc_stat (thread_p, PSTAT_QM_NUM_ISCANS); + } + + if (curr_spec->parts != NULL) + { + /* reset pruning info */ + db_private_free (thread_p, curr_spec->parts); + curr_spec->parts = NULL; + curr_spec->curent = NULL; + curr_spec->pruned = false; } - scan_close_scan (thread_p, &curr_spec->s_id); + break; + + case TARGET_CLASS_ATTR: + break; + + case TARGET_LIST: + perfmon_inc_stat (thread_p, PSTAT_QM_NUM_LSCANS); + break; + + case TARGET_SHOWSTMT: + /* do nothing */ + break; + + case TARGET_REGUVAL_LIST: + /* currently do nothing */ + break; + + case TARGET_SET: + perfmon_inc_stat (thread_p, PSTAT_QM_NUM_SETSCANS); + break; + + case TARGET_JSON_TABLE: + /* currently do nothing + todo: check if here need to add something + */ + break; + + case TARGET_METHOD: + perfmon_inc_stat (thread_p, PSTAT_QM_NUM_METHSCANS); + break; } + + scan_close_scan (thread_p, &curr_spec->s_id); } /* @@ -8663,7 +8706,6 @@ qexec_execute_update (THREAD_ENTRY * thread_p, XASL_NODE * xasl, bool has_delete GOTO_EXIT_ON_ERROR; } - if (p_class_instance_lock_info && p_class_instance_lock_info->instances_locked) { /* already locked in select phase. Avoid locking again the same instances at update phase */ @@ -8675,7 +8717,7 @@ qexec_execute_update (THREAD_ENTRY * thread_p, XASL_NODE * xasl, bool has_delete need_locking = true; } - /* This guarantees that the result list file will have a type list. 
Copying a list_id structure fails unless it has a + /* This guarantees that the result list file will have a type list. Copying a list_id structure fails unless it has a * type list. */ if (qexec_setup_list_id (thread_p, xasl) != NO_ERROR) { @@ -9047,7 +9089,7 @@ qexec_execute_update (THREAD_ENTRY * thread_p, XASL_NODE * xasl, bool has_delete if (mvcc_upddel_reev_data.mvcc_cond_reev_list == NULL && mvcc_reev_class_cnt > 0) { /* If scan order was not set then do it. This operation must be run only once. We do it here and not at - * the beginning of this function because the class OIDs must be set for classes involved in reevaluation + * the beginning of this function because the class OIDs must be set for classes involved in reevaluation * (in mvcc_reev_classes) prior to this operation */ mvcc_upddel_reev_data.mvcc_cond_reev_list = qexec_mvcc_cond_reev_set_scan_order (aptr, mvcc_reev_classes, mvcc_reev_class_cnt, update->classes, @@ -9099,7 +9141,7 @@ qexec_execute_update (THREAD_ENTRY * thread_p, XASL_NODE * xasl, bool has_delete } } - /* Flush new values for each class. The class list was built from right to left during XASL generation, so in + /* Flush new values for each class. The class list was built from right to left during XASL generation, so in * order to maintain the correct update order specified in the query, we must iterate from right to left as * well; this makes a difference only when we update the same attribute of the same class more than once. 
*/ for (class_oid_idx = class_oid_cnt - 1, mvcc_reev_class_idx = mvcc_reev_class_cnt - 1; class_oid_idx >= 0; @@ -9569,14 +9611,13 @@ qexec_execute_delete (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xa } /* lock classes from which this query will delete */ - error = qexec_set_class_locks (thread_p, xasl->aptr_list, delete_->classes, delete_->num_classes, internal_classes); + aptr = xasl->aptr_list; + error = qexec_set_class_locks (thread_p, aptr, delete_->classes, delete_->num_classes, internal_classes); if (error != NO_ERROR) { GOTO_EXIT_ON_ERROR; } - aptr = xasl->aptr_list; - error = prepare_mvcc_reev_data (thread_p, aptr, xasl_state, mvcc_reev_class_cnt, delete_->mvcc_reev_classes, &mvcc_upddel_reev_data, delete_->num_classes, delete_->classes, internal_classes, 0, NULL, @@ -9612,7 +9653,7 @@ qexec_execute_delete (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xa need_locking = true; } - /* This guarantees that the result list file will have a type list. Copying a list_id structure fails unless it has a + /* This guarantees that the result list file will have a type list. Copying a list_id structure fails unless it has a * type list. */ if ((qexec_setup_list_id (thread_p, xasl) != NO_ERROR) /* it can be > 2 || (aptr->list_id->type_list.type_cnt != 2) */ ) @@ -9763,7 +9804,7 @@ qexec_execute_delete (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xa if (mvcc_upddel_reev_data.mvcc_cond_reev_list == NULL) { /* If scan order was not set then do it. This operation must be run only once. 
We do it here and not at - * the beginning of this function because the class OIDs must be set for classes involved in reevaluation + * the beginning of this function because the class OIDs must be set for classes involved in reevaluation * (in mvcc_reev_classes) prior to this operation */ mvcc_upddel_reev_data.mvcc_cond_reev_list = qexec_mvcc_cond_reev_set_scan_order (aptr, mvcc_reev_classes, mvcc_reev_class_cnt, delete_->classes, @@ -10808,7 +10849,7 @@ qexec_execute_insert (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xa xasl->list_id->query_id = xasl_state->query_id; } - /* This guarantees that the result list file will have a type list. Copying a list_id structure fails unless it has a + /* This guarantees that the result list file will have a type list. Copying a list_id structure fails unless it has a * type list. */ if (qexec_setup_list_id (thread_p, xasl) != NO_ERROR) { @@ -11740,7 +11781,7 @@ qexec_execute_obj_fetch (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * } } - /* + /* * Pre_processing */ @@ -11769,7 +11810,7 @@ qexec_execute_obj_fetch (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * } } - /* + /* * Processing */ @@ -11798,7 +11839,7 @@ qexec_execute_obj_fetch (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * } scan_cache_end_needed = true; - /* must choose corresponding lock_mode for scan_operation_type. + /* must choose corresponding lock_mode for scan_operation_type. 
* for root classes the lock_mode is considered, not the operation type */ lock_mode = locator_get_lock_mode_from_op_type (scan_operation_type); @@ -11807,9 +11848,9 @@ qexec_execute_obj_fetch (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * PEEK, NULL_CHN); if (scan != S_SUCCESS) { - /* setting ER_HEAP_UNKNOWN_OBJECT error for deleted or invisible objects should be replaced by a more clear + /* setting ER_HEAP_UNKNOWN_OBJECT error for deleted or invisible objects should be replaced by a more clear * way of handling the return code; it is imposible to decide at low level heap get functions if it is - * expected to reach a deleted object and also it is difficult to propagate the NON_EXISTENT_HANDLING + * expected to reach a deleted object and also it is difficult to propagate the NON_EXISTENT_HANDLING * argument through all the callers; this system can currently generate some irrelevant error log that is * hard to eliminate */ if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED) @@ -11840,7 +11881,7 @@ qexec_execute_obj_fetch (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * else { /* check to see if the object is one of the classes that we are interested in. This can only fail if there - * was a selector variable in the query. we can optimize this further to pass from the compiler whether this + * was a selector variable in the query. we can optimize this further to pass from the compiler whether this * check is necessary or not. */ bool found = false; @@ -11858,7 +11899,7 @@ qexec_execute_obj_fetch (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * if (!specp->pruned && specp->type == TARGET_CLASS) { - /* cls_oid might still refer to this spec through a partition. See if we already pruned this spec and + /* cls_oid might still refer to this spec through a partition. 
See if we already pruned this spec and * search through partitions for the appropriate class */ PARTITION_SPEC_TYPE *partition_spec = NULL; int granted; @@ -13008,9 +13049,9 @@ qexec_end_buildvalueblock_iterations (THREAD_ENTRY * thread_p, XASL_NODE * xasl, GOTO_EXIT_ON_ERROR; } - /* If BUILDVALUE_PROC does not have 'order by'(xasl->orderby_list), then the list file to be open at here will be the + /* If BUILDVALUE_PROC does not have 'order by'(xasl->orderby_list), then the list file to be open at here will be the * last one. Otherwise, the last list file will be open at qexec_orderby_distinct(). (Note that only one that can - * have 'group by' is BUILDLIST_PROC type.) And, the top most XASL is the other condition for the list file to be the + * have 'group by' is BUILDLIST_PROC type.) And, the top most XASL is the other condition for the list file to be the * last result file. */ QFILE_SET_FLAG (ls_flag, QFILE_FLAG_ALL); if (XASL_IS_FLAGED (xasl, XASL_TOP_MOST_XASL) && XASL_IS_FLAGED (xasl, XASL_TO_BE_CACHED) @@ -13458,7 +13499,7 @@ qexec_execute_mainblock_internal (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XAS bool instant_lock_mode_started = false; bool mvcc_select_lock_needed; - /* + /* * Pre_processing */ @@ -13807,11 +13848,11 @@ qexec_execute_mainblock_internal (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XAS GOTO_EXIT_ON_ERROR; } - /* + /* * Processing */ - /* Block out main part of query processing for performance profiling of JDBC driver and CAS side. Main purpose of - * this modification is to pretend that the server's scan time is very fast so that it affect only little portion + /* Block out main part of query processing for performance profiling of JDBC driver and CAS side. Main purpose of + * this modification is to pretend that the server's scan time is very fast so that it affect only little portion * of whole turnaround time in the point of view of the JDBC driver. 
*/ /* iterative processing is done only for XASL blocks that has access specification list blocks. */ @@ -14200,11 +14241,11 @@ qexec_execute_mainblock_internal (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XAS GOTO_EXIT_ON_ERROR; } - /* + /* * Post_processing */ - /* + /* * DISTINCT processing caused by statement set operators(UNION, * DIFFERENCE, INTERSECTION) has already taken place now. * But, in the other cases, DISTINCT are not processed yet. @@ -14322,7 +14363,7 @@ qexec_execute_mainblock_internal (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XAS } - /* + /* * Cleanup and Exit processing */ if (instant_lock_mode_started == true) @@ -14355,7 +14396,7 @@ qexec_execute_mainblock_internal (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XAS return NO_ERROR; - /* + /* * Error processing */ exit_on_error: @@ -14626,7 +14667,7 @@ qexec_execute_query (THREAD_ENTRY * thread_p, XASL_NODE * xasl, int dbval_cnt, c { char buf[512]; - /* Make sure this does NOT return error indication without setting an error message and code. If we + /* Make sure this does NOT return error indication without setting an error message and code. If we * get here, we most likely have a system error. qp_xasl_line is the first line to set an error * condition. 
*/ snprintf (buf, 511, "Query execution failure #%d.", xasl_state.qp_xasl_line); @@ -14775,7 +14816,7 @@ qexec_clear_list_cache_by_class (THREAD_ENTRY * thread_p, const OID * class_oid) XASL_CACHE_ENTRY *ent; void *last; - /* for all entries in the class oid hash table Note that mht_put2() allows mutiple data with the same key, so we have + /* for all entries in the class oid hash table Note that mht_put2() allows mutiple data with the same key, so we have * to use mht_get2() */ last = NULL; @@ -15720,8 +15761,8 @@ qexec_execute_cte (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xasl_ GOTO_EXIT_ON_ERROR; } - /* the recursive part XASL is executed totally (all iterations) - * and the results will be inserted in non_recursive_part->list_id + /* the recursive part XASL is executed totally (all iterations) + * and the results will be inserted in non_recursive_part->list_id */ while (non_recursive_part->list_id->tuple_cnt > 0) @@ -15797,7 +15838,7 @@ qexec_execute_cte (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xasl_ else if (recursive_part->spec_list->s.list_node.xasl_node == non_recursive_part) { /* optimization: use non-recursive list id for both reading and writing - * the recursive xasl will iterate through this list id while appending new results at its end + * the recursive xasl will iterate through this list id while appending new results at its end * note: this works only if the cte(actually the non_recursive_part link) is the first spec used * for scanning during recursive iterations */ @@ -17615,7 +17656,7 @@ qexec_gby_start_group (THREAD_ENTRY * thread_p, GROUPBY_STATE * gbstate, const R if (N == 0) { - /* + /* * Record the new key; keep it in SORT_KEY format so we can continue * to use the SORTKEY_INFO version of the comparison functions. 
* @@ -18224,14 +18265,16 @@ qexec_resolve_domains_for_group_by (BUILDLIST_PROC_NODE * buildlist, OUTPTR_LIST continue; } - assert (group_agg->operand.type == TYPE_CONSTANT); + REGU_VARIABLE operand = group_agg->operands->value; - if ((TP_DOMAIN_TYPE (group_agg->operand.domain) == DB_TYPE_VARIABLE - || TP_DOMAIN_COLLATION_FLAG (group_agg->operand.domain) != TP_DOMAIN_COLL_NORMAL) - && group_agg->operand.value.dbvalptr == val_list_ref_dbvalue) + assert (operand.type == TYPE_CONSTANT); + + if ((TP_DOMAIN_TYPE (operand.domain) == DB_TYPE_VARIABLE + || TP_DOMAIN_COLLATION_FLAG (operand.domain) != TP_DOMAIN_COLL_NORMAL) + && operand.value.dbvalptr == val_list_ref_dbvalue) { /* update domain of aggregate's operand */ - group_agg->operand.domain = ref_domain; + operand.domain = ref_domain; group_agg->opr_dbtype = TP_DOMAIN_TYPE (ref_domain); if (TP_DOMAIN_TYPE (group_agg->domain) == DB_TYPE_VARIABLE @@ -18280,7 +18323,8 @@ qexec_resolve_domains_for_group_by (BUILDLIST_PROC_NODE * buildlist, OUTPTR_LIST || group_agg->function == PT_COUNT || group_agg->function == PT_AVG || group_agg->function == PT_STDDEV || group_agg->function == PT_VARIANCE || group_agg->function == PT_STDDEV_POP || group_agg->function == PT_VAR_POP - || group_agg->function == PT_STDDEV_SAMP || group_agg->function == PT_VAR_SAMP); + || group_agg->function == PT_STDDEV_SAMP || group_agg->function == PT_VAR_SAMP + || group_agg->function == PT_JSON_ARRAYAGG || group_agg->function == PT_JSON_OBJECTAGG); } g_agg_val_found = true; @@ -18423,7 +18467,7 @@ qexec_resolve_domains_for_aggregation (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * } /* fetch function operand */ - if (fetch_peek_dbval (thread_p, &agg_p->operand, &xasl_state->vd, NULL, NULL, NULL, &dbval) != NO_ERROR) + if (fetch_peek_dbval (thread_p, &agg_p->operands->value, &xasl_state->vd, NULL, NULL, NULL, &dbval) != NO_ERROR) { return ER_FAILED; } @@ -18472,6 +18516,8 @@ qexec_resolve_domains_for_aggregation (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * case 
PT_AGG_BIT_XOR: case PT_MIN: case PT_MAX: + case PT_JSON_ARRAYAGG: + case PT_JSON_OBJECTAGG: agg_p->accumulator_domain.value_dom = agg_p->domain; agg_p->accumulator_domain.value2_dom = &tp_Null_domain; break; @@ -18553,13 +18599,13 @@ qexec_resolve_domains_for_aggregation (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * break; default: - assert (agg_p->operand.type == TYPE_CONSTANT || agg_p->operand.type == TYPE_DBVAL - || agg_p->operand.type == TYPE_INARITH); + assert (agg_p->operands->value.type == TYPE_CONSTANT || agg_p->operands->value.type == TYPE_DBVAL + || agg_p->operands->value.type == TYPE_INARITH); /* try to cast dbval to double, datetime then time */ tmp_domain_p = tp_domain_resolve_default (DB_TYPE_DOUBLE); - if (REGU_VARIABLE_IS_FLAGED (&agg_p->operand, REGU_VARIABLE_CLEAR_AT_CLONE_DECACHE)) + if (REGU_VARIABLE_IS_FLAGED (&agg_p->operands->value, REGU_VARIABLE_CLEAR_AT_CLONE_DECACHE)) { save_heapid = db_change_private_heap (thread_p, 0); } @@ -18703,7 +18749,7 @@ qexec_groupby_index (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xas assert (gbstate.g_dim_levels == 1); assert (gbstate.with_rollup == false); - /* + /* * Create a new listfile to receive the results. */ { @@ -18715,7 +18761,7 @@ qexec_groupby_index (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xas GOTO_EXIT_ON_ERROR; } /* If it does not have 'order by'(xasl->orderby_list), then the list file to be open at here will be the last one. - * Otherwise, the last list file will be open at qexec_orderby_distinct(). (Note that only one that can have 'group + * Otherwise, the last list file will be open at qexec_orderby_distinct(). (Note that only one that can have 'group * by' is BUILDLIST_PROC type.) And, the top most XASL is the other condition for the list file to be the last * result file. 
*/ @@ -18752,7 +18798,7 @@ qexec_groupby_index (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * xas tuple_cnt = list_id->tuple_cnt; } - /* + /* * Open a scan on the unsorted input file */ if (qfile_open_list_scan (list_id, &input_scan_id) != NO_ERROR) @@ -19225,7 +19271,7 @@ qexec_execute_analytic (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * goto wrapup; } - /* + /* * Open a scan on the unsorted input file */ if (qfile_open_list_scan (list_id, &input_scan_id) != NO_ERROR) @@ -19234,7 +19280,7 @@ qexec_execute_analytic (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * } analytic_state.input_scan = &input_scan_id; - /* + /* * open a scan on the intermediate file */ if (qfile_open_list_scan (analytic_state.interm_file, &interm_scan_id) != NO_ERROR) @@ -19244,7 +19290,7 @@ qexec_execute_analytic (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * interm_scan_id.keep_page_on_finish = 1; analytic_state.interm_scan = &interm_scan_id; - /* + /* * Now load up the sort module and set it off... */ @@ -19273,7 +19319,7 @@ qexec_execute_analytic (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STATE * GOTO_EXIT_ON_ERROR; } - /* + /* * There may be one unfinished group in the output, since the sort_listfile * interface doesn't include a finalization function. If so, finish * off that group. @@ -19670,7 +19716,7 @@ qexec_analytic_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *a goto exit_on_error; } - /* + /* * Retrieve the original tuple. This will be the case if the * original tuple had more fields than we were sorting on. */ @@ -19700,7 +19746,7 @@ qexec_analytic_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *a data = analytic_state->curr_sort_page.page_p + key->s.original.offset; if (vpid.pageid != NULL_PAGEID) { - /* + /* * This sucks; why do we need two different structures to * accomplish exactly the same goal? 
*/ @@ -19710,7 +19756,7 @@ qexec_analytic_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *a if (dummy.tpl != analytic_state->analytic_rec.data) { - /* + /* * DON'T FREE THE BUFFER! qfile_get_tuple() already did * that, and what you have here in gby_rec is a dangling * pointer. @@ -19730,7 +19776,7 @@ qexec_analytic_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *a peek = PEEK; /* avoid unnecessary COPY */ } - /* + /* * process current sorted tuple */ if (analytic_state->input_recs == 0) @@ -19762,7 +19808,7 @@ qexec_analytic_put_next (THREAD_ENTRY * thread_p, const RECDES * recdes, void *a if (QPROC_ANALYTIC_IS_OFFSET_FUNCTION (func_state->func_p)) { - /* offset functions will treat all tuples in a group as having a different sort key regardless if + /* offset functions will treat all tuples in a group as having a different sort key regardless if * this is true or not; this is done in order to have a distinct value for each tuple in the * group (whereas normally tuples sharing a sort key will also share a value) */ is_same_group = true; @@ -19919,7 +19965,7 @@ qexec_analytic_start_group (THREAD_ENTRY * thread_p, XASL_STATE * xasl_state, AN { int error; - /* + /* * Record the new key; keep it in SORT_KEY format so we can continue * to use the SORTKEY_INFO version of the comparison functions. * @@ -19945,7 +19991,7 @@ qexec_analytic_start_group (THREAD_ENTRY * thread_p, XASL_STATE * xasl_state, AN func_state->current_key.length = key->length; } - /* + /* * (Re)initialize the various accumulator variables... 
*/ if (reinit) @@ -21041,7 +21087,7 @@ qexec_analytic_update_group_result (THREAD_ENTRY * thread_p, ANALYTIC_STATE * an return ER_FAILED; } - /* we will use each func_state->value as a buffer to read values from the sort key headers, so make sure it points to + /* we will use each func_state->value as a buffer to read values from the sort key headers, so make sure it points to * the vallist in order to correctly output values */ for (i = 0; i < analytic_state->func_count; i++) { @@ -22735,7 +22781,7 @@ qexec_execute_build_columns (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STA { case 0: /* Do not copy the string--just use the pointer. The pr_ routines for strings and sets have different - * semantics for length. A negative length value for strings means "don't copy thestring, just use the + * semantics for length. A negative length value for strings means "don't copy thestring, just use the * pointer". */ disk_length = attrepr->current_default_value.val_length; @@ -22754,7 +22800,7 @@ qexec_execute_build_columns (THREAD_ENTRY * thread_p, XASL_NODE * xasl, XASL_STA idx_val++; break; default: - /* + /* * An error was found during the reading of the * attribute value */ @@ -23027,7 +23073,7 @@ qexec_create_internal_classes (THREAD_ENTRY * thread_p, UPDDEL_CLASS_INFO * quer * return : error code or NO_ERROR * thread_p (in) : * aptr (in): XASL for generated SELECT statement for UPDATE - * should_delete (in): + * should_delete (in): * classes (in) : internal classes array * num_classes (in) : count internal classes array elements * num_assignments (in) : no of assignments @@ -23152,7 +23198,7 @@ qexec_clear_internal_classes (THREAD_ENTRY * thread_p, UPDDEL_CLASS_INFO_INTERNA * in a class hierarchy * return : error code or NO_ERROR * thread_p (in) : - * aptr_list (in) : + * aptr_list (in) : * mvcc_data_filter (in) : filter info * class_oid (in) : class oid * @@ -23312,7 +23358,7 @@ qexec_upddel_setup_current_class (THREAD_ENTRY * thread_p, UPDDEL_CLASS_INFO * q /* 
Start a HEAP_SCANCACHE object on the new class. Partitioned classes and class hierarchies are handled differently */ if (internal_class->needs_pruning) { - /* Get a scan_cache object from the pruning context. We don't close the previous one here, it will be closed when + /* Get a scan_cache object from the pruning context. We don't close the previous one here, it will be closed when * the pruning context is cleared. */ PRUNING_SCAN_CACHE *pcache = NULL; pcache = @@ -24671,7 +24717,7 @@ qexec_alloc_agg_hash_context (THREAD_ENTRY * thread_p, BUILDLIST_PROC_NODE * pro proc->agg_hash_context.sort_key.key = NULL; proc->agg_hash_context.sort_key.nkeys = 0; - /* + /* * create temporary dbvalue array */ if (proc->g_func_count > 0) @@ -24686,7 +24732,7 @@ qexec_alloc_agg_hash_context (THREAD_ENTRY * thread_p, BUILDLIST_PROC_NODE * pro } } - /* + /* * keep key domains */ proc->agg_hash_context.key_domains = @@ -24704,7 +24750,7 @@ qexec_alloc_agg_hash_context (THREAD_ENTRY * thread_p, BUILDLIST_PROC_NODE * pro proc->agg_hash_context.key_domains[i] = regu_list->value.domain; } - /* + /* * keep accumulator domains */ if (proc->g_func_count > 0) @@ -24728,7 +24774,7 @@ qexec_alloc_agg_hash_context (THREAD_ENTRY * thread_p, BUILDLIST_PROC_NODE * pro } } - /* + /* * create partial list file */ @@ -24818,7 +24864,7 @@ qexec_alloc_agg_hash_context (THREAD_ENTRY * thread_p, BUILDLIST_PROC_NODE * pro /* free memory */ db_private_free (thread_p, type_list.domp); - /* + /* * create hash table */ proc->agg_hash_context.hash_table = @@ -24833,7 +24879,7 @@ qexec_alloc_agg_hash_context (THREAD_ENTRY * thread_p, BUILDLIST_PROC_NODE * pro proc->agg_hash_context.hash_table->build_lru_list = true; } - /* + /* * create temp keys */ proc->agg_hash_context.temp_key = qdata_alloc_agg_hkey (thread_p, proc->g_hkey_size, false); @@ -24846,7 +24892,7 @@ qexec_alloc_agg_hash_context (THREAD_ENTRY * thread_p, BUILDLIST_PROC_NODE * pro goto exit_on_error; } - /* + /* * create temp values */ 
proc->agg_hash_context.temp_part_value = qdata_alloc_agg_hvalue (thread_p, proc->g_func_count); @@ -24857,7 +24903,7 @@ qexec_alloc_agg_hash_context (THREAD_ENTRY * thread_p, BUILDLIST_PROC_NODE * pro goto exit_on_error; } - /* + /* * initialize recdes */ proc->agg_hash_context.tuple_recdes.data = 0; @@ -24865,13 +24911,13 @@ qexec_alloc_agg_hash_context (THREAD_ENTRY * thread_p, BUILDLIST_PROC_NODE * pro proc->agg_hash_context.tuple_recdes.length = 0; proc->agg_hash_context.tuple_recdes.area_size = 0; - /* + /* * initialize sort input tuple */ proc->agg_hash_context.input_tuple.size = 0; proc->agg_hash_context.input_tuple.tpl = NULL; - /* + /* * initialize remaining fields */ proc->agg_hash_context.hash_size = 0; diff --git a/src/query/query_manager.c b/src/query/query_manager.c index 1865377928a..54968dd7812 100644 --- a/src/query/query_manager.c +++ b/src/query/query_manager.c @@ -30,6 +30,8 @@ #include #include "query_manager.h" + +#include "compile_context.h" #include "object_primitive.h" #include "xserver_interface.h" #include "query_executor.h" diff --git a/src/query/query_opfunc.c b/src/query/query_opfunc.c index 161c8e8c74d..e3f7ef9ab04 100644 --- a/src/query/query_opfunc.c +++ b/src/query/query_opfunc.c @@ -43,6 +43,7 @@ #include "databases_file.h" #include "tz_support.h" #include "numeric_opfunc.h" +#include "tz_support.h" #include "db_date.h" #include "dbtype.h" #include "query_dump.h" @@ -247,6 +248,18 @@ static int qdata_json_array_append (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR * val_desc_p, OID * obj_oid_p, QFILE_TUPLE tuple); +static int +qdata_json_array_insert (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR * val_desc_p, OID * obj_oid_p, + QFILE_TUPLE tuple); + +static int +qdata_json_search (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR * val_desc_p, OID * obj_oid_p, + QFILE_TUPLE tuple); + +static int +qdata_json_contains_path (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR 
* val_desc_p, OID * obj_oid_p, + QFILE_TUPLE tuple); + static int qdata_json_get_all_paths (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR * val_desc_p, OID * obj_oid_p, QFILE_TUPLE tuple); @@ -255,6 +268,10 @@ static int qdata_json_merge (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR * val_desc_p, OID * obj_oid_p, QFILE_TUPLE tuple); +static int +qdata_json_merge_patch (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR * val_desc_p, OID * obj_oid_p, + QFILE_TUPLE tuple); + static int (*generic_func_ptrs[]) (THREAD_ENTRY * thread_p, DB_VALUE *, int, DB_VALUE **) = { qdata_dummy}; @@ -6099,6 +6116,19 @@ qdata_json_type_dbval (DB_VALUE * dbval1_p, DB_VALUE * result_p, TP_DOMAIN * dom return qdata_coerce_result_to_domain (result_p, domain_p); } +int +qdata_json_pretty_dbval (DB_VALUE * dbval1_p, DB_VALUE * result_p, TP_DOMAIN * domain_p) +{ + int error_code = db_json_pretty_dbval (dbval1_p, result_p); + + if (error_code != NO_ERROR) + { + return error_code; + } + + return qdata_coerce_result_to_domain (result_p, domain_p); +} + int qdata_json_valid_dbval (DB_VALUE * dbval1_p, DB_VALUE * result_p, TP_DOMAIN * domain_p) { @@ -6124,6 +6154,18 @@ qdata_json_depth_dbval (DB_VALUE * dbval1_p, DB_VALUE * result_p, TP_DOMAIN * do return db_json_depth_dbval (dbval1_p, result_p); } +int +qdata_json_quote_dbval (DB_VALUE * dbval1_p, DB_VALUE * result_p, TP_DOMAIN * domain_p) +{ + return db_string_quote (dbval1_p, result_p); +} + +int +qdata_json_unquote_dbval (DB_VALUE * dbval1_p, DB_VALUE * result_p, TP_DOMAIN * domain_p) +{ + return db_json_unquote_dbval (dbval1_p, result_p); +} + int qdata_json_extract_dbval (const DB_VALUE * json, const DB_VALUE * path, DB_VALUE * json_res, TP_DOMAIN * domain_p) { @@ -6360,7 +6402,7 @@ qdata_process_distinct_or_sort (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * agg_p, return ER_FAILED; } - type_list.domp[0] = agg_p->operand.domain; + type_list.domp[0] = agg_p->operands->value.domain; /* if the agg 
has ORDER BY force setting 'QFILE_FLAG_ALL' : in this case, no additional SORT_LIST will be created, * but the one in the AGGREGATE_TYPE structure will be used */ if (agg_p->sort_list != NULL) @@ -6491,10 +6533,16 @@ qdata_aggregate_accumulator_to_accumulator (THREAD_ENTRY * thread_p, AGGREGATE_A case PT_AGG_BIT_XOR: case PT_AVG: case PT_SUM: - /* these functions only affect acc.value and new_acc can be treated as an ordinary value */ + // these functions only affect acc.value and new_acc can be treated as an ordinary value error = qdata_aggregate_value_to_accumulator (thread_p, acc, acc_dom, func_type, func_domain, new_acc->value); break; + // for these two situations we just need to merge + case PT_JSON_ARRAYAGG: + case PT_JSON_OBJECTAGG: + error = db_json_merge (new_acc->value, acc->value); + break; + case PT_STDDEV: case PT_STDDEV_POP: case PT_STDDEV_SAMP: @@ -6565,6 +6613,7 @@ qdata_aggregate_accumulator_to_accumulator (THREAD_ENTRY * thread_p, AGGREGATE_A * func_type(in): function type * func_domain(in): function domain * value(in): value + * value_next(int): value of the second argument; used only for JSON_OBJECTAGG */ int qdata_aggregate_value_to_accumulator (THREAD_ENTRY * thread_p, AGGREGATE_ACCUMULATOR * acc, @@ -6743,6 +6792,13 @@ qdata_aggregate_value_to_accumulator (THREAD_ENTRY * thread_p, AGGREGATE_ACCUMUL } break; + case PT_JSON_ARRAYAGG: + if (db_json_arrayagg_dbval_accumulate (value, acc->value) != NO_ERROR) + { + return ER_FAILED; + } + break; + default: er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_QPROC_INVALID_XASLNODE, 0); return ER_FAILED; @@ -6773,6 +6829,45 @@ qdata_aggregate_value_to_accumulator (THREAD_ENTRY * thread_p, AGGREGATE_ACCUMUL return NO_ERROR; } +/* *INDENT-OFF* */ +int +qdata_aggregate_multiple_values_to_accumulator (THREAD_ENTRY * thread_p, AGGREGATE_ACCUMULATOR * acc, + AGGREGATE_ACCUMULATOR_DOMAIN * domain, FUNC_TYPE func_type, + TP_DOMAIN * func_domain, std::vector & db_values) +{ + // we have only one argument so 
aggregate only the first db_value + if (db_values.size () == 1) + { + return qdata_aggregate_value_to_accumulator (thread_p, acc, domain, func_type, func_domain, &db_values[0]); + } + + // maybe this condition will be changed in the future based on the future arguments conditions + for (DB_VALUE &db_value : db_values) + { + if (DB_IS_NULL (&db_value)) + { + return NO_ERROR; + } + } + + switch (func_type) + { + case PT_JSON_OBJECTAGG: + if (db_json_objectagg_dbval_accumulate (&db_values[0], &db_values[1], acc->value) != NO_ERROR) + { + return ER_FAILED; + } + break; + + default: + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_QPROC_INVALID_XASLNODE, 0); + return ER_FAILED; + } + + return NO_ERROR; +} +/* *INDENT-ON* */ + /* * qdata_evaluate_aggregate_list () - * return: NO_ERROR, or ER_code @@ -6791,18 +6886,21 @@ qdata_evaluate_aggregate_list (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * agg_lis { AGGREGATE_TYPE *agg_p; AGGREGATE_ACCUMULATOR *accumulator; - DB_VALUE dbval, *percentile_val = NULL; + DB_VALUE *percentile_val = NULL; PR_TYPE *pr_type_p; DB_TYPE dbval_type; OR_BUF buf; char *disk_repr_p = NULL; int dbval_size, i, error; AGGREGATE_PERCENTILE_INFO *percentile = NULL; - - db_make_null (&dbval); + DB_VALUE *db_value_p = NULL; for (agg_p = agg_list_p, i = 0; agg_p != NULL; agg_p = agg_p->next, i++) { + /* *INDENT-OFF* */ + std::vector<DB_VALUE> db_values; + /* *INDENT-ON* */ + /* determine accumulator */ accumulator = (alt_acc_list != NULL ? &alt_acc_list[i] : &agg_p->accumulator); @@ -6840,24 +6938,69 @@ qdata_evaluate_aggregate_list (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * agg_lis continue; } + /* fetch operands value.
aggregate regulator variable should only contain constants */ + REGU_VARIABLE_LIST operand = NULL; + for (operand = agg_p->operands; operand != NULL; operand = operand->next) + { + // create an empty value + db_values.emplace_back (); + + // fetch it + if (fetch_copy_dbval (thread_p, &operand->value, val_desc_p, NULL, NULL, NULL, + &db_values.back ()) != NO_ERROR) + { + pr_clear_value_vector (db_values); + return ER_FAILED; + } + } + /* - * fetch operand value. aggregate regulator variable should only - * contain constants + * eliminate null values + * consider only the first argument, because for the rest will depend on the function */ - if (fetch_copy_dbval (thread_p, &agg_p->operand, val_desc_p, NULL, NULL, NULL, &dbval) != NO_ERROR) + db_value_p = &db_values[0]; + if (DB_IS_NULL (db_value_p)) { - return ER_FAILED; + /* + * for JSON_ARRAYAGG we need to include also NULL values in the result set + * so we need to construct a NULL JSON value + */ + if (agg_p->function == PT_JSON_ARRAYAGG) + { + // this creates a new JSON_DOC with the type DB_JSON_NULL + db_make_json (db_value_p, db_json_allocate_doc (), true); + } + /* + * for JSON_OBJECTAGG we need to include keep track of key-value pairs + * the key can not be NULL so this will throw an error + * the value can be NULL and we will wrap this into a JSON with DB_JSON_NULL type in the next statement + */ + else if (agg_p->function == PT_JSON_OBJECTAGG) + { + pr_clear_value_vector (db_values); + return ER_FAILED; + } + else + { + if ((agg_p->function == PT_COUNT || agg_p->function == PT_COUNT_STAR) && DB_IS_NULL (accumulator->value)) + { + /* we might get a NULL count if aggregating with hash table and group has only one tuple; correct that */ + db_make_int (accumulator->value, 0); + } + pr_clear_value_vector (db_values); + continue; + } } - /* eliminate null values */ - if (DB_IS_NULL (&dbval)) + /* + * for JSON_OBJECTAGG, we wrap the second argument with a null JSON only if the value is NULL + */ + if 
(agg_p->function == PT_JSON_OBJECTAGG) { - if ((agg_p->function == PT_COUNT || agg_p->function == PT_COUNT_STAR) && DB_IS_NULL (accumulator->value)) + if (DB_IS_NULL (&db_values[1])) { - /* we might get a NULL count if aggregating with hash table and group has only one tuple; correct that */ - db_make_int (accumulator->value, 0); + db_make_json (&db_values[1], db_json_allocate_doc (), true); } - continue; } /* @@ -6871,55 +7014,55 @@ qdata_evaluate_aggregate_list (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * agg_lis if (QPROC_IS_INTERPOLATION_FUNC (agg_p)) { /* never be null type */ - assert (!DB_IS_NULL (&dbval)); + assert (!DB_IS_NULL (db_value_p)); - error = qdata_update_agg_interpolation_func_value_and_domain (agg_p, &dbval); + error = qdata_update_agg_interpolation_func_value_and_domain (agg_p, db_value_p); if (error != NO_ERROR) { - pr_clear_value (&dbval); + pr_clear_value_vector (db_values); return ER_FAILED; } } - dbval_type = DB_VALUE_DOMAIN_TYPE (&dbval); + dbval_type = DB_VALUE_DOMAIN_TYPE (db_value_p); pr_type_p = PR_TYPE_FROM_ID (dbval_type); if (pr_type_p == NULL) { - pr_clear_value (&dbval); + pr_clear_value_vector (db_values); return ER_FAILED; } - dbval_size = pr_data_writeval_disk_size (&dbval); + dbval_size = pr_data_writeval_disk_size (db_value_p); if (dbval_size > 0 && (disk_repr_p = (char *) db_private_alloc (thread_p, dbval_size)) != NULL) { OR_BUF_INIT (buf, disk_repr_p, dbval_size); - error = (*(pr_type_p->data_writeval)) (&buf, &dbval); + error = (*(pr_type_p->data_writeval)) (&buf, db_value_p); if (error != NO_ERROR) { /* ER_TF_BUFFER_OVERFLOW means that val_size or packing is bad. 
*/ assert (error != ER_TF_BUFFER_OVERFLOW); db_private_free_and_init (thread_p, disk_repr_p); - pr_clear_value (&dbval); + pr_clear_value_vector (db_values); return ER_FAILED; } } else { - pr_clear_value (&dbval); + pr_clear_value_vector (db_values); return ER_FAILED; } if (qfile_add_item_to_list (thread_p, disk_repr_p, dbval_size, agg_p->list_id) != NO_ERROR) { db_private_free_and_init (thread_p, disk_repr_p); - pr_clear_value (&dbval); + pr_clear_value_vector (db_values); return ER_FAILED; } db_private_free_and_init (thread_p, disk_repr_p); - pr_clear_value (&dbval); + pr_clear_value_vector (db_values); /* for PERCENTILE funcs, we have to check percentile value */ if (agg_p->function != PT_PERCENTILE_CONT && agg_p->function != PT_PERCENTILE_DISC) @@ -6990,18 +7133,19 @@ qdata_evaluate_aggregate_list (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * agg_lis case DB_TYPE_TIME: break; default: - assert (agg_p->operand.type == TYPE_CONSTANT || agg_p->operand.type == TYPE_DBVAL); + assert (agg_p->operands->value.type == TYPE_CONSTANT || + agg_p->operands->value.type == TYPE_DBVAL); /* try to cast dbval to double, datetime then time */ tmp_domain_p = tp_domain_resolve_default (DB_TYPE_DOUBLE); - status = tp_value_cast (&dbval, &dbval, tmp_domain_p, false); + status = tp_value_cast (db_value_p, db_value_p, tmp_domain_p, false); if (status != DOMAIN_COMPATIBLE) { /* try datetime */ tmp_domain_p = tp_domain_resolve_default (DB_TYPE_DATETIME); - status = tp_value_cast (&dbval, &dbval, tmp_domain_p, false); + status = tp_value_cast (db_value_p, db_value_p, tmp_domain_p, false); } /* try time */ @@ -7009,7 +7153,7 @@ qdata_evaluate_aggregate_list (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * agg_lis { tmp_domain_p = tp_domain_resolve_default (DB_TYPE_TIME); - status = tp_value_cast (&dbval, &dbval, tmp_domain_p, false); + status = tp_value_cast (db_value_p, db_value_p, tmp_domain_p, false); } if (status != DOMAIN_COMPATIBLE) @@ -7018,7 +7162,7 @@ qdata_evaluate_aggregate_list 
(THREAD_ENTRY * thread_p, AGGREGATE_TYPE * agg_lis er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 2, qdump_function_type_string (agg_p->function), "DOUBLE, DATETIME, TIME"); - pr_clear_value (&dbval); + pr_clear_value_vector (db_values); return error; } @@ -7027,17 +7171,17 @@ qdata_evaluate_aggregate_list (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * agg_lis } pr_clear_value (agg_p->accumulator.value); - error = pr_clone_value (&dbval, agg_p->accumulator.value); + error = pr_clone_value (db_value_p, agg_p->accumulator.value); if (error != NO_ERROR) { - pr_clear_value (&dbval); + pr_clear_value_vector (db_values); return error; } } } /* clear value */ - pr_clear_value (&dbval); + pr_clear_value_vector (db_values); /* percentile value check */ if (agg_p->function == PT_PERCENTILE_CONT || agg_p->function == PT_PERCENTILE_DISC) @@ -7057,18 +7201,18 @@ qdata_evaluate_aggregate_list (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * agg_lis /* group concat function requires special care */ if (agg_p->accumulator.curr_cnt < 1) { - error = qdata_group_concat_first_value (thread_p, agg_p, &dbval); + error = qdata_group_concat_first_value (thread_p, agg_p, db_value_p); } else { - error = qdata_group_concat_value (thread_p, agg_p, &dbval); + error = qdata_group_concat_value (thread_p, agg_p, db_value_p); } /* increment tuple count */ agg_p->accumulator.curr_cnt++; /* clear value */ - pr_clear_value (&dbval); + pr_clear_value_vector (db_values); /* check error */ if (error != NO_ERROR) @@ -7079,14 +7223,14 @@ qdata_evaluate_aggregate_list (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * agg_lis else { /* aggregate value */ - error = qdata_aggregate_value_to_accumulator (thread_p, accumulator, &agg_p->accumulator_domain, - agg_p->function, agg_p->domain, &dbval); + error = qdata_aggregate_multiple_values_to_accumulator (thread_p, accumulator, &agg_p->accumulator_domain, + agg_p->function, agg_p->domain, db_values); /* increment tuple count */ accumulator->curr_cnt++; - /* clear value */ - 
pr_clear_value (&dbval); + /* clear values */ + pr_clear_value_vector (db_values); /* handle error */ if (error != NO_ERROR) @@ -8471,12 +8615,24 @@ qdata_evaluate_function (THREAD_ENTRY * thread_p, REGU_VARIABLE * function_p, VA case F_JSON_ARRAY_APPEND: return qdata_json_array_append (thread_p, funcp, val_desc_p, obj_oid_p, tuple); + case F_JSON_ARRAY_INSERT: + return qdata_json_array_insert (thread_p, funcp, val_desc_p, obj_oid_p, tuple); + + case F_JSON_SEARCH: + return qdata_json_search (thread_p, funcp, val_desc_p, obj_oid_p, tuple); + + case F_JSON_CONTAINS_PATH: + return qdata_json_contains_path (thread_p, funcp, val_desc_p, obj_oid_p, tuple); + case F_JSON_GET_ALL_PATHS: return qdata_json_get_all_paths (thread_p, funcp, val_desc_p, obj_oid_p, tuple); case F_JSON_MERGE: return qdata_json_merge (thread_p, funcp, val_desc_p, obj_oid_p, tuple); + case F_JSON_MERGE_PATCH: + return qdata_json_merge_patch (thread_p, funcp, val_desc_p, obj_oid_p, tuple); + default: er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_QPROC_INVALID_XASLNODE, 0); return ER_FAILED; @@ -10213,6 +10369,30 @@ qdata_json_array_append (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VA obj_oid_p, tuple, db_json_array_append); } +static int +qdata_json_array_insert (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR * val_desc_p, OID * obj_oid_p, + QFILE_TUPLE tuple) +{ + return qdata_convert_operands_to_value_and_call (thread_p, function_p, val_desc_p, + obj_oid_p, tuple, db_json_array_insert); +} + +static int +qdata_json_search (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR * val_desc_p, OID * obj_oid_p, + QFILE_TUPLE tuple) +{ + return qdata_convert_operands_to_value_and_call (thread_p, function_p, val_desc_p, + obj_oid_p, tuple, db_json_search_dbval); +} + +static int +qdata_json_contains_path (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR * val_desc_p, OID * obj_oid_p, + QFILE_TUPLE tuple) +{ + return qdata_convert_operands_to_value_and_call 
(thread_p, function_p, val_desc_p, + obj_oid_p, tuple, db_json_contains_path); +} + static int qdata_json_get_all_paths (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR * val_desc_p, OID * obj_oid_p, QFILE_TUPLE tuple) @@ -10228,6 +10408,14 @@ qdata_json_merge (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR return qdata_convert_operands_to_value_and_call (thread_p, function_p, val_desc_p, obj_oid_p, tuple, db_json_merge); } +static int +qdata_json_merge_patch (THREAD_ENTRY * thread_p, FUNCTION_TYPE * function_p, VAL_DESCR * val_desc_p, OID * obj_oid_p, + QFILE_TUPLE tuple) +{ + return qdata_convert_operands_to_value_and_call (thread_p, function_p, val_desc_p, + obj_oid_p, tuple, db_json_merge_patch); +} + /* * qdata_get_cardinality () - gets the cardinality of an index using its name * and partial key count @@ -11958,9 +12146,9 @@ qdata_calculate_aggregate_cume_dist_percent_rank (THREAD_ENTRY * thread_p, AGGRE DB_DOMAIN *dom; HL_HEAPID save_heapid = 0; - assert (agg_p != NULL && agg_p->sort_list != NULL && agg_p->operand.type == TYPE_REGU_VAR_LIST); + assert (agg_p != NULL && agg_p->sort_list != NULL && agg_p->operands->value.type == TYPE_REGU_VAR_LIST); - regu_var_list = agg_p->operand.value.regu_var_list; + regu_var_list = agg_p->operands->value.value.regu_var_list; info_p = &agg_p->info.dist_percent; assert (regu_var_list != NULL && info_p != NULL); diff --git a/src/query/query_opfunc.h b/src/query/query_opfunc.h index 1ac5d8a12d8..69d9dd71f30 100644 --- a/src/query/query_opfunc.h +++ b/src/query/query_opfunc.h @@ -82,16 +82,27 @@ extern int qdata_strcat_dbval (DB_VALUE * dbval1, DB_VALUE * dbval2, DB_VALUE * extern int qdata_json_contains_dbval (DB_VALUE * dbval1, DB_VALUE * dbval2, DB_VALUE * dbval3, DB_VALUE * res, TP_DOMAIN * domain); extern int qdata_json_type_dbval (DB_VALUE * dbval1_p, DB_VALUE * result_p, TP_DOMAIN * domain_p); +extern int qdata_json_pretty_dbval (DB_VALUE * dbval1_p, DB_VALUE * result_p, TP_DOMAIN * 
domain_p); extern int qdata_json_valid_dbval (DB_VALUE * dbval1_p, DB_VALUE * result_p, TP_DOMAIN * domain_p); extern int qdata_json_length_dbval (DB_VALUE * dbval1_p, DB_VALUE * dbval2_p, DB_VALUE * result_p, TP_DOMAIN * domain_p); +extern int qdata_json_unquote_dbval (DB_VALUE * dbval1_p, DB_VALUE * result_p, TP_DOMAIN * domain_p); extern int qdata_json_extract_dbval (const DB_VALUE * json, const DB_VALUE * path, DB_VALUE * json_res, TP_DOMAIN * domain_p); extern int qdata_json_depth_dbval (DB_VALUE * dbval1_p, DB_VALUE * result_p, TP_DOMAIN * domain_p); +extern int qdata_json_quote_dbval (DB_VALUE * dbval1_p, DB_VALUE * result_p, TP_DOMAIN * domain_p); extern int qdata_initialize_aggregate_list (THREAD_ENTRY * thread_p, AGGREGATE_TYPE * agg_list, QUERY_ID query_id); extern int qdata_aggregate_value_to_accumulator (THREAD_ENTRY * thread_p, AGGREGATE_ACCUMULATOR * acc, AGGREGATE_ACCUMULATOR_DOMAIN * domain, FUNC_TYPE func_type, TP_DOMAIN * func_domain, DB_VALUE * value); + +/* *INDENT-OFF* */ +extern int qdata_aggregate_multiple_values_to_accumulator (THREAD_ENTRY * thread_p, AGGREGATE_ACCUMULATOR * acc, + AGGREGATE_ACCUMULATOR_DOMAIN * domain, FUNC_TYPE func_type, + TP_DOMAIN * func_domain, + std::vector<DB_VALUE> & db_values); +/* *INDENT-ON* */ + extern int qdata_aggregate_accumulator_to_accumulator (THREAD_ENTRY * thread_p, AGGREGATE_ACCUMULATOR * acc, AGGREGATE_ACCUMULATOR_DOMAIN * acc_dom, FUNC_TYPE func_type, TP_DOMAIN * func_domain, AGGREGATE_ACCUMULATOR * new_acc); diff --git a/src/query/regu_var.h b/src/query/regu_var.h index 7a05d04aafc..97adef73a5e 100644 --- a/src/query/regu_var.h +++ b/src/query/regu_var.h @@ -466,7 +466,7 @@ struct aggregate_list_node QUERY_OPTIONS option; /* DISTINCT/ALL option */ DB_TYPE opr_dbtype; /* Operand values data type */ DB_TYPE original_opr_dbtype; /* Original operand values data type */ - REGU_VARIABLE operand; /* operand */ + REGU_VARIABLE_LIST operands; /* list of operands (one operand per function argument) */ QFILE_LIST_ID
*list_id; /* used for distinct handling */ int flag_agg_optimize; BTID btid; diff --git a/src/query/scan_json_table.cpp b/src/query/scan_json_table.cpp new file mode 100644 index 00000000000..257aa857d03 --- /dev/null +++ b/src/query/scan_json_table.cpp @@ -0,0 +1,630 @@ +/* + * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include "scan_json_table.hpp" + +#include "access_json_table.hpp" +#include "db_json.hpp" +#include "dbtype.h" +#include "fetch.h" +#include "object_primitive.h" +#include "scan_manager.h" + +#include <algorithm> + +namespace cubscan +{ + namespace json_table + { + struct scanner::cursor + { + std::size_t m_child; // current child + cubxasl::json_table::node *m_node; // pointer to access node + JSON_DOC *m_input_doc; // input JSON document value + const JSON_DOC *m_process_doc; // for no expand, it matched input document.
when node is expanded, it will + // point iterator value + bool m_is_row_fetched; // set to true when current row is fetched + bool m_need_advance_row; // set to true when next node action is to advance row + bool m_is_node_consumed; // set to true when all node rows (based on current input) are consumed + bool m_row_was_expanded; // set to true when row was expanded by at least one child. + // note: when all children are consumed, if row was never expanded, it is + // generated by leaving all children values as nil + + void advance_row_cursor (void); // advance to next row + void start_json_iterator (void); // start json iteration of changing input document + int fetch_row (void); // fetch current row (if not fetched) + void end (void); // finish current node scan + void delete_input_doc (); + + cursor (void); + ~cursor (void); + }; + + scanner::cursor::cursor (void) + : m_child (0) + , m_node (NULL) + , m_input_doc (NULL) + , m_process_doc (NULL) + , m_is_row_fetched (false) + , m_need_advance_row (false) + , m_is_node_consumed (true) + , m_row_was_expanded (false) + { + // + } + + scanner::cursor::~cursor (void) + { + delete_input_doc (); + } + + void + scanner::cursor::delete_input_doc () + { + if (m_input_doc != NULL) + { + db_json_delete_doc (m_input_doc); + } + } + + void + scanner::cursor::advance_row_cursor () + { + // don't advance again in row + m_need_advance_row = false; + + // reset row expansion + m_row_was_expanded = false; + + if (m_node->m_iterator == NULL || !db_json_iterator_has_next (*m_node->m_iterator)) + { + end (); + return; + } + + // advance with row + db_json_iterator_next (*m_node->m_iterator); + m_is_row_fetched = false; + + // advance also with ordinality + m_node->m_ordinality++; + + // reset child to first branch + m_child = 0; + } + + void + scanner::cursor::start_json_iterator (void) + { + // how it works: + // + // based on path definition, we have three cases + // + // [*] - expect and array and expand its elements into rows + 
// .* - expect an object and the values of its members into rows + // - all other paths will just generate one row based on the json object found at path + // + // When array or object expansion must happen, if the input document does not match expected JSON type, no rows + // will be generated. Expected types are DB_JSON_ARRAY of array expansion and DB_JSON_OBJECT for object + // expansion. + + switch (m_node->m_expand_type) + { + case json_table_expand_type::JSON_TABLE_NO_EXPAND: + // nothing to do; + m_is_node_consumed = false; + break; + + case json_table_expand_type::JSON_TABLE_ARRAY_EXPAND: + // only DB_JSON_ARRAY can be expanded + if (db_json_get_type (m_input_doc) == DB_JSON_ARRAY) + { + m_is_node_consumed = false; + db_json_set_iterator (m_node->m_iterator, *m_input_doc); + } + else + { + m_is_node_consumed = true; + } + break; + + case json_table_expand_type::JSON_TABLE_OBJECT_EXPAND: + // only DB_JSON_OBJECT can be expanded + if (db_json_get_type (m_input_doc) == DB_JSON_OBJECT) + { + m_is_node_consumed = false; + db_json_set_iterator (m_node->m_iterator, *m_input_doc); + } + else + { + m_is_node_consumed = true; + } + break; + + default: + assert (false); + break; + } + } + + int + scanner::cursor::fetch_row (void) + { + if (m_is_row_fetched) + { + // already fetched + return NO_ERROR; + } + + // if we have an iterator, value is obtained from iterator. 
otherwise, use m_input_doc + if (m_node->m_iterator != NULL) + { + m_process_doc = db_json_iterator_get_document (*m_node->m_iterator); + } + else + { + assert (m_node->m_expand_type == json_table_expand_type::JSON_TABLE_NO_EXPAND); + m_process_doc = m_input_doc; + } + + if (m_process_doc == NULL) + { + assert (false); + return ER_FAILED; + } + + int error_code = NO_ERROR; + for (size_t i = 0; i < m_node->m_output_columns_size; ++i) + { + error_code = m_node->m_output_columns[i].evaluate (*m_process_doc, m_node->m_ordinality); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + } + + return NO_ERROR; + } + + void + scanner::cursor::end (void) + { + m_is_node_consumed = true; + + db_json_reset_iterator (m_node->m_iterator); + + m_process_doc = NULL; + m_node->clear_columns (false); + } + + size_t + scanner::get_tree_height (const cubxasl::json_table::node &node) + { + size_t max_child_height = 0; + + for (size_t i = 0; i < node.m_nested_nodes_size; ++i) + { + const cubxasl::json_table::node &child = node.m_nested_nodes[i]; + max_child_height = std::max (max_child_height, get_tree_height (child)); + } + + return 1 + max_child_height; + } + + void + scanner::init (cubxasl::json_table::spec_node &spec) + { + m_specp = &spec; + + assert (m_specp->m_node_count > 0); + + m_tree_height = get_tree_height (*m_specp->m_root_node); + + m_scan_cursor = new cursor[m_tree_height]; + + // init cursor nodes to left-most branch + json_table_node *t = m_specp->m_root_node; + m_scan_cursor[0].m_node = t; + for (int i = 1; t->m_nested_nodes_size != 0; t = &t->m_nested_nodes[0], ++i) + { + m_scan_cursor[i].m_node = t; + } + + init_iterators (*m_specp->m_root_node); + } + + void + scanner::clear (xasl_node *xasl_p, bool is_final, bool is_final_clear) + { + // columns should be released every time + m_specp->m_root_node->clear_tree (is_final_clear); + reset_ordinality (*m_specp->m_root_node); + + // all json documents should be release depending on is_final + 
if (is_final) + { + for (size_t i = 0; i < m_tree_height; ++i) + { + cursor &cursor = m_scan_cursor[i]; + cursor.delete_input_doc (); + + cursor.m_child = 0; + cursor.m_is_row_fetched = false; + } + + m_specp->m_root_node->clear_iterators (is_final_clear); + + if (is_final_clear) + { + delete [] m_scan_cursor; + } + } + } + + int + scanner::open (cubthread::entry *thread_p) + { + int error_code = NO_ERROR; + const JSON_DOC *document = NULL; + + // so... we need to generate the whole list file + + // we need the starting value to expand into a list of records + DB_VALUE *value_p = NULL; + error_code = fetch_peek_dbval (thread_p, m_specp->m_json_reguvar, m_vd, NULL, NULL, NULL, &value_p); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + if (value_p == NULL || db_value_is_null (value_p)) + { + assert (false); + return ER_FAILED; + } + + // build m_scan_cursor + + if (db_value_type (value_p) == DB_TYPE_JSON) + { + document = db_get_json_document (value_p); + + error_code = init_cursor (*document, *m_specp->m_root_node, m_scan_cursor[0]); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + } + else + { + // we need json + DB_VALUE json_cast_value; + + // we should use explicit coercion, implicit coercion is not allowed between char and json + tp_domain_status status = tp_value_cast (value_p, &json_cast_value, &tp_Json_domain, false); + if (status != DOMAIN_COMPATIBLE) + { + ASSERT_ERROR_AND_SET (error_code); + return error_code; + } + + document = db_get_json_document (&json_cast_value); + + error_code = init_cursor (*document, *m_specp->m_root_node, m_scan_cursor[0]); + + pr_clear_value (&json_cast_value); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + } + + // if we gather expr from another table, for each row we need to reset the ordinality + reset_ordinality (*m_specp->m_root_node); + + return NO_ERROR; + } + + void + scanner::end (cubthread::entry *thread_p) + { + assert 
(thread_p != NULL); + } + + int + scanner::next_scan (cubthread::entry *thread_p, scan_id_struct &sid) + { + bool has_row = false; + int error_code = NO_ERROR; + DB_LOGICAL logical = V_FALSE; + + if (sid.position == S_BEFORE) + { + error_code = open (thread_p); + if (error_code != NO_ERROR) + { + return error_code; + } + sid.position = S_ON; + sid.status = S_STARTED; + } + else if (sid.position != S_ON) + { + assert (false); + sid.status = S_ENDED; + sid.position = S_AFTER; + return ER_FAILED; + } + + while (true) + { + error_code = scan_next_internal (thread_p, 0, has_row); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + if (!has_row) + { + sid.status = S_ENDED; + sid.position = S_AFTER; + break; + } + + if (m_scan_predicate.pred_expr == NULL) + { + break; + } + + logical = m_scan_predicate.pr_eval_fnc (thread_p, m_scan_predicate.pred_expr, sid.vd, NULL); + if (logical == V_TRUE) + { + break; + } + if (logical == V_ERROR) + { + ASSERT_ERROR_AND_SET (error_code); + return error_code; + } + } + + return NO_ERROR; + } + + int + scanner::set_input_document (cursor &cursor_arg, const cubxasl::json_table::node &node, const JSON_DOC &document) + { + int error_code = NO_ERROR; + + // extract input document + error_code = db_json_extract_document_from_path (&document, node.m_path, cursor_arg.m_input_doc); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + if (cursor_arg.m_input_doc == nullptr) + { + // cannot retrieve input_doc from path + cursor_arg.m_is_node_consumed = true; + } + else + { + // start cursor based on input document + cursor_arg.start_json_iterator (); + } + + return NO_ERROR; + } + + int + scanner::init_cursor (const JSON_DOC &doc, cubxasl::json_table::node &node, cursor &cursor_out) + { + cursor_out.m_is_row_fetched = false; + cursor_out.m_child = 0; + cursor_out.m_node = &node; + + return set_input_document (cursor_out, node, doc); + } + + int + scanner::set_next_cursor (const cursor 
&current_cursor, int next_depth) + { + return init_cursor (*current_cursor.m_process_doc, + current_cursor.m_node->m_nested_nodes[current_cursor.m_child], + m_scan_cursor[next_depth]); + } + + void + scanner::clear_node_columns (cubxasl::json_table::node &node) + { + for (size_t i = 0; i < node.m_output_columns_size; ++i) + { + (void) pr_clear_value (node.m_output_columns[i].m_output_value_pointer); + (void) db_make_null (node.m_output_columns[i].m_output_value_pointer); + } + } + + void + scanner::init_iterators (cubxasl::json_table::node &node) + { + node.init_iterator (); + + for (size_t i = 0; i < node.m_nested_nodes_size; ++i) + { + init_iterators (node.m_nested_nodes[i]); + } + } + + void + scanner::reset_ordinality (cubxasl::json_table::node &node) + { + node.m_ordinality = 1; + + for (size_t i = 0; i < node.m_nested_nodes_size; ++i) + { + reset_ordinality (node.m_nested_nodes[i]); + } + } + + int + scanner::scan_next_internal (cubthread::entry *thread_p, int depth, bool &found_row_output) + { + int error_code = NO_ERROR; + cursor &this_cursor = m_scan_cursor[depth]; + + // check if cursor is already in child node + if (m_scan_cursor_depth >= depth + 1) + { + // advance to child + error_code = scan_next_internal (thread_p, depth + 1, found_row_output); + if (error_code != NO_ERROR) + { + return error_code; + } + if (found_row_output) + { + // advance to new child + return NO_ERROR; + } + else + { + this_cursor.m_child++; + } + } + + // get the cursor from the current depth + assert (this_cursor.m_node != NULL); + + // loop through node's rows and children until all possible rows are generated + while (!this_cursor.m_is_node_consumed) + { + // note - do not loop without taking new action + // an action is either advancing to new row or advancing to new child + if (this_cursor.m_need_advance_row) + { + this_cursor.advance_row_cursor (); + if (this_cursor.m_is_node_consumed) + { + break; + } + } + + // first things first, fetch current row + error_code =
this_cursor.fetch_row (); + if (error_code != NO_ERROR) + { + return error_code; + } + + // if this is leaf node, then we have a new complete row + if (this_cursor.m_node->m_nested_nodes_size == 0) + { + found_row_output = true; + // next time, cursor will have to be incremented + this_cursor.m_need_advance_row = true; + return NO_ERROR; + } + + // non-leaf + // advance to current child + if (this_cursor.m_child == this_cursor.m_node->m_nested_nodes_size) + { + // next time, cursor will have to be incremented + this_cursor.m_need_advance_row = true; + + if (this_cursor.m_row_was_expanded) + { + continue; + } + + found_row_output = true; + return NO_ERROR; + } + + // create cursor for next child + error_code = set_next_cursor (this_cursor, depth + 1); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + cursor &next_cursor = m_scan_cursor[depth + 1]; + + if (!next_cursor.m_is_node_consumed) + { + // advance current level in tree + m_scan_cursor_depth++; + + // expanded successfully on this level + this_cursor.m_row_was_expanded = true; + + error_code = scan_next_internal (thread_p, depth + 1, found_row_output); + if (error_code != NO_ERROR) + { + return error_code; + } + } + else + { + this_cursor.m_child++; + continue; + } + + if (found_row_output) + { + // found a row; scan is stopped + return NO_ERROR; + } + else + { + // child could not generate a row. advance to next + this_cursor.m_child++; + } + } + + // no more rows... 
+ found_row_output = false; + + if (m_scan_cursor_depth > 0) + { + // remove this cursor + m_scan_cursor_depth--; + } + + return NO_ERROR; + } + + SCAN_PRED & + scanner::get_predicate () + { + return m_scan_predicate; + } + + void + scanner::set_value_descriptor (val_descr *vd) + { + m_vd = vd; + } + } // namespace json_table +} // namespace cubscan diff --git a/src/query/scan_json_table.hpp b/src/query/scan_json_table.hpp new file mode 100644 index 00000000000..cbeb8fd8dc3 --- /dev/null +++ b/src/query/scan_json_table.hpp @@ -0,0 +1,173 @@ +/* + * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +// +// scan_json_table.hpp - interface json table scanning +// +// JSON Table Scanner explained +// +// Behavior - todo - add manual reference here +// +// The syntax of JSON table is something like this: +// ... JSON_TABLE (expression, +// '$.[*] COLUMNS(rownum FOR ORDINALITY. +// a STRING PATH '$.a', +// b INT EXISTS '$.b', +// NESTED PATH '$.arr[*]' COLUMNS (c JSON PATH '$.c') as jt +// WHERE b != 0 and a > d... +// +// Expression is the input JSON for table. Each element found in the array of COLUMNS path is expanded into a row. +// (above example expands root of JSON and then the array at $.c). 
+// +// For each row found in root ('$'), column 'a' is value of $.a converted to string, column 'b' is 1 if $.b exists +// and 0 otherwise; these values are repeated for each element found in $.arr, extracting the value of $.arr[*].c +// +// Rows that do not pass the WHERE check are filtered. +// +// NOTE: if there are multiple nested paths to the same node, they are not cross-joined. while one nested path is +// expanded, the values for sibling nested paths will be all null. +// +// +// Implementation +// +// A root scan node is always used based on the input JSON (result of expression) and the first COLUMNS path. For +// each NESTED PATH, a child scan node is generated (a node may have no, one or multiple children scan nodes). +// +// Each scanner::next_scan call generates one row, or none if it was consumed entirely. It starts by generating a +// small row for root node. If it is has (nested) children, for each child one by one, it computes the input node by +// extracting nested node path from its root input and repeats same process until a leaf node reached. +// +// When a leaf-level node row is generated, the scan row is considered complete and next_scan returns success. +// +// A "breadcrumb" like cursor is used to remember where last row is generated. It generates a new row on the same +// leaf node if possible, or clears all values for this node and returns to its parent (non-leaf node). +// +// The parent will then try to advance to another children, or if all children have been processed, it will generate +// a new row. +// +// The process is repeated recursively until all nodes have been consumed and other rows can no longer be generated. +// +// +// Future +// +// Rows are filtered after a complete row is generated. We could partition the scan predicate on scan nodes and +// filter invalid rows at node level, cutting of an entire branch of rows that would all be invalid. 
+// + +#ifndef _SCAN_JSON_TABLE_HPP_ +#define _SCAN_JSON_TABLE_HPP_ + +//#include "dbtype_def.h" +#include "query_evaluator.h" + +#include + +// forward definitions +// access_json_table.hpp +namespace cubxasl +{ + namespace json_table + { + struct spec_node; + struct node; + struct column; + } +} +// db_json.hpp +class JSON_DOC; +class JSON_ITERATOR; +// scan_manager.h +struct scan_id_struct; +struct val_descr; + +// thread_entry.hpp +namespace cubthread +{ + class entry; +} + +namespace cubscan +{ + namespace json_table + { + class scanner + { + public: + + // initialize scanner + void init (cubxasl::json_table::spec_node &spec); + // clear scanner + void clear (xasl_node *xasl_p, bool is_final, bool is_final_clear); + + // open a new scan + int open (cubthread::entry *thread_p); + // end a scan + void end (cubthread::entry *thread_p); + + // next_scan - generate a new row + // + // returns error code or NO_ERROR + // + // sid (in/out) : status and position is updated based on the success of scan + int next_scan (cubthread::entry *thread_p, scan_id_struct &sid); + + SCAN_PRED &get_predicate (); + void set_value_descriptor (val_descr *vd); + + scanner () = default; + + private: + // cursor used to track scanner progress and resume scan on each scan_next call; implementation in cpp file + struct cursor; + + // clear columns fetched values + void clear_node_columns (cubxasl::json_table::node &node); + // reset node ordinality (row number) + void reset_ordinality (cubxasl::json_table::node &node); + + // init iterators considering the expansion type + void init_iterators (cubxasl::json_table::node &node); + + // cursor functions + int init_cursor (const JSON_DOC &doc, cubxasl::json_table::node &node, cursor &cursor_out); + int set_next_cursor (const cursor ¤t_cursor, int next_depth); + + // to start scanning a node, an input document is set + int set_input_document (cursor &cursor, const cubxasl::json_table::node &node, const JSON_DOC &document); + + // compute scan 
tree height; recursive function + size_t get_tree_height (const cubxasl::json_table::node &node); + + // recursive scan next called on json table node / cursor + int scan_next_internal (cubthread::entry *thread_p, int depth, bool &found_row_output); + + cubxasl::json_table::spec_node *m_specp; // pointer to json table spec node in XASL + cursor *m_scan_cursor; // cursor to keep track progress in each scan node + size_t m_scan_cursor_depth; // the current level where the cursor was left + size_t m_tree_height; // will be used to initialize cursor vector + scan_pred m_scan_predicate; // scan predicate to filter generated rows + val_descr *m_vd; + }; + } // namespace json_table +} // namespace cubscan + +// naming convention of SCAN_ID's +using JSON_TABLE_SCAN_ID = cubscan::json_table::scanner; + +#endif // _SCAN_JSON_TABLE_HPP_ diff --git a/src/query/scan_manager.c b/src/query/scan_manager.c index aace3873e23..97c508ae1b4 100644 --- a/src/query/scan_manager.c +++ b/src/query/scan_manager.c @@ -174,6 +174,7 @@ static SCAN_CODE scan_next_index_lookup_heap (THREAD_ENTRY * thread_p, SCAN_ID * static SCAN_CODE scan_next_list_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id); static SCAN_CODE scan_next_showstmt_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id); static SCAN_CODE scan_next_set_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id); +static SCAN_CODE scan_next_json_table_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id); static SCAN_CODE scan_next_value_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id); static SCAN_CODE scan_next_method_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id); static SCAN_CODE scan_handle_single_scan (THREAD_ENTRY * thread_p, SCAN_ID * s_id, QP_SCAN_FUNC next_scan); @@ -3640,7 +3641,7 @@ scan_open_list_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id, scan_init_scan_pred (&llsidp->scan_pred, regu_list_pred, pr, ((pr) ? 
eval_fnc (thread_p, pr, &single_node_type) : NULL)); - /* regulator vairable list for other than predicates */ + /* regulator variable list for other than predicates */ llsidp->rest_regu_list = regu_list_rest; return NO_ERROR; @@ -3835,6 +3836,37 @@ scan_open_set_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id, return NO_ERROR; } +/* + * scan_open_json_table_scan () - + * return: NO_ERROR + * scan_id(out): Scan identifier + * grouped(in): + * single_fetch(in): + * join_dbval(in): + * val_list(in): + * vd(in): + */ +int +scan_open_json_table_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id, int grouped, QPROC_SINGLE_FETCH single_fetch, + DB_VALUE * join_dbval, VAL_LIST * val_list, VAL_DESCR * vd, PRED_EXPR * pr) +{ + DB_TYPE single_node_type = DB_TYPE_NULL; + + /* scan type is JSON_TABLE SCAN */ + scan_id->type = S_JSON_TABLE_SCAN; + + /* initialize SCAN_ID structure */ + /* mvcc_select_lock_needed = false, fixed = true */ + scan_init_scan_id (scan_id, false, S_SELECT, true, grouped, single_fetch, join_dbval, val_list, vd); + + // scan_init_scan_pred + scan_init_scan_pred (&scan_id->s.jtid.get_predicate (), NULL, pr, + ((pr) ? eval_fnc (thread_p, pr, &single_node_type) : NULL)); + scan_id->s.jtid.set_value_descriptor (vd); + + return NO_ERROR; +} + /* * scan_open_method_scan () - * return: NO_ERROR, or ER_code @@ -3888,6 +3920,7 @@ scan_start_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) REGU_VALUE_LIST *regu_value_list = NULL; REGU_VARIABLE_LIST list_node = NULL; MVCC_SNAPSHOT *mvcc_snapshot = NULL; + JSON_TABLE_SCAN_ID *jtidp = NULL; switch (scan_id->type) { @@ -4142,6 +4175,11 @@ scan_start_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) db_make_null (&ssidp->set); break; + case S_JSON_TABLE_SCAN: + jtidp = &scan_id->s.jtid; + // todo: what else to add here? 
+ break; + case S_METHOD_SCAN: break; @@ -4286,6 +4324,7 @@ scan_reset_scan_block (THREAD_ENTRY * thread_p, SCAN_ID * s_id) case S_CLASS_ATTR_SCAN: case S_SET_SCAN: + case S_JSON_TABLE_SCAN: s_id->position = S_BEFORE; break; @@ -4295,7 +4334,6 @@ scan_reset_scan_block (THREAD_ENTRY * thread_p, SCAN_ID * s_id) break; } /* switch (s_id->type) */ - return status; } @@ -4322,8 +4360,8 @@ scan_next_scan_block (THREAD_ENTRY * thread_p, SCAN_ID * s_id) case S_HEAP_SCAN_RECORD_INFO: case S_HEAP_PAGE_SCAN: if (s_id->grouped) - { /* grouped, fixed scan */ - + { + /* grouped, fixed scan */ if (s_id->direction == S_FORWARD) { sp_scan = heap_scanrange_to_following (thread_p, &s_id->s.hsid.scan_range, NULL); @@ -4333,13 +4371,39 @@ scan_next_scan_block (THREAD_ENTRY * thread_p, SCAN_ID * s_id) sp_scan = heap_scanrange_to_prior (thread_p, &s_id->s.hsid.scan_range, NULL); } - return ((sp_scan == S_SUCCESS) ? S_SUCCESS : (sp_scan == S_END) ? S_END : S_ERROR); + if (sp_scan == S_SUCCESS || sp_scan == S_END) + { + return sp_scan; + } + else + { + return S_ERROR; + } } else { - return ((s_id->direction == - S_FORWARD) ? ((s_id->position == S_BEFORE) ? S_SUCCESS : S_END) : ((s_id->position == - S_AFTER) ? S_SUCCESS : S_END)); + if (s_id->direction == S_FORWARD) + { + if (s_id->position == S_BEFORE) + { + return S_SUCCESS; + } + else + { + return S_END; + } + } + else + { + if (s_id->position == S_AFTER) + { + return S_SUCCESS; + } + else + { + return S_END; + } + } } case S_INDX_SCAN: @@ -4385,7 +4449,6 @@ scan_next_scan_block (THREAD_ENTRY * thread_p, SCAN_ID * s_id) s_id->position = S_AFTER; return S_END; } - } else { @@ -4406,6 +4469,7 @@ scan_next_scan_block (THREAD_ENTRY * thread_p, SCAN_ID * s_id) case S_SHOWSTMT_SCAN: case S_SET_SCAN: case S_METHOD_SCAN: + case S_JSON_TABLE_SCAN: case S_VALUES_SCAN: return (s_id->position == S_BEFORE) ? 
S_SUCCESS : S_END; @@ -4431,6 +4495,7 @@ scan_end_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) REGU_VALUES_SCAN_ID *rvsidp; SET_SCAN_ID *ssidp; KEY_VAL_RANGE *key_vals; + JSON_TABLE_SCAN_ID *jtidp; int i; if (scan_id == NULL) @@ -4524,6 +4589,11 @@ scan_end_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) pr_clear_value (&ssidp->set); break; + case S_JSON_TABLE_SCAN: + jtidp = &scan_id->s.jtid; + jtidp->end (thread_p); + break; + case S_METHOD_SCAN: break; @@ -4682,6 +4752,9 @@ scan_close_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) method_close_scan (thread_p, &scan_id->s.vaid.scan_buf); break; + case S_JSON_TABLE_SCAN: + break; + default: /* S_VALUES_SCAN */ break; @@ -4853,6 +4926,10 @@ scan_next_scan_local (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) status = scan_next_set_scan (thread_p, scan_id); break; + case S_JSON_TABLE_SCAN: + status = scan_next_json_table_scan (thread_p, scan_id); + break; + case S_METHOD_SCAN: status = scan_next_method_scan (thread_p, scan_id); break; @@ -6443,7 +6520,6 @@ scan_next_set_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) /* evaluate set expression and put resultant set in DB_VALUE */ while ((qp_scan = qproc_next_set_scan (thread_p, scan_id)) == S_SUCCESS) { - assert (scan_id->val_list != NULL); assert (scan_id->val_list->val_cnt == 1); @@ -6496,12 +6572,39 @@ scan_next_set_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) } return S_SUCCESS; - } /* while ((qp_scan = ) == S_SUCCESS) */ return qp_scan; } +/* + * scan_next_json_table_scan () - The scan is moved to the next json_table scan item. + * return: SCAN_CODE (S_SUCCESS, S_END, S_ERROR) + * scan_id(in/out): Scan identifier + * + * Note: If there are no more scan items, S_END is returned. If an error occurs, S_ERROR is returned. 
+ */ +static SCAN_CODE +scan_next_json_table_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) +{ + int error_code = NO_ERROR; + + // the status of the scan will be put in scan_id->status + error_code = scan_id->s.jtid.next_scan (thread_p, *scan_id); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return S_ERROR; + } + + if (scan_id->status == S_ENDED) + { + return S_END; + } + + return S_SUCCESS; +} + /* * scan_next_method_scan () - The scan is moved to the next method scan item. * return: SCAN_CODE (S_SUCCESS, S_END, S_ERROR) @@ -6540,7 +6643,6 @@ scan_next_method_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) for (src_valp = vl.valp, dest_valp = scan_id->val_list->valp; src_valp && dest_valp; src_valp = src_valp->next, dest_valp = dest_valp->next) { - if (DB_IS_NULL (src_valp->val)) { pr_clear_value (dest_valp->val); @@ -6556,9 +6658,9 @@ scan_next_method_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) { return S_ERROR; } + pr_clear_value (src_valp->val); free_and_init (src_valp->val); - } return S_SUCCESS; @@ -6743,7 +6845,6 @@ scan_prev_scan_local (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) DB_LOGICAL ev_res; QFILE_TUPLE_RECORD tplrec; - switch (scan_id->type) { case S_LIST_SCAN: @@ -6754,7 +6855,6 @@ scan_prev_scan_local (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) while ((qp_scan = qfile_scan_list_prev (thread_p, &llsidp->lsid, &tplrec, PEEK)) == S_SUCCESS) { - /* fetch the values for the predicate from the tuple */ if (scan_id->val_list) { @@ -6829,9 +6929,10 @@ scan_prev_scan_local (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) llsidp->tplrecp->size = tplrec.size; llsidp->tplrecp->tpl = tplrec.tpl; } + return S_SUCCESS; + } - } /* while ((qp_scan = ...) 
== S_SUCCESS) */ if (qp_scan == S_END) { scan_id->position = S_BEFORE; @@ -6843,7 +6944,6 @@ scan_prev_scan_local (THREAD_ENTRY * thread_p, SCAN_ID * scan_id) er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_QPROC_INVALID_XASLNODE, 0); return S_ERROR; } /* switch (scan_id->type) */ - } /* @@ -6876,7 +6976,6 @@ scan_save_scan_pos (SCAN_ID * s_id, SCAN_POS * scan_pos) qfile_save_current_scan_tuple_position (&s_id->s.llsid.lsid, &scan_pos->ls_tplpos); } - /* * scan_jump_scan_pos () - Jump to the given scan position and move the scan * from that point on in the forward direction. @@ -7232,12 +7331,10 @@ resolve_domain_on_regu_operand (REGU_VARIABLE * regu_var, VAL_LIST * ref_val_lis regu_var->domain = p_type_list->domp[pos]; } } - } /* - * scan_init_multi_range_optimization () - initialize structure for multiple - * range optimization + * scan_init_multi_range_optimization () - initialize structure for multiple range optimization * * return: error code * @@ -7306,8 +7403,7 @@ scan_init_multi_range_optimization (THREAD_ENTRY * thread_p, MULTI_RANGE_OPT * m } /* - * scan_dump_key_into_tuple () - outputs the value stored in 'key' into the - * tuple 'tplrec' + * scan_dump_key_into_tuple () - outputs the value stored in 'key' into the tuple 'tplrec' * * return: error code * iscan_id(in): @@ -7332,9 +7428,8 @@ scan_dump_key_into_tuple (THREAD_ENTRY * thread_p, INDX_SCAN_ID * iscan_id, DB_V return ER_FAILED; } - error = - btree_attrinfo_read_dbvalues (thread_p, key, iscan_id->bt_attr_ids, iscan_id->bt_num_attrs, - iscan_id->rest_attrs.attr_cache, -1); + error = btree_attrinfo_read_dbvalues (thread_p, key, iscan_id->bt_attr_ids, iscan_id->bt_num_attrs, + iscan_id->rest_attrs.attr_cache, -1); if (error != NO_ERROR) { return error; @@ -7351,10 +7446,8 @@ scan_dump_key_into_tuple (THREAD_ENTRY * thread_p, INDX_SCAN_ID * iscan_id, DB_V return error; } - error = - qdata_copy_valptr_list_to_tuple (thread_p, iscan_id->indx_cov.output_val_list, iscan_id->indx_cov.val_descr, - tplrec); - + 
error = qdata_copy_valptr_list_to_tuple (thread_p, iscan_id->indx_cov.output_val_list, iscan_id->indx_cov.val_descr, + tplrec); if (error != NO_ERROR) { return error; @@ -7365,7 +7458,6 @@ scan_dump_key_into_tuple (THREAD_ENTRY * thread_p, INDX_SCAN_ID * iscan_id, DB_V #if defined (SERVER_MODE) - /* * scan_print_stats_json () - * return: @@ -7381,12 +7473,13 @@ scan_print_stats_json (SCAN_ID * scan_id, json_t * scan_stats) return; } - scan = - json_pack ("{s:i, s:I, s:I}", "time", TO_MSEC (scan_id->scan_stats.elapsed_scan), "fetch", - scan_id->scan_stats.num_fetches, "ioread", scan_id->scan_stats.num_ioreads); + scan = json_pack ("{s:i, s:I, s:I}", "time", TO_MSEC (scan_id->scan_stats.elapsed_scan), "fetch", + scan_id->scan_stats.num_fetches, "ioread", scan_id->scan_stats.num_ioreads); - if (scan_id->type == S_HEAP_SCAN || scan_id->type == S_LIST_SCAN) + switch (scan_id->type) { + case S_HEAP_SCAN: + case S_LIST_SCAN: json_object_set_new (scan, "readrows", json_integer (scan_id->scan_stats.read_rows)); json_object_set_new (scan, "rows", json_integer (scan_id->scan_stats.qualified_rows)); @@ -7398,9 +7491,9 @@ scan_print_stats_json (SCAN_ID * scan_id, json_t * scan_stats) { json_object_set_new (scan_stats, "temp", scan); } - } - else if (scan_id->type == S_INDX_SCAN) - { + break; + + case S_INDX_SCAN: json_object_set_new (scan, "readkeys", json_integer (scan_id->scan_stats.read_keys)); json_object_set_new (scan, "filteredkeys", json_integer (scan_id->scan_stats.qualified_keys)); json_object_set_new (scan, "rows", json_integer (scan_id->scan_stats.key_qualified_rows)); @@ -7412,9 +7505,8 @@ scan_print_stats_json (SCAN_ID * scan_id, json_t * scan_stats) } else { - lookup = - json_pack ("{s:i, s:i}", "time", TO_MSEC (scan_id->scan_stats.elapsed_lookup), "rows", - scan_id->scan_stats.data_qualified_rows); + lookup = json_pack ("{s:i, s:i}", "time", TO_MSEC (scan_id->scan_stats.elapsed_lookup), "rows", + scan_id->scan_stats.data_qualified_rows); json_object_set_new 
(scan_stats, "lookup", lookup); } @@ -7433,26 +7525,27 @@ scan_print_stats_json (SCAN_ID * scan_id, json_t * scan_stats) { json_object_set_new (scan_stats, "loose", json_true ()); } - } - else if (scan_id->type == S_SHOWSTMT_SCAN) - { + break; + + case S_SHOWSTMT_SCAN: json_object_set_new (scan_stats, "show", scan); - } - else if (scan_id->type == S_SET_SCAN) - { + break; + + case S_SET_SCAN: json_object_set_new (scan_stats, "set", scan); - } - else if (scan_id->type == S_METHOD_SCAN) - { + break; + + case S_METHOD_SCAN: json_object_set_new (scan_stats, "method", scan); - } - else if (scan_id->type == S_CLASS_ATTR_SCAN) - { + break; + + case S_CLASS_ATTR_SCAN: json_object_set_new (scan_stats, "class_attr", scan); - } - else - { + break; + + default: json_object_set_new (scan_stats, "noscan", scan); + break; } } @@ -7469,48 +7562,52 @@ scan_print_stats_text (FILE * fp, SCAN_ID * scan_id) return; } - if (scan_id->type == S_HEAP_SCAN) + switch (scan_id->type) { + case S_HEAP_SCAN: fprintf (fp, "(heap"); - } - else if (scan_id->type == S_INDX_SCAN) - { + break; + + case S_INDX_SCAN: fprintf (fp, "(btree"); - } - else if (scan_id->type == S_LIST_SCAN) - { + break; + + case S_LIST_SCAN: fprintf (fp, "(temp"); - } - else if (scan_id->type == S_SHOWSTMT_SCAN) - { + break; + + case S_SHOWSTMT_SCAN: fprintf (fp, "(show"); - } - else if (scan_id->type == S_SET_SCAN) - { + break; + + case S_SET_SCAN: fprintf (fp, "(set"); - } - else if (scan_id->type == S_METHOD_SCAN) - { + break; + + case S_METHOD_SCAN: fprintf (fp, "(method"); - } - else if (scan_id->type == S_CLASS_ATTR_SCAN) - { + break; + + case S_CLASS_ATTR_SCAN: fprintf (fp, "(class_attr"); - } - else - { + break; + + default: fprintf (fp, "(noscan"); + break; } fprintf (fp, " time: %d, fetch: %lld, ioread: %lld", TO_MSEC (scan_id->scan_stats.elapsed_scan), (long long int) scan_id->scan_stats.num_fetches, (long long int) scan_id->scan_stats.num_ioreads); - if (scan_id->type == S_HEAP_SCAN || scan_id->type == 
S_LIST_SCAN) + switch (scan_id->type) { + case S_HEAP_SCAN: + case S_LIST_SCAN: fprintf (fp, ", readrows: %d, rows: %d)", scan_id->scan_stats.read_rows, scan_id->scan_stats.qualified_rows); - } - else if (scan_id->type == S_INDX_SCAN) - { + break; + + case S_INDX_SCAN: fprintf (fp, ", readkeys: %d, filteredkeys: %d, rows: %d", scan_id->scan_stats.read_keys, scan_id->scan_stats.qualified_keys, scan_id->scan_stats.key_qualified_rows); @@ -7540,10 +7637,11 @@ scan_print_stats_text (FILE * fp, SCAN_ID * scan_id) fprintf (fp, " (lookup time: %d, rows: %d)", TO_MSEC (scan_id->scan_stats.elapsed_lookup), scan_id->scan_stats.data_qualified_rows); } - } - else - { + break; + + default: fprintf (fp, ")"); + break; } } #endif diff --git a/src/query/scan_manager.h b/src/query/scan_manager.h index 61e65e420f5..ac52d88de41 100644 --- a/src/query/scan_manager.h +++ b/src/query/scan_manager.h @@ -37,12 +37,13 @@ #endif #include "btree.h" /* TODO: for BTREE_SCAN */ - -#include "oid.h" /* for OID */ -#include "storage_common.h" /* for PAGEID */ #include "heap_file.h" /* for HEAP_SCANCACHE */ #include "method_scan.h" /* for METHOD_SCAN_BUFFER */ +#include "oid.h" /* for OID */ #include "query_evaluator.h" +#include "access_json_table.hpp" +#include "scan_json_table.hpp" +#include "storage_common.h" /* for PAGEID */ /* * TYPEDEFS RELATED TO THE SCAN DATA STRUCTURES @@ -57,6 +58,7 @@ typedef enum S_INDX_SCAN, S_LIST_SCAN, S_SET_SCAN, + S_JSON_TABLE_SCAN, S_METHOD_SCAN, S_VALUES_SCAN, /* regu_values_list scan */ S_SHOWSTMT_SCAN, @@ -331,6 +333,7 @@ struct scan_id_struct VA_SCAN_ID vaid; /* Value Array Identifier */ REGU_VALUES_SCAN_ID rvsid; /* regu_variable list identifier */ SHOWSTMT_SCAN_ID stsid; /* show stmt identifier */ + JSON_TABLE_SCAN_ID jtid; } s; SCAN_STATS scan_stats; @@ -423,6 +426,9 @@ extern int scan_open_set_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id, VAL_DESCR * vd, /* fields of SET_SCAN_ID */ REGU_VARIABLE * set_ptr, REGU_VARIABLE_LIST regu_list_pred, PRED_EXPR 
* pr); +extern int scan_open_json_table_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id, int grouped, + QPROC_SINGLE_FETCH single_fetch, DB_VALUE * join_dbval, VAL_LIST * val_list, + VAL_DESCR * vd, PRED_EXPR * pr); extern int scan_open_method_scan (THREAD_ENTRY * thread_p, SCAN_ID * scan_id, /* fields of SCAN_ID */ int grouped, QPROC_SINGLE_FETCH single_fetch, DB_VALUE * join_dbval, diff --git a/src/query/stream_to_xasl.c b/src/query/stream_to_xasl.c index 5547895a23f..45e232d074b 100644 --- a/src/query/stream_to_xasl.c +++ b/src/query/stream_to_xasl.c @@ -31,92 +31,11 @@ #include #include -#include "dbtype.h" -#include "error_manager.h" #include "stream_to_xasl.h" -#include "thread_manager.hpp" - -/* memory alignment unit - to align stored XASL tree nodes */ -#define ALIGN_UNIT sizeof(double) -#define ALIGN_MASK (ALIGN_UNIT - 1) -#define MAKE_ALIGN(x) (((x) & ~ALIGN_MASK) + \ - (((x) & ALIGN_MASK) ? ALIGN_UNIT : 0)) - -/* to limit size of XASL trees */ -#define OFFSETS_PER_BLOCK 256 -#define START_PTR_PER_BLOCK 15 -#define MAX_PTR_BLOCKS 256 - -#define PTR_BLOCK(ptr) (((UINTPTR) ptr) / sizeof(UINTPTR)) % MAX_PTR_BLOCKS - -/* - * the linear byte stream for store the given XASL tree is allocated - * and expanded dynamically on demand by the following amount of bytes - */ -#define STREAM_EXPANSION_UNIT (OFFSETS_PER_BLOCK * sizeof(int)) -#define BUFFER_EXPANSION 4 - -#define BOUND_VAL (1 << ((OR_INT_SIZE * 8) - 2)) - -/* structure of a visited pointer constant */ -typedef struct visited_ptr VISITED_PTR; -struct visited_ptr -{ - const void *ptr; /* a pointer constant */ - void *str; /* where the struct pointed by 'ptr' is stored */ -}; - -/* structure for additional memory during filtered predicate unpacking */ -typedef struct unpack_extra_buf UNPACK_EXTRA_BUF; -struct unpack_extra_buf -{ - char *buff; - UNPACK_EXTRA_BUF *next; -}; - -/* structure to hold information needed during packing */ -typedef struct xasl_unpack_info XASL_UNPACK_INFO; -struct xasl_unpack_info 
-{ - char *packed_xasl; /* ptr to packed xasl tree */ -#if defined (SERVER_MODE) - THREAD_ENTRY *thrd; /* used for private allocation */ -#endif /* SERVER_MODE */ - /* blocks of visited pointer constants */ - VISITED_PTR *ptr_blocks[MAX_PTR_BLOCKS]; - - char *alloc_buf; /* alloced buf */ - - int packed_size; /* packed xasl tree size */ - - /* low-water-mark of visited pointers */ - int ptr_lwm[MAX_PTR_BLOCKS]; - - /* max number of visited pointers */ - int ptr_max[MAX_PTR_BLOCKS]; - - int alloc_size; /* alloced buf size */ - - /* list of additional buffers allocated during xasl unpacking */ - UNPACK_EXTRA_BUF *additional_buffers; - /* 1 if additional buffers should be tracked */ - int track_allocated_bufers; - - bool use_xasl_clone; /* true, if uses xasl clone */ -}; - -#if !defined(SERVER_MODE) -static XASL_UNPACK_INFO *xasl_unpack_info; -static int stx_Xasl_errcode = NO_ERROR; -#endif /* !SERVER_MODE */ - -static int stx_get_xasl_errcode (THREAD_ENTRY * thread_p); -static void stx_set_xasl_errcode (THREAD_ENTRY * thread_p, int errcode); -static XASL_UNPACK_INFO *stx_get_xasl_unpack_info_ptr (THREAD_ENTRY * thread_p); -#if defined(SERVER_MODE) -static void stx_set_xasl_unpack_info_ptr (THREAD_ENTRY * thread_p, XASL_UNPACK_INFO * ptr); -#endif /* SERVER_MODE */ +#include "dbtype.h" +#include "error_manager.h" +#include "xasl_stream.hpp" static ACCESS_SPEC_TYPE *stx_restore_access_spec_type (THREAD_ENTRY * thread_p, char **ptr, void *arg); static AGGREGATE_TYPE *stx_restore_aggregate_type (THREAD_ENTRY * thread_p, char *ptr); @@ -137,7 +56,6 @@ static REGU_VARIABLE *stx_restore_regu_variable (THREAD_ENTRY * thread_p, char * static REGU_VARIABLE_LIST stx_restore_regu_variable_list (THREAD_ENTRY * thread_p, char *ptr); static REGU_VARLIST_LIST stx_restore_regu_varlist_list (THREAD_ENTRY * thread_p, char *ptr); static SORT_LIST *stx_restore_sort_list (THREAD_ENTRY * thread_p, char *ptr); -static char *stx_restore_string (THREAD_ENTRY * thread_p, char *ptr); static 
VAL_LIST *stx_restore_val_list (THREAD_ENTRY * thread_p, char *ptr); static DB_VALUE *stx_restore_db_value (THREAD_ENTRY * thread_p, char *ptr); #if defined(ENABLE_UNUSED_FUNCTION) @@ -202,14 +120,12 @@ static char *stx_build_regu_variable (THREAD_ENTRY * thread_p, char *tmp, REGU_V static char *stx_unpack_regu_variable_value (THREAD_ENTRY * thread_p, char *tmp, REGU_VARIABLE * ptr); static char *stx_build_attr_descr (THREAD_ENTRY * thread_p, char *tmp, ATTR_DESCR * ptr); static char *stx_build_pos_descr (char *tmp, QFILE_TUPLE_VALUE_POSITION * ptr); -static char *stx_build_db_value (THREAD_ENTRY * thread_p, char *tmp, DB_VALUE * ptr); static char *stx_build_arith_type (THREAD_ENTRY * thread_p, char *tmp, ARITH_TYPE * ptr); static char *stx_build_aggregate_type (THREAD_ENTRY * thread_p, char *tmp, AGGREGATE_TYPE * ptr); static char *stx_build_function_type (THREAD_ENTRY * thread_p, char *tmp, FUNCTION_TYPE * ptr); static char *stx_build_analytic_type (THREAD_ENTRY * thread_p, char *tmp, ANALYTIC_TYPE * ptr); static char *stx_build_analytic_eval_type (THREAD_ENTRY * thread_p, char *tmp, ANALYTIC_EVAL_TYPE * ptr); static char *stx_build_srlist_id (THREAD_ENTRY * thread_p, char *tmp, QFILE_SORTED_LIST_ID * ptr); -static char *stx_build_string (THREAD_ENTRY * thread_p, char *tmp, char *ptr); static char *stx_build_sort_list (THREAD_ENTRY * thread_p, char *tmp, SORT_LIST * ptr); static char *stx_build_connectby_proc (THREAD_ENTRY * thread_p, char *tmp, CONNECTBY_PROC_NODE * ptr); @@ -219,14 +135,8 @@ static char *stx_build_regu_value_list (THREAD_ENTRY * thread_p, char *ptr, REGU TP_DOMAIN * domain); static void stx_init_regu_variable (REGU_VARIABLE * regu); -static int stx_mark_struct_visited (THREAD_ENTRY * thread_p, const void *ptr, void *str); -static void *stx_get_struct_visited_ptr (THREAD_ENTRY * thread_p, const void *ptr); -static void stx_free_visited_ptrs (THREAD_ENTRY * thread_p); -static char *stx_alloc_struct (THREAD_ENTRY * thread_p, int size); -static int 
stx_init_xasl_unpack_info (THREAD_ENTRY * thread_p, char *xasl_stream, int xasl_stream_size); static char *stx_build_regu_variable_list (THREAD_ENTRY * thread_p, char *ptr, REGU_VARIABLE_LIST * regu_var_list); - #if defined(ENABLE_UNUSED_FUNCTION) static char *stx_unpack_char (char *tmp, char *ptr); static char *stx_unpack_long (char *tmp, long *ptr); @@ -256,7 +166,7 @@ stx_map_stream_to_xasl_node_header (THREAD_ENTRY * thread_p, XASL_NODE_HEADER * offset = OR_INT_SIZE + /* xasl stream header size */ xasl_stream_header_size + /* xasl stream header data */ OR_INT_SIZE; /* xasl stream body size */ - offset = MAKE_ALIGN (offset); + offset = xasl_stream_make_align (offset); ptr = xasl_stream + offset; OR_UNPACK_XASL_NODE_HEADER (ptr, xasl_header_p); return NO_ERROR; @@ -303,7 +213,7 @@ stx_map_stream_to_xasl (THREAD_ENTRY * thread_p, XASL_NODE ** xasl_tree, bool us offset = sizeof (int) /* [size of header data] */ + header_size /* [header data] */ + sizeof (int); /* [size of body data] */ - offset = MAKE_ALIGN (offset); + offset = xasl_stream_make_align (offset); /* restore XASL tree from body data of the stream buffer */ xasl = stx_restore_xasl_node (thread_p, xasl_stream + offset); @@ -379,7 +289,7 @@ stx_map_stream_to_filter_pred (THREAD_ENTRY * thread_p, PRED_EXPR_WITH_CONTEXT * offset = sizeof (int) /* [size of header data] */ + header_size /* [header data] */ + sizeof (int); /* [size of body data] */ - offset = MAKE_ALIGN (offset); + offset = xasl_stream_make_align (offset); /* restore XASL tree from body data of the stream buffer */ pwc = stx_restore_filter_pred_node (thread_p, pred_stream + offset); @@ -408,7 +318,7 @@ stx_map_stream_to_filter_pred (THREAD_ENTRY * thread_p, PRED_EXPR_WITH_CONTEXT * /* * stx_map_stream_to_func_pred () - * return: if successful, return 0, otherwise non-zero error code - * xasl(in) : pointer to where to return the unpacked FUNC_PRED + * xasl(in) : pointer to where to return the unpacked FUNC_PRED * xasl_stream(in) : pointer to 
xasl stream * xasl_stream_size(in) : # of bytes in xasl_stream * xasl_unpack_info_ptr(in) : pointer to where to return the pack info @@ -439,7 +349,7 @@ stx_map_stream_to_func_pred (THREAD_ENTRY * thread_p, FUNC_PRED ** xasl, char *x offset = sizeof (int) /* [size of header data] */ + header_size /* [header data] */ + sizeof (int); /* [size of body data] */ - offset = MAKE_ALIGN (offset); + offset = xasl_stream_make_align (offset); /* restore XASL tree from body data of the stream buffer */ p_xasl = stx_restore_func_pred (thread_p, xasl_stream + offset); @@ -1392,51 +1302,6 @@ stx_restore_method_sig (THREAD_ENTRY * thread_p, char *ptr, int count) return method_sig; } -static char * -stx_restore_string (THREAD_ENTRY * thread_p, char *ptr) -{ - char *string; - int length; - - if (ptr == NULL) - { - return NULL; - } - - string = (char *) stx_get_struct_visited_ptr (thread_p, ptr); - if (string != NULL) - { - return string; - } - - length = OR_GET_INT (ptr); - - if (length == -1) - { - /* unpack null-string */ - assert (string == NULL); - } - else - { - assert_release (length > 0); - - string = (char *) stx_alloc_struct (thread_p, length); - if (string == NULL) - { - stx_set_xasl_errcode (thread_p, ER_OUT_OF_VIRTUAL_MEMORY); - return NULL; - } - - if (stx_mark_struct_visited (thread_p, ptr, string) == ER_FAILED - || stx_build_string (thread_p, ptr, string) == NULL) - { - return NULL; - } - } - - return string; -} - static DB_VALUE ** stx_restore_db_value_array_extra (THREAD_ENTRY * thread_p, char *ptr, int nelements, int total_nelements) { @@ -2283,7 +2148,7 @@ stx_build_xasl_node (THREAD_ENTRY * thread_p, char *ptr, XASL_NODE * xasl) ptr = or_unpack_int (ptr, &xasl->mvcc_reev_extra_cls_cnt); #if defined (ENABLE_COMPOSITE_LOCK) - /* + /* * Note that the composite lock block is strictly a server side block * and was not packed. We'll simply clear the memory. 
*/ @@ -2363,20 +2228,8 @@ stx_build_xasl_node (THREAD_ENTRY * thread_p, char *ptr, XASL_NODE * xasl) ptr = or_unpack_int (ptr, &tmp); xasl->iscan_oid_order = (bool) tmp; - ptr = or_unpack_int (ptr, &offset); - if (offset == 0) - { - assert (false); - xasl->query_alias = NULL; - } - else - { - xasl->query_alias = stx_restore_string (thread_p, &xasl_unpack_info->packed_xasl[offset]); - if (xasl->query_alias == NULL) - { - goto error; - } - } + xasl->query_alias = stx_restore_string (thread_p, ptr); + assert (xasl->query_alias != NULL); ptr = or_unpack_int (ptr, &offset); if (offset == 0) @@ -2602,18 +2455,15 @@ stx_build_method_sig (THREAD_ENTRY * thread_p, char *ptr, METHOD_SIG * method_si int num_args, n; XASL_UNPACK_INFO *xasl_unpack_info = stx_get_xasl_unpack_info_ptr (thread_p); - ptr = or_unpack_int (ptr, &offset); - assert (offset > 0); - method_sig->method_name = stx_restore_string (thread_p, &xasl_unpack_info->packed_xasl[offset]); + method_sig->method_name = stx_restore_string (thread_p, ptr); if (method_sig->method_name == NULL) { + assert (false); goto error; } - ptr = or_unpack_int (ptr, &offset); - assert (offset > 0); /* is can be null */ - method_sig->class_name = stx_restore_string (thread_p, &xasl_unpack_info->packed_xasl[offset]); + method_sig->class_name = stx_restore_string (thread_p, ptr); ptr = or_unpack_int (ptr, (int *) &method_sig->method_type); ptr = or_unpack_int (ptr, &method_sig->num_method_args); @@ -4564,6 +4414,10 @@ stx_build_access_spec_type (THREAD_ENTRY * thread_p, char *ptr, ACCESS_SPEC_TYPE ptr = stx_build_method_spec_type (thread_p, ptr, &ACCESS_SPEC_METHOD_SPEC (access_spec)); break; + case TARGET_JSON_TABLE: + ptr = stx_build (thread_p, ptr, ACCESS_SPEC_JSON_TABLE_SPEC (access_spec)); + break; + default: stx_set_xasl_errcode (thread_p, ER_QPROC_INVALID_XASLNODE); return NULL; @@ -4578,6 +4432,12 @@ stx_build_access_spec_type (THREAD_ENTRY * thread_p, char *ptr, ACCESS_SPEC_TYPE memset (&access_spec->s_id, '\0', sizeof 
(SCAN_ID)); access_spec->s_id.status = S_CLOSED; + if (access_spec->type == TARGET_JSON_TABLE) + { + // also initialize scan part; it is enough to call it once here, not on each query execution + access_spec->s_id.s.jtid.init (access_spec->s.json_table_node); + } + access_spec->grouped_scan = false; access_spec->fixed_scan = false; @@ -4589,7 +4449,7 @@ stx_build_access_spec_type (THREAD_ENTRY * thread_p, char *ptr, ACCESS_SPEC_TYPE access_spec->curent = NULL; access_spec->pruned = false; - access_spec->clear_value_at_clone_decache = false; + access_spec->clear_value_at_clone_decache = xasl_unpack_info->use_xasl_clone; ptr = or_unpack_int (ptr, &offset); if (offset == 0) { @@ -4602,10 +4462,6 @@ stx_build_access_spec_type (THREAD_ENTRY * thread_p, char *ptr, ACCESS_SPEC_TYPE { goto error; } - if (xasl_unpack_info->use_xasl_clone && !db_value_is_null (access_spec->s_dbval)) - { - access_spec->clear_value_at_clone_decache = true; - } } access_spec->parts = NULL; @@ -5540,14 +5396,6 @@ stx_build_pos_descr (char *ptr, QFILE_TUPLE_VALUE_POSITION * position_descr) return ptr; } -static char * -stx_build_db_value (THREAD_ENTRY * thread_p, char *ptr, DB_VALUE * value) -{ - ptr = or_unpack_db_value (ptr, value); - - return ptr; -} - static char * stx_build_arith_type (THREAD_ENTRY * thread_p, char *ptr, ARITH_TYPE * arith_type) { @@ -5749,8 +5597,7 @@ stx_build_aggregate_type (THREAD_ENTRY * thread_p, char *ptr, AGGREGATE_TYPE * a aggregate->opr_dbtype = (DB_TYPE) tmp; aggregate->original_opr_dbtype = aggregate->opr_dbtype; - /* operand */ - ptr = stx_build_regu_variable (thread_p, ptr, &aggregate->operand); + ptr = stx_build_regu_variable_list (thread_p, ptr, &aggregate->operands); if (ptr == NULL) { return NULL; @@ -6123,20 +5970,6 @@ stx_build_srlist_id (THREAD_ENTRY * thread_p, char *ptr, QFILE_SORTED_LIST_ID * return ptr; } -static char * -stx_build_string (THREAD_ENTRY * thread_p, char *ptr, char *string) -{ - int offset; - - ptr = or_unpack_int (ptr, &offset); - 
assert_release (offset > 0); - - (void) memcpy (string, ptr, offset); - ptr += offset; - - return ptr; -} - static char * stx_build_sort_list (THREAD_ENTRY * thread_p, char *ptr, SORT_LIST * sort_list) { @@ -6554,290 +6387,6 @@ stx_init_regu_variable (REGU_VARIABLE * regu) REGU_VARIABLE_XASL (regu) = NULL; } -/* - * stx_mark_struct_visited () - - * return: if successful, return NO_ERROR, otherwise - * ER_FAILED and error code is set to xasl_errcode - * ptr(in) : pointer constant to be marked visited - * str(in) : where the struct pointed by 'ptr' is stored - * - * Note: mark the given pointer constant as visited to avoid - * duplicated storage of a struct which is pointed by more than one node - */ -static int -stx_mark_struct_visited (THREAD_ENTRY * thread_p, const void *ptr, void *str) -{ - int new_lwm; - int block_no; - XASL_UNPACK_INFO *xasl_unpack_info = stx_get_xasl_unpack_info_ptr (thread_p); - - if (thread_p == NULL) - { - thread_p = thread_get_thread_entry_info (); - } - - block_no = PTR_BLOCK (ptr); - new_lwm = xasl_unpack_info->ptr_lwm[block_no]; - - if (xasl_unpack_info->ptr_max[block_no] == 0) - { - xasl_unpack_info->ptr_max[block_no] = START_PTR_PER_BLOCK; - xasl_unpack_info->ptr_blocks[block_no] = - (VISITED_PTR *) db_private_alloc (thread_p, sizeof (VISITED_PTR) * xasl_unpack_info->ptr_max[block_no]); - } - else if (xasl_unpack_info->ptr_max[block_no] <= new_lwm) - { - xasl_unpack_info->ptr_max[block_no] *= 2; - xasl_unpack_info->ptr_blocks[block_no] = - (VISITED_PTR *) db_private_realloc (thread_p, xasl_unpack_info->ptr_blocks[block_no], - sizeof (VISITED_PTR) * xasl_unpack_info->ptr_max[block_no]); - } - - if (xasl_unpack_info->ptr_blocks[block_no] == (VISITED_PTR *) NULL) - { - stx_set_xasl_errcode (thread_p, ER_OUT_OF_VIRTUAL_MEMORY); - return ER_FAILED; - } - - xasl_unpack_info->ptr_blocks[block_no][new_lwm].ptr = ptr; - xasl_unpack_info->ptr_blocks[block_no][new_lwm].str = str; - - xasl_unpack_info->ptr_lwm[block_no]++; - - return NO_ERROR; -} 
- -/* - * stx_get_struct_visited_ptr () - - * return: if the ptr is already visited, the offset of - * position where the node pointed by 'ptr' is stored, - * otherwise, ER_FAILED (xasl_errcode is NOT set) - * ptr(in) : pointer constant to be checked if visited or not - * - * Note: check if the node pointed by `ptr` is already stored or - * not to avoid multiple store of the same node - */ -static void * -stx_get_struct_visited_ptr (THREAD_ENTRY * thread_p, const void *ptr) -{ - int block_no; - int element_no; - XASL_UNPACK_INFO *xasl_unpack_info = stx_get_xasl_unpack_info_ptr (thread_p); - - block_no = PTR_BLOCK (ptr); - - if (xasl_unpack_info->ptr_lwm[block_no] <= 0) - { - return NULL; - } - - for (element_no = 0; element_no < xasl_unpack_info->ptr_lwm[block_no]; element_no++) - { - if (ptr == xasl_unpack_info->ptr_blocks[block_no][element_no].ptr) - { - return (xasl_unpack_info->ptr_blocks[block_no][element_no].str); - } - } - - return NULL; -} - -/* - * stx_free_visited_ptrs () - - * return: - * - * Note: free memory allocated to manage visited ptr constants - */ -static void -stx_free_visited_ptrs (THREAD_ENTRY * thread_p) -{ - int i; - XASL_UNPACK_INFO *xasl_unpack_info = stx_get_xasl_unpack_info_ptr (thread_p); - - for (i = 0; i < MAX_PTR_BLOCKS; i++) - { - xasl_unpack_info->ptr_lwm[i] = 0; - xasl_unpack_info->ptr_max[i] = 0; - if (xasl_unpack_info->ptr_blocks[i]) - { - db_private_free_and_init (thread_p, xasl_unpack_info->ptr_blocks[i]); - xasl_unpack_info->ptr_blocks[i] = (VISITED_PTR *) 0; - } - } -} - -/* - * stx_alloc_struct () - - * return: - * size(in) : # of bytes of the node - * - * Note: allocate storage for structures pointed to from the xasl tree. 
- */ -static char * -stx_alloc_struct (THREAD_ENTRY * thread_p, int size) -{ - char *ptr; - XASL_UNPACK_INFO *xasl_unpack_info = stx_get_xasl_unpack_info_ptr (thread_p); - - if (!size) - { - return NULL; - } - - size = MAKE_ALIGN (size); /* alignment */ - if (size > xasl_unpack_info->alloc_size) - { /* need to alloc */ - int p_size; - - p_size = MAX (size, xasl_unpack_info->packed_size); - p_size = MAKE_ALIGN (p_size); /* alignment */ - ptr = (char *) db_private_alloc (thread_p, p_size); - if (ptr == NULL) - { - return NULL; /* error */ - } - xasl_unpack_info->alloc_size = p_size; - xasl_unpack_info->alloc_buf = ptr; - if (xasl_unpack_info->track_allocated_bufers) - { - UNPACK_EXTRA_BUF *add_buff = NULL; - add_buff = (UNPACK_EXTRA_BUF *) db_private_alloc (thread_p, sizeof (UNPACK_EXTRA_BUF)); - if (add_buff == NULL) - { - db_private_free_and_init (thread_p, ptr); - return NULL; - } - add_buff->buff = ptr; - add_buff->next = NULL; - - if (xasl_unpack_info->additional_buffers == NULL) - { - xasl_unpack_info->additional_buffers = add_buff; - } - else - { - add_buff->next = xasl_unpack_info->additional_buffers; - xasl_unpack_info->additional_buffers = add_buff; - } - } - } - - /* consume alloced buffer */ - ptr = xasl_unpack_info->alloc_buf; - xasl_unpack_info->alloc_size -= size; - xasl_unpack_info->alloc_buf += size; - - return ptr; -} - -/* - * stx_init_xasl_unpack_info () - - * return: - * xasl_stream(in) : pointer to xasl stream - * xasl_stream_size(in) : - * - * Note: initialize the xasl pack information. 
- */ -static int -stx_init_xasl_unpack_info (THREAD_ENTRY * thread_p, char *xasl_stream, int xasl_stream_size) -{ - int n; -#if defined(SERVER_MODE) - XASL_UNPACK_INFO *xasl_unpack_info; -#endif /* SERVER_MODE */ - int head_offset, body_offset; - -#define UNPACK_SCALE 3 /* TODO: assume */ - - head_offset = sizeof (XASL_UNPACK_INFO); - head_offset = MAKE_ALIGN (head_offset); - body_offset = xasl_stream_size * UNPACK_SCALE; - body_offset = MAKE_ALIGN (body_offset); -#if defined(SERVER_MODE) - xasl_unpack_info = (XASL_UNPACK_INFO *) db_private_alloc (thread_p, head_offset + body_offset); - stx_set_xasl_unpack_info_ptr (thread_p, xasl_unpack_info); -#else /* SERVER_MODE */ - xasl_unpack_info = (XASL_UNPACK_INFO *) db_private_alloc (NULL, head_offset + body_offset); -#endif /* SERVER_MODE */ - if (xasl_unpack_info == NULL) - { - return ER_FAILED; - } - xasl_unpack_info->packed_xasl = xasl_stream; - xasl_unpack_info->packed_size = xasl_stream_size; - for (n = 0; n < MAX_PTR_BLOCKS; ++n) - { - xasl_unpack_info->ptr_blocks[n] = (VISITED_PTR *) 0; - xasl_unpack_info->ptr_lwm[n] = 0; - xasl_unpack_info->ptr_max[n] = 0; - } - xasl_unpack_info->alloc_size = xasl_stream_size * UNPACK_SCALE; - xasl_unpack_info->alloc_buf = (char *) xasl_unpack_info + head_offset; - xasl_unpack_info->additional_buffers = NULL; - xasl_unpack_info->track_allocated_bufers = 0; -#if defined (SERVER_MODE) - xasl_unpack_info->thrd = thread_p; -#endif /* SERVER_MODE */ - - return NO_ERROR; -} - -/* - * stx_get_xasl_unpack_info_ptr () - - * return: - */ -static XASL_UNPACK_INFO * -stx_get_xasl_unpack_info_ptr (THREAD_ENTRY * thread_p) -{ -#if defined(SERVER_MODE) - return (XASL_UNPACK_INFO *) thread_p->xasl_unpack_info_ptr; -#else /* SERVER_MODE */ - return (XASL_UNPACK_INFO *) xasl_unpack_info; -#endif /* SERVER_MODE */ -} - -#if defined(SERVER_MODE) -/* - * stx_set_xasl_unpack_info_ptr () - - * return: - * ptr(in) : - */ -static void -stx_set_xasl_unpack_info_ptr (THREAD_ENTRY * thread_p, 
XASL_UNPACK_INFO * ptr) -{ - thread_p->xasl_unpack_info_ptr = ptr; -} -#endif /* SERVER_MODE */ - -/* - * stx_get_xasl_errcode () - - * return: - */ -static int -stx_get_xasl_errcode (THREAD_ENTRY * thread_p) -{ -#if defined(SERVER_MODE) - return thread_p->xasl_errcode; -#else /* SERVER_MODE */ - return stx_Xasl_errcode; -#endif /* SERVER_MODE */ -} - -/* - * stx_set_xasl_errcode () - - * return: - * errcode(in) : - */ -static void -stx_set_xasl_errcode (THREAD_ENTRY * thread_p, int errcode) -{ -#if defined(SERVER_MODE) - thread_p->xasl_errcode = errcode; -#else /* SERVER_MODE */ - stx_Xasl_errcode = errcode; -#endif /* SERVER_MODE */ -} - #if defined(ENABLE_UNUSED_FUNCTION) /* * stx_unpack_char () - @@ -6893,3 +6442,9 @@ stx_init_analytic_type_unserialized_fields (ANALYTIC_TYPE * analytic) /* curr_cnt */ analytic->curr_cnt = 0; } + +char * +stx_build (THREAD_ENTRY * thread_p, char *ptr, regu_variable_node & reguvar) +{ + return stx_build_regu_variable (thread_p, ptr, ®uvar); +} diff --git a/src/query/stream_to_xasl.h b/src/query/stream_to_xasl.h index 68a02737936..f310c4b27c7 100644 --- a/src/query/stream_to_xasl.h +++ b/src/query/stream_to_xasl.h @@ -21,16 +21,12 @@ #define _STREAM_TO_XASL_H_ #if !defined (SERVER_MODE) && !defined (SA_MODE) -#error Belongs to server module +#error Belongs only to server or stand-alone modules. #endif /* !defined (SERVER_MODE) && !defined (SA_MODE) */ #include "thread_compat.hpp" #include "xasl.h" -#if !defined (SERVER_MODE) && !defined (SA_MODE) -#error Belongs only to server or stand-alone modules. 
-#endif /* !defined (SERVER_MODE) && !defined (SA_MODE) */ - extern int stx_map_stream_to_xasl (THREAD_ENTRY * thread_p, XASL_NODE ** xasl_tree, bool use_xasl_clone, char *xasl_stream, int xasl_stream_size, void **xasl_unpack_info_ptr); extern int stx_map_stream_to_filter_pred (THREAD_ENTRY * thread_p, PRED_EXPR_WITH_CONTEXT ** pred_expr_tree, diff --git a/src/query/string_opfunc.c b/src/query/string_opfunc.c index 57129f5e512..27c59d6c358 100644 --- a/src/query/string_opfunc.c +++ b/src/query/string_opfunc.c @@ -42,6 +42,7 @@ #include "system_parameter.h" #include "intl_support.h" #include "error_manager.h" +#include "tz_support.h" #include "db_date.h" #include "misc_string.h" #include "md5.h" @@ -50,6 +51,7 @@ #include "tz_support.h" #include "object_primitive.h" #include "dbtype.h" +#include "elo.h" #include "db_elo.h" #include #if !defined (SERVER_MODE) @@ -284,6 +286,7 @@ static int print_string_date_token (const STRING_DATE_TOKEN token_type, const IN static void convert_locale_number (char *sz, const int size, const INTL_LANG src_locale, const INTL_LANG dst_locale); static int parse_tzd (const char *str, const int max_expect_len); static int db_value_to_json_doc (const DB_VALUE & value, REFPTR (JSON_DOC, json)); +static int db_json_merge_helper (DB_VALUE * result, DB_VALUE * arg[], int const num_args, bool patch = false); #define TRIM_FORMAT_STRING(sz, n) {if (strlen(sz) > n) sz[n] = 0;} #define WHITESPACE(c) ((c) == ' ' || (c) == '\t' || (c) == '\r' || (c) == '\n') @@ -296,6 +299,9 @@ static int db_value_to_json_doc (const DB_VALUE & value, REFPTR (JSON_DOC, json) || (c) == ';' || (c) == ':' || (c) == ' ' \ || (c) == '\t' || (c) == '\n') +/* character that need escaping when making Json String */ +#define ESCAPE_CHAR(c) (c <= 0x1f || (c) == '"' || (c) == '\\') + /* concatenate a char to s */ #define STRCHCAT(s, c) \ {\ @@ -767,7 +773,7 @@ db_string_unique_prefix (const DB_VALUE * db_string1, const DB_VALUE * db_string return ER_GENERIC_ERROR; } - /* + /* * A 
string which is NULL (not the same as a NULL string) is * ordered less than a string which is not NULL. Since string2 is * assumed to be strictly > string1, string2 can never be NULL. @@ -776,7 +782,7 @@ db_string_unique_prefix (const DB_VALUE * db_string1, const DB_VALUE * db_string { db_value_domain_init (db_result, result_type, precision, 0); } - /* + /* * Find the first byte where the 2 strings differ. Set the result * accordingly. */ @@ -808,8 +814,8 @@ db_string_unique_prefix (const DB_VALUE * db_string1, const DB_VALUE * db_string } else if (result_type == DB_TYPE_VARNCHAR) { - /* This is going to look a lot like qstr_trim_trailing. We don't call qstr_trim_trailing because he works on - * length of characters and we need to work on length of bytes. We could calculate the length in characters, + /* This is going to look a lot like qstr_trim_trailing. We don't call qstr_trim_trailing because he works on + * length of characters and we need to work on length of bytes. We could calculate the length in characters, * but that requires a full scan of the strings which is not necessary. */ int i, pad_size, trim_length, cmp_flag, prev_size; unsigned char *prev_ptr, *current_ptr, pad[2]; @@ -972,12 +978,12 @@ db_string_concatenate (const DB_VALUE * string1, const DB_VALUE * string2, DB_VA DB_TYPE string_type1, string_type2; bool is_inplace_concat; - /* + /* * Initialize status value */ *data_status = DATA_STATUS_OK; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (string1 != (DB_VALUE *) NULL); @@ -992,7 +998,7 @@ db_string_concatenate (const DB_VALUE * string1, const DB_VALUE * string2, DB_VA is_inplace_concat = true; } - /* + /* * Categorize the parameters into respective code sets. * Verify that the parameters are both character strings. * Verify that the input strings belong to compatible code sets. 
@@ -1404,7 +1410,7 @@ db_string_instr (const DB_VALUE * src_string, const DB_VALUE * sub_string, const DB_TYPE str1_type, str2_type; DB_TYPE arg3_type; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (src_string != (DB_VALUE *) NULL); @@ -1412,7 +1418,7 @@ db_string_instr (const DB_VALUE * src_string, const DB_VALUE * sub_string, const assert (start_pos != (DB_VALUE *) NULL); assert (result != (DB_VALUE *) NULL); - /* + /* * Categorize the parameters into respective code sets. * Verify that the parameters are both character strings. * Verify that the input strings belong to compatible code sets. @@ -1658,7 +1664,7 @@ db_string_position (const DB_VALUE * sub_string, const DB_VALUE * src_string, DB int error_status = NO_ERROR; DB_TYPE str1_type, str2_type; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (sub_string != (DB_VALUE *) NULL); @@ -1666,7 +1672,7 @@ db_string_position (const DB_VALUE * sub_string, const DB_VALUE * src_string, DB assert (result != (DB_VALUE *) NULL); - /* + /* * Categorize the parameters into respective code sets. * Verify that the parameters are both character strings. * Verify that the input strings belong to compatible code sets. @@ -1770,7 +1776,7 @@ db_string_substring (const MISC_OPERAND substr_operand, const DB_VALUE * src_str DB_TYPE result_type; DB_TYPE src_type; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (src_string != (DB_VALUE *) NULL); @@ -1887,6 +1893,70 @@ db_string_substring (const MISC_OPERAND substr_operand, const DB_VALUE * src_str return error_status; } +/* + * db_string_quote - escape a string and surround it with quotes + * return: If success, return 0. 
+ * src(in): str + * res(out): quoted string + * Note: + */ +int +db_string_quote (const DB_VALUE * str, DB_VALUE * res) +{ + if (DB_IS_NULL (str)) + { + return db_make_null (res); + } + else + { + char *src_str = db_get_string (str); + int src_size = db_get_string_size (str); + int dest_crt_pos; + int src_last_pos; + + // *INDENT-OFF* + std::vector special_idx; + // *INDENT-ON* + for (int i = 0; i < src_size; ++i) + { + unsigned char uc = (unsigned char) src_str[i]; + if (ESCAPE_CHAR (uc)) + { + special_idx.push_back (i); + } + } + int dest_size = src_size + special_idx.size () + 2; + char *result = (char *) db_private_alloc (NULL, dest_size); + if (result == NULL) + { + return ER_OUT_OF_VIRTUAL_MEMORY; + } + + result[0] = '"'; + dest_crt_pos = 1; + src_last_pos = 0; + for (int i = 0; i < special_idx.size (); ++i) + { + int len = special_idx[i] - src_last_pos; + memcpy (&result[dest_crt_pos], &src_str[src_last_pos], len); + dest_crt_pos += len; + result[dest_crt_pos] = '\\'; + ++dest_crt_pos; + src_last_pos = special_idx[i]; + } + memcpy (&result[dest_crt_pos], &src_str[src_last_pos], src_size - src_last_pos); + result[dest_size - 1] = '"'; + + db_make_null (res); + DB_TYPE result_type = DB_TYPE_CHAR; + qstr_make_typed_string (result_type, res, DB_VALUE_PRECISION (res), result, + (const int) dest_size, db_get_string_codeset (str), db_get_string_collation (str)); + + res->need_clear = true; + return NO_ERROR; + } +} + /* * db_string_repeat * @@ -1913,7 +1983,7 @@ db_string_repeat (const DB_VALUE * src_string, const DB_VALUE * count, DB_VALUE DB_TYPE src_type; INTL_CODESET codeset; - /* + /* * Assert that DB_VALUE structures have been allocated. 
*/ assert (src_string != (DB_VALUE *) NULL); @@ -2063,14 +2133,14 @@ db_string_substring_index (DB_VALUE * src_string, DB_VALUE * delim_string, const INTL_CODESET src_cs, delim_cs; int src_coll, delim_coll; - /* + /* * Initialize status value */ db_make_null (result); db_make_null (&empty_string1); db_make_null (&empty_string2); - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (src_string != (DB_VALUE *) NULL); @@ -2086,7 +2156,7 @@ db_string_substring_index (DB_VALUE * src_string, DB_VALUE * delim_string, const } count_i = db_get_int (count); - /* + /* * Categorize the parameters into respective code sets. * Verify that the parameters are both character strings. * Verify that the input strings belong to compatible code sets. @@ -2716,7 +2786,7 @@ db_string_insert_substring (DB_VALUE * src_string, const DB_VALUE * position, co INTL_CODESET src_cs, substr_cs; int src_coll, substr_coll; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (src_string != (DB_VALUE *) NULL); @@ -2725,7 +2795,7 @@ db_string_insert_substring (DB_VALUE * src_string, const DB_VALUE * position, co assert (length != (DB_VALUE *) NULL); assert (result != (DB_VALUE *) NULL); - /* + /* * Initialize values */ db_make_null (result); @@ -2735,7 +2805,7 @@ db_string_insert_substring (DB_VALUE * src_string, const DB_VALUE * position, co db_make_null (&empty_string2); db_make_null (&partial_result); - /* + /* * Categorize the parameters into respective code sets. * Verify that the parameters are both character strings. * Verify that the input strings belong to compatible code sets. 
@@ -3047,6 +3117,7 @@ db_json_object (DB_VALUE * result, DB_VALUE * arg[], int const num_args) if (num_args % 2 != 0) { assert (false); // should be caught earlier + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OBJ_INVALID_ARGUMENTS, 0); return ER_FAILED; } @@ -3086,6 +3157,10 @@ db_json_object (DB_VALUE * result, DB_VALUE * arg[], int const num_args) error_code = db_json_add_member_to_object (new_doc, db_get_string (arg[i]), db_get_int (arg[i + 1])); break; + case DB_TYPE_BIGINT: + error_code = db_json_add_member_to_object (new_doc, db_get_string (arg[i]), db_get_bigint (arg[i + 1])); + break; + case DB_TYPE_DOUBLE: error_code = db_json_add_member_to_object (new_doc, db_get_string (arg[i]), db_get_double (arg[i + 1])); break; @@ -3164,6 +3239,10 @@ db_json_array (DB_VALUE * result, DB_VALUE * arg[], int const num_args) db_json_add_element_to_array (new_doc, db_get_int (arg[i])); break; + case DB_TYPE_BIGINT: + db_json_add_element_to_array (new_doc, db_get_bigint (arg[i])); + break; + case DB_TYPE_DOUBLE: db_json_add_element_to_array (new_doc, db_get_double (arg[i])); break; @@ -3210,6 +3289,7 @@ db_json_insert (DB_VALUE * result, DB_VALUE * arg[], int const num_args) if (num_args < 3 || num_args % 2 == 0) { assert (false); + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OBJ_INVALID_ARGUMENTS, 0); return ER_FAILED; } @@ -3281,6 +3361,7 @@ db_json_replace (DB_VALUE * result, DB_VALUE * arg[], int const num_args) if (num_args < 3 || num_args % 2 == 0) { assert_release (false); + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OBJ_INVALID_ARGUMENTS, 0); return ER_FAILED; } @@ -3351,6 +3432,7 @@ db_json_set (DB_VALUE * result, DB_VALUE * arg[], int const num_args) if (num_args < 3 || num_args % 2 == 0) { assert_release (false); + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OBJ_INVALID_ARGUMENTS, 0); return ER_FAILED; } @@ -3424,6 +3506,7 @@ db_json_keys (DB_VALUE * result, DB_VALUE * arg[], int const num_args) if (num_args > 2) { assert_release (false); + er_set 
(ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OBJ_INVALID_ARGUMENTS, 0); return ER_FAILED; } @@ -3485,6 +3568,7 @@ db_json_remove (DB_VALUE * result, DB_VALUE * arg[], int const num_args) if (num_args < 2) { assert (false); + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OBJ_INVALID_ARGUMENTS, 0); return ER_FAILED; } @@ -3534,6 +3618,7 @@ db_json_array_append (DB_VALUE * result, DB_VALUE * arg[], int const num_args) if (num_args < 3 || num_args % 2 == 0) { assert (false); + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OBJ_INVALID_ARGUMENTS, 0); return ER_FAILED; } @@ -3560,6 +3645,9 @@ db_json_array_append (DB_VALUE * result, DB_VALUE * arg[], int const num_args) switch (DB_VALUE_DOMAIN_TYPE (arg[i + 1])) { case DB_TYPE_CHAR: + case DB_TYPE_VARCHAR: + case DB_TYPE_NCHAR: + case DB_TYPE_VARNCHAR: error_code = db_json_convert_string_and_call (db_get_string (arg[i + 1]), db_get_string_size (arg[i + 1]), db_json_array_append_func, *new_doc, db_get_string (arg[i])); break; @@ -3591,17 +3679,148 @@ db_json_array_append (DB_VALUE * result, DB_VALUE * arg[], int const num_args) return NO_ERROR; } -/* - * db_json_merge () - * this function merges two by two json - * so merge (j1, j2, j3, j4) = merge_two (j1, (merge (j2, merge (j3, j4)))) - * result (out): the merge result - * arg (in): the arguments for the merge function - * num_args (in) - */ +int +db_json_array_insert (DB_VALUE * result, DB_VALUE * arg[], int const num_args) +{ + int i, error_code = NO_ERROR; + JSON_DOC *new_doc = NULL; + char *str = NULL; + + db_make_null (result); + + if (num_args < 3 || num_args % 2 == 0) + { + assert (false); + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OBJ_INVALID_ARGUMENTS, 0); + return ER_FAILED; + } + + if (DB_IS_NULL (arg[0])) + { + return NO_ERROR; + } + + error_code = db_value_to_json_doc (*arg[0], new_doc); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + for (i = 1; i < num_args; i += 2) + { + if (DB_IS_NULL (arg[i]) || DB_IS_NULL (arg[i + 1])) + { 
+ db_json_delete_doc (new_doc); + return db_make_null (result); + } + + switch (DB_VALUE_DOMAIN_TYPE (arg[i + 1])) + { + case DB_TYPE_CHAR: + case DB_TYPE_VARCHAR: + case DB_TYPE_NCHAR: + case DB_TYPE_VARNCHAR: + error_code = db_json_convert_string_and_call (db_get_string (arg[i + 1]), db_get_string_size (arg[i + 1]), + db_json_array_insert_func, *new_doc, db_get_string (arg[i])); + break; + + case DB_TYPE_JSON: + error_code = db_json_array_insert_func (arg[i + 1]->data.json.document, *new_doc, db_get_string (arg[i])); + break; + + case DB_TYPE_NULL: + db_json_delete_doc (new_doc); + return db_make_null (result); + + default: + db_json_delete_doc (new_doc); + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_QSTR_INVALID_DATA_TYPE, 0); + return ER_QSTR_INVALID_DATA_TYPE; + } + + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + db_json_delete_doc (new_doc); + return error_code; + } + } + + db_make_json (result, new_doc, true); + + return NO_ERROR; +} int -db_json_merge (DB_VALUE * result, DB_VALUE * arg[], int const num_args) +db_json_contains_path (DB_VALUE * result, DB_VALUE * arg[], const int num_args) +{ + bool find_all = false; + bool exists = false; + int error_code = NO_ERROR; + JSON_DOC *doc = NULL; + db_make_null (result); + + if (DB_IS_NULL (arg[0]) || DB_IS_NULL (arg[1])) + { + return NO_ERROR; + } + + doc = db_get_json_document (arg[0]); + const char *find_all_str = db_get_string (arg[1]); + + if (strcmp (find_all_str, "all") == 0) + { + find_all = true; + } + if (!find_all && strcmp (find_all_str, "one")) + { + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_QSTR_INVALID_DATA_TYPE, 0); + return ER_QSTR_INVALID_DATA_TYPE; + } + + for (int i = 2; i < num_args; ++i) + { + if (DB_IS_NULL (arg[i])) + { + return NO_ERROR; + } + } + + for (int i = 2; i < num_args; ++i) + { + const char *path = db_get_string (arg[i]); + + if (path == NULL) + { + return NO_ERROR; + } + + error_code = db_json_contains_path (doc, path, exists); + if (error_code) + { + return 
error_code; + } + + if (find_all && !exists) + { + error_code = db_make_int (result, (int) false); + return error_code; + } + if (!find_all && exists) + { + error_code = db_make_int (result, (int) true); + return error_code; + } + } + + // if we have not returned early last search is decisive + error_code = db_make_int (result, (int) exists); + return error_code; +} + +static int +db_json_merge_helper (DB_VALUE * result, DB_VALUE * arg[], int const num_args, bool patch) { int i; int error_code; @@ -3624,7 +3843,7 @@ db_json_merge (DB_VALUE * result, DB_VALUE * arg[], int const num_args) switch (DB_VALUE_TYPE (arg[i])) { case DB_TYPE_JSON: - error_code = db_json_merge_func (arg[i]->data.json.document, accumulator); + error_code = db_json_merge_func (arg[i]->data.json.document, accumulator, patch); break; case DB_TYPE_CHAR: @@ -3632,12 +3851,12 @@ db_json_merge (DB_VALUE * result, DB_VALUE * arg[], int const num_args) case DB_TYPE_NCHAR: case DB_TYPE_VARNCHAR: error_code = db_json_convert_string_and_call (db_get_string (arg[i]), db_get_string_size (arg[i]), - db_json_merge_func, accumulator); + db_json_merge_func, accumulator, patch); break; case DB_TYPE_NULL: // todo: isn't this too supposed to be NULL? 
- error_code = db_json_merge_func (NULL, accumulator); + error_code = db_json_merge_func (NULL, accumulator, patch); break; default: @@ -3658,19 +3877,153 @@ db_json_merge (DB_VALUE * result, DB_VALUE * arg[], int const num_args) return NO_ERROR; } +/* + * db_json_merge () + * + * this function merges two by two json + * so merge (j1, j2, j3, j4) = merge_two (j1, (merge (j2, merge (j3, j4)))) + * + * result (out): the merge result + * arg (in): the arguments for the merge function + * num_args (in) + */ +int +db_json_merge (DB_VALUE * result, DB_VALUE * arg[], int const num_args) +{ + return db_json_merge_helper (result, arg, num_args); +} + +/* + * db_json_merge_patch() + * + * this function merges two by two json without preserving members having duplicate keys + * so merge (j1, j2, j3, j4) = merge_two (j1, (merge (j2, merge (j3, j4)))) + * + * result (out): the merge result + * arg (in): the arguments for the merge function + * num_args (in) + */ +int +db_json_merge_patch (DB_VALUE * result, DB_VALUE * arg[], int const num_args) +{ + return db_json_merge_helper (result, arg, num_args, true); +} + +/* + * JSON_SEARCH (json_doc, one/all, pattern [, escape_char, path_1,... 
path_n]) + * + * db_json_search_dbval () + * function that finds paths of json_values that match the pattern argument + * result (out): json string or json array if there are more paths that match + * args (in): the arguments for the json_search function + * num_args (in) + */ + +/* *INDENT-OFF* */ +int +db_json_search_dbval (DB_VALUE * result, DB_VALUE * args[], const int num_args) +{ + int error_code = NO_ERROR; + + if (num_args < 3) + { + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OBJ_INVALID_ARGUMENTS, 0); + return ER_FAILED; + } + + for (int i = 0; i < num_args; ++i) + { + // only escape char might be null + if (i != 3 && DB_IS_NULL (args[i])) + { + return db_make_null (result); + } + } + + JSON_DOC *doc = db_get_json_document (args[0]); + char *find_all_str = db_get_string (args[1]); + bool find_all = false; + + if (strcmp (find_all_str, "all") == 0) + { + find_all = true; + } + if (!find_all && strcmp (find_all_str, "one")) + { + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_QSTR_INVALID_DATA_TYPE, 0); + return ER_QSTR_INVALID_DATA_TYPE; + } + + DB_VALUE *pattern = args[2]; + DB_VALUE *esc_char = nullptr; + if (num_args >= 4) + { + esc_char = args[3]; + } + + std::vector starting_paths; + for (int i = 4; i < num_args; ++i) + { + starting_paths.push_back (db_get_string (args[i])); + } + if (starting_paths.empty ()) + { + starting_paths.push_back ("$"); + } + + std::vector paths; + error_code = db_json_search_func (*doc, pattern, esc_char, find_all, starting_paths, paths); + if (error_code != NO_ERROR) + { + return error_code; + } + + JSON_DOC *result_json = nullptr; + + if (paths.size () == 1) + { + error_code = db_json_get_json_from_str (paths[0].c_str (), result_json, paths[0].length ()); + if (error_code != NO_ERROR) + { + return error_code; + } + return db_make_json (result, result_json, true); + } + + result_json = db_json_allocate_doc (); + for (auto &path : paths) + { + JSON_DOC *json_array_elem = nullptr; + + error_code = db_json_get_json_from_str 
(path.c_str (), json_array_elem, path.length ()); + if (error_code != NO_ERROR) + { + db_json_delete_doc (result_json); + return error_code; + } + + db_json_add_element_to_array (result_json, json_array_elem); + + db_json_delete_doc (json_array_elem); + } + + return db_make_json (result, result_json, true); +} +/* *INDENT-ON* */ + int db_json_get_all_paths (DB_VALUE * result, DB_VALUE * arg[], int const num_args) { int error_code = NO_ERROR; JSON_DOC *new_doc = NULL; JSON_DOC *result_json = NULL; - char *str = NULL; db_make_null (result); if (num_args != 1) { assert (false); + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OBJ_INVALID_ARGUMENTS, 0); return ER_FAILED; } @@ -3729,13 +4082,13 @@ db_string_byte_length (const DB_VALUE * string, DB_VALUE * byte_count) int error_status = NO_ERROR; DB_TYPE str_type; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (string != (DB_VALUE *) NULL); assert (byte_count != (DB_VALUE *) NULL); - /* + /* * Verify that the input string is a valid character * string. Bit strings are not allowed. * @@ -3794,13 +4147,13 @@ db_string_bit_length (const DB_VALUE * string, DB_VALUE * bit_count) int error_status = NO_ERROR; DB_TYPE str_type; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (string != (DB_VALUE *) NULL); assert (bit_count != (DB_VALUE *) NULL); - /* + /* * Verify that the input string is a valid character string. * Bit strings are not allowed. * @@ -3864,14 +4217,14 @@ db_string_char_length (const DB_VALUE * string, DB_VALUE * char_count) { int error_status = NO_ERROR; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (string != (DB_VALUE *) NULL); assert (char_count != (DB_VALUE *) NULL); - /* + /* * Verify that the input string is a valid character * string. Bit strings are not allowed. 
* @@ -3934,13 +4287,13 @@ db_string_lower (const DB_VALUE * string, DB_VALUE * lower_string) int error_status = NO_ERROR; DB_TYPE str_type; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (string != (DB_VALUE *) NULL); assert (lower_string != (DB_VALUE *) NULL); - /* + /* * Categorize the two input parameters and check for errors. * Verify that the parameters are both character strings. */ @@ -3954,7 +4307,7 @@ db_string_lower (const DB_VALUE * string, DB_VALUE * lower_string) { error_status = ER_QSTR_INVALID_DATA_TYPE; } - /* + /* * If the input parameters have been properly validated, then * we are ready to operate. */ @@ -4033,13 +4386,13 @@ db_string_upper (const DB_VALUE * string, DB_VALUE * upper_string) int error_status = NO_ERROR; DB_TYPE str_type; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (string != (DB_VALUE *) NULL); assert (upper_string != (DB_VALUE *) NULL); - /* + /* * Categorize the two input parameters and check for errors. * Verify that the parameters are both character strings. */ @@ -4053,7 +4406,7 @@ db_string_upper (const DB_VALUE * string, DB_VALUE * upper_string) { error_status = ER_QSTR_INVALID_DATA_TYPE; } - /* + /* * If the input parameters have been properly validated, then * we are ready to operate. 
*/ @@ -4131,7 +4484,7 @@ db_string_trim (const MISC_OPERAND tr_operand, const DB_VALUE * trim_charset, co int trim_charset_size = 0; DB_TYPE src_type, trim_type; - /* + /* * Assert DB_VALUE structures have been allocated */ @@ -4175,7 +4528,7 @@ db_string_trim (const MISC_OPERAND tr_operand, const DB_VALUE * trim_charset, co } } - /* + /* * Verify input parameters are all char strings and are compatible */ @@ -4196,7 +4549,7 @@ db_string_trim (const MISC_OPERAND tr_operand, const DB_VALUE * trim_charset, co return error_status; } - /* + /* * begin of main codes */ if (!is_trim_charset_omitted) @@ -4305,7 +4658,7 @@ qstr_trim (MISC_OPERAND trim_operand, const unsigned char *trim_charset, int tri * codeset: (in) International codeset of source string. * lead_trimmed_ptr: (out) Pointer to start of trimmed string. * lead_trimmed_length: (out) Length of trimmed string. - * trim_ascii_spaces: (in) Option to trim normal spaces also. + * trim_ascii_spaces: (in) Option to trim normal spaces also. * * Returns: nothing * @@ -4626,7 +4979,7 @@ qstr_pad (MISC_OPERAND pad_operand, int pad_length, const unsigned char *pad_cha return error_status; } - /* + /* * now start padding */ @@ -4643,7 +4996,7 @@ qstr_pad (MISC_OPERAND pad_operand, int pad_length, const unsigned char *pad_cha return error_status; } - /* + /* * Get real length to be paded * if source length is greater than pad_length */ @@ -4740,7 +5093,7 @@ db_string_like (const DB_VALUE * src_string, const DB_VALUE * pattern, const DB_ int src_length = 0, pattern_length = 0; int coll_id; - /* + /* * Assert that DB_VALUE structures have been allocated. 
*/ assert (src_string != NULL); @@ -5763,7 +6116,7 @@ qstr_replace (unsigned char *src_buf, int src_len, int src_size, INTL_CODESET co *result_buf = NULL; - /* + /* * if search string is NULL or is longer than source string * copy source string as a result */ @@ -6231,7 +6584,7 @@ db_char_string_coerce (const DB_VALUE * src_string, DB_VALUE * dest_string, DB_D /* Initialize status value */ *data_status = DATA_STATUS_OK; - /* + /* * Categorize the two input parameters and check for errors. * Verify that the parameters are both character strings. * Verify that the source and destination strings are of @@ -6385,7 +6738,7 @@ db_find_string_in_in_set (const DB_VALUE * needle, const DB_VALUE * stack, DB_VA return NO_ERROR; } - /* + /* * Categorize the parameters into respective code sets. * Verify that the parameters are both character strings. * Verify that the input strings belong to compatible code sets. @@ -6973,13 +7326,13 @@ db_string_convert (const DB_VALUE * src_string, DB_VALUE * dest_string) DB_TYPE src_type, dest_type; int error_status = NO_ERROR; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (src_string != (DB_VALUE *) NULL); assert (dest_string != (DB_VALUE *) NULL); - /* + /* * Categorize the two input parameters and check for errors. * Verify that the parameters are both character strings. */ @@ -7065,7 +7418,7 @@ db_string_convert (const DB_VALUE * src_string, DB_VALUE * dest_string) } } - /* + /* * If intl_convert_charset() returned an error, map * to an ER_QSTR_INCOMPATIBLE_CODE_SETS error. */ @@ -7077,7 +7430,7 @@ db_string_convert (const DB_VALUE * src_string, DB_VALUE * dest_string) return error_status; - /* + /* * Error handling */ mem_error: @@ -7365,7 +7718,7 @@ qstr_bit_to_hex_coerce (char *buffer, int buffer_size, const char *src, int src_ if (buffer_size > (2 * src_size)) { - /* + /* * No truncation; copy the data and blank pad if necessary. 
*/ qstr_bin_to_hex (buffer, buffer_size, src, src_size); @@ -7387,7 +7740,7 @@ qstr_bit_to_hex_coerce (char *buffer, int buffer_size, const char *src, int src_ } else { - /* + /* * Truncation is necessary; put as many bytes as possible into * the receiving buffer and null-terminate it (i.e., it receives * at most dstsize-1 bytes). If there is not outlen indicator by @@ -8328,7 +8681,7 @@ qstr_append (unsigned char *s1, int s1_length, int s1_precision, DB_TYPE s1_type { return ER_QSTR_INVALID_DATA_TYPE; } - /* + /* * Categorize the source string into fixed and variable * length. Variable length strings are simple. Fixed * length strings have to be handled special since the @@ -8357,13 +8710,13 @@ qstr_append (unsigned char *s1, int s1_length, int s1_precision, DB_TYPE s1_type s2_logical_length = s2_length; } - /* + /* * If both source strings are fixed-length, the concatenated * result will be fixed-length. */ if (QSTR_IS_FIXED_LENGTH (s1_type) && QSTR_IS_FIXED_LENGTH (s2_type)) { - /* + /* * The result will be a chararacter string of length = * string1_precision + string2_precision. If the result * length is greater than the maximum allowed for a fixed @@ -8386,7 +8739,7 @@ qstr_append (unsigned char *s1, int s1_length, int s1_precision, DB_TYPE s1_type *result_size = *result_length; } - /* + /* * Determine how much of s1 is already copied. * Remember that this may or may not include needed padding. * Then determine how much padding must be added to each @@ -8398,7 +8751,7 @@ qstr_append (unsigned char *s1, int s1_length, int s1_precision, DB_TYPE s1_type pad1_length = MIN (s1_logical_length, *result_length) - copy_length; length_left = *result_length - copy_length - pad1_length; - /* + /* * Determine how much of string2 can be concatenated after * string1. 
Remember that string2 is concatentated after * the full length of string1 including any necessary pad @@ -8409,7 +8762,7 @@ qstr_append (unsigned char *s1, int s1_length, int s1_precision, DB_TYPE s1_type pad2_length = length_left - cat_length; - /* + /* * Pad string s1, Copy the s2 string after the s1 string */ cat_ptr = qstr_pad_string ((unsigned char *) &(s1[copy_size]), pad1_length, codeset); @@ -8417,13 +8770,13 @@ qstr_append (unsigned char *s1, int s1_length, int s1_precision, DB_TYPE s1_type memcpy ((char *) cat_ptr, (char *) s2, cat_size); (void) qstr_pad_string ((unsigned char *) &cat_ptr[cat_size], pad2_length, codeset); } - /* + /* * If either source string is variable-length, the concatenated * result will be variable-length. */ else { - /* + /* * The result length will be the sum of the lengths of * the two source strings. If this is greater than the * maximum length of a variable length string, then the @@ -8441,7 +8794,7 @@ qstr_append (unsigned char *s1, int s1_length, int s1_precision, DB_TYPE s1_type *result_size = *result_length; } - /* + /* * Calculate the number of characters from string1 that are already * into the result. If s1 string is larger than the expected entire * string and if the portion of the string s1 contained anything but @@ -8462,7 +8815,7 @@ qstr_append (unsigned char *s1, int s1_length, int s1_precision, DB_TYPE s1_type pad1_length = MIN (s1_logical_length, *result_length) - copy_length; length_left = *result_length - copy_length - pad1_length; - /* + /* * Processess string2 as we did for string1. */ cat_length = s2_length; @@ -8479,7 +8832,7 @@ qstr_append (unsigned char *s1, int s1_length, int s1_precision, DB_TYPE s1_type pad2_length = length_left - cat_length; - /* + /* * Actually perform the copy operation. 
*/ cat_ptr = qstr_pad_string ((unsigned char *) &(s1[copy_size]), pad1_length, codeset); @@ -8532,7 +8885,7 @@ qstr_concatenate (const unsigned char *s1, int s1_length, int s1_precision, DB_T *data_status = DATA_STATUS_OK; - /* + /* * Categorize the source string into fixed and variable * length. Variable length strings are simple. Fixed * length strings have to be handled special since the @@ -8560,13 +8913,13 @@ qstr_concatenate (const unsigned char *s1, int s1_length, int s1_precision, DB_T s2_logical_length = s2_length; } - /* + /* * If both source strings are fixed-length, the concatenated * result will be fixed-length. */ if (QSTR_IS_FIXED_LENGTH (s1_type) && QSTR_IS_FIXED_LENGTH (s2_type)) { - /* + /* * The result will be a chararacter string of length = * string1_precision + string2_precision. If the result * length is greater than the maximum allowed for a fixed @@ -8614,7 +8967,7 @@ qstr_concatenate (const unsigned char *s1, int s1_length, int s1_precision, DB_T goto mem_error; } - /* + /* * Determine how much of string1 needs to be copied. * Remember that this may or may not include needed padding. * Then determine how much padding must be added to each @@ -8626,7 +8979,7 @@ qstr_concatenate (const unsigned char *s1, int s1_length, int s1_precision, DB_T pad1_length = MIN (s1_logical_length, *result_length) - copy_length; length_left = *result_length - copy_length - pad1_length; - /* + /* * Determine how much of string2 can be concatenated after * string1. 
Remember that string2 is concatentated after * the full length of string1 including any necessary pad @@ -8637,7 +8990,7 @@ qstr_concatenate (const unsigned char *s1, int s1_length, int s1_precision, DB_T pad2_length = length_left - cat_length; - /* + /* * Copy the source strings into the result string */ memcpy ((char *) *result, (char *) s1, copy_size); @@ -8646,13 +8999,13 @@ qstr_concatenate (const unsigned char *s1, int s1_length, int s1_precision, DB_T memcpy ((char *) cat_ptr, (char *) s2, cat_size); (void) qstr_pad_string ((unsigned char *) &cat_ptr[cat_size], pad2_length, codeset); } - /* + /* * If either source string is variable-length, the concatenated * result will be variable-length. */ else { - /* + /* * The result length will be the sum of the lengths of * the two source strings. If this is greater than the * maximum length of a variable length string, then the @@ -8697,7 +9050,7 @@ qstr_concatenate (const unsigned char *s1, int s1_length, int s1_precision, DB_T } - /* + /* * Calculate the number of characters from string1 that can * be copied to the result. If we cannot copy the entire * string and if the portion of the string which was not @@ -8719,7 +9072,7 @@ qstr_concatenate (const unsigned char *s1, int s1_length, int s1_precision, DB_T pad1_length = MIN (s1_logical_length, *result_length) - copy_length; length_left = *result_length - copy_length - pad1_length; - /* + /* * Processess string2 as we did for string1. */ cat_length = s2_length; @@ -8736,7 +9089,7 @@ qstr_concatenate (const unsigned char *s1, int s1_length, int s1_precision, DB_T pad2_length = length_left - cat_length; - /* + /* * Actually perform the copy operations. 
*/ memcpy ((char *) *result, (char *) s1, copy_size); @@ -8755,7 +9108,7 @@ qstr_concatenate (const unsigned char *s1, int s1_length, int s1_precision, DB_T er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error_status, 2, *result_size, (int) prm_get_bigint_value (PRM_ID_STRING_MAX_SIZE_BYTES)); return error_status; - /* + /* * Error handler */ mem_error: @@ -8799,7 +9152,7 @@ qstr_bit_concatenate (const unsigned char *s1, int s1_length, int s1_precision, *data_status = DATA_STATUS_OK; - /* + /* * Calculate the byte size of the strings. * Calculate the bit length and byte size needed to concatenate * the two strings without truncation. @@ -8808,7 +9161,7 @@ qstr_bit_concatenate (const unsigned char *s1, int s1_length, int s1_precision, s2_size = QSTR_NUM_BYTES (s2_length); - /* + /* * Categorize the source string into fixed and variable * length. Variable length strings are simple. Fixed * length strings have to be handled special since the @@ -8839,7 +9192,7 @@ qstr_bit_concatenate (const unsigned char *s1, int s1_length, int s1_precision, if ((s1_type == DB_TYPE_BIT) && (s2_type == DB_TYPE_BIT)) { - /* + /* * The result will be a bit string of length = * string1_precision + string2_precision. If the result * length is greater than the maximum allowed for a fixed @@ -8869,13 +9222,13 @@ qstr_bit_concatenate (const unsigned char *s1, int s1_length, int s1_precision, goto mem_error; } - /* + /* * The source strings may not be fully padded, so * we pre-pad the result string. */ (void) memset ((char *) *result, (int) 0, (int) *result_size); - /* + /* * Determine how much of string1 needs to be copied. * Remember that this may or may not include needed padding */ @@ -8885,7 +9238,7 @@ qstr_bit_concatenate (const unsigned char *s1, int s1_length, int s1_precision, copy_length = *result_length; } - /* + /* * Determine how much of string2 can be concatenated after * string1. 
Remember that string2 is concatentated after * the full length of string1 including any necessary pad @@ -8898,7 +9251,7 @@ qstr_bit_concatenate (const unsigned char *s1, int s1_length, int s1_precision, } - /* + /* * Copy the source strings into the result string. * We are being a bit sloppy here by performing a byte * copy as opposed to a bit copy. But this should be OK @@ -8909,7 +9262,7 @@ qstr_bit_concatenate (const unsigned char *s1, int s1_length, int s1_precision, else /* Assume DB_TYPE_VARBIT */ { - /* + /* * The result length will be the sum of the lengths of * the two source strings. If this is greater than the * maximum length of a variable length string, then the @@ -8934,13 +9287,13 @@ qstr_bit_concatenate (const unsigned char *s1, int s1_length, int s1_precision, goto mem_error; } - /* + /* * The source strings may not be fully padded, so * we pre-pad the result string. */ (void) memset ((char *) *result, (int) 0, (int) *result_size); - /* + /* * Calculate the number of bits from string1 that can * be copied to the result. If we cannot copy the entire * string and if the portion of the string which was not @@ -8969,7 +9322,7 @@ qstr_bit_concatenate (const unsigned char *s1, int s1_length, int s1_precision, } - /* + /* * Actually perform the copy operations and * place the result string in a container. */ @@ -8985,7 +9338,7 @@ qstr_bit_concatenate (const unsigned char *s1, int s1_length, int s1_precision, (int) prm_get_bigint_value (PRM_ID_STRING_MAX_SIZE_BYTES)); return error_status; - /* + /* * Error handling */ mem_error: @@ -9132,7 +9485,7 @@ bit_ncat (unsigned char *r, int offset, const unsigned char *s, int n) shift_amount = BYTE_SIZE - remainder; mask = 0xff << shift_amount; - /* + /* * tmp_shifted is loaded with a byte from the source * string and shifted into poition. 
The upper byte is * used for the current destination location, while the @@ -9183,7 +9536,7 @@ bstring_fls (const char *s, int n) int byte_num, bit_num, inter_bit_num; - /* + /* * We are looking for the first non-zero byte (starting at the end). */ byte_num = n - 1; @@ -9192,7 +9545,7 @@ bstring_fls (const char *s, int n) byte_num--; } - /* + /* * If byte_num is < 0, then the string is all 0's. * Othersize, byte_num is the index for the first byte which has * some bits set (from the end). @@ -9239,7 +9592,7 @@ qstr_bit_coerce (const unsigned char *src, int src_length, int src_precision, DB *data_status = DATA_STATUS_OK; - /* + /* * is the length of the fully padded * source string. */ @@ -9252,7 +9605,7 @@ qstr_bit_coerce (const unsigned char *src, int src_length, int src_precision, DB src_padded_length = src_length; } - /* + /* * If there is not enough precision in the destination string, * then some bits will be omited from the source string. */ @@ -9265,7 +9618,7 @@ qstr_bit_coerce (const unsigned char *src, int src_length, int src_precision, DB copy_length = MIN (src_length, src_padded_length); copy_size = QSTR_NUM_BYTES (copy_length); - /* + /* * For fixed-length destination strings... * Allocate the destination precision size, copy the source * string and pad the rest. @@ -9333,7 +9686,7 @@ qstr_coerce (const unsigned char *src, int src_length, int src_precision, DB_TYP *data_status = DATA_STATUS_OK; *dest_size = 0; - /* + /* * is the length of the fully padded * source string. */ @@ -9346,7 +9699,7 @@ qstr_coerce (const unsigned char *src, int src_length, int src_precision, DB_TYP src_padded_length = src_length; } - /* + /* * Some characters will be truncated if there is not enough * precision in the destination string. 
If any of the * truncated characters are non-pad characters, a truncation @@ -9364,7 +9717,7 @@ qstr_coerce (const unsigned char *src, int src_length, int src_precision, DB_TYP copy_length = MIN (src_length, src_padded_length); - /* + /* * For fixed-length destination strings... * Allocate the destination precision size, copy the source * string and pad the rest. @@ -9590,7 +9943,7 @@ qstr_position (const char *sub_string, const int sub_size, const int sub_length, codeset = lc->codeset; - /* + /* * Since the entire sub-string must be matched, a reduced * number of compares are needed. A collation-based * comparison will be used. @@ -9610,7 +9963,7 @@ qstr_position (const char *sub_string, const int sub_size, const int sub_length, } } - /* + /* * Starting at the first position of the string, match the * sub-string to the source string. If a match is not found, * then increment into the source string by one character and @@ -9657,7 +10010,7 @@ qstr_position (const char *sub_string, const int sub_size, const int sub_length, } } - /* + /* * Return the position of the match, if found. */ if (result == 0) @@ -9721,7 +10074,7 @@ qstr_bit_position (const unsigned char *sub_string, int sub_length, const unsign shift_amount = BYTE_SIZE - sub_remainder; mask = 0xff << shift_amount; - /* + /* * We will be manipulating the source string prior to * comparison. So that we do not corrupt the source string, * we'll allocate a storage area so that we can make a copy @@ -9738,7 +10091,7 @@ qstr_bit_position (const unsigned char *sub_string, int sub_length, const unsign { ptr = (unsigned char *) src_string; - /* + /* * Make a copy of the source string. * Initialize the bit index. */ @@ -9763,7 +10116,7 @@ qstr_bit_position (const unsigned char *sub_string, int sub_length, const unsign i++; - /* + /* * Every time we hit a byte boundary, * Move on to the next byte of the source string. 
*/ @@ -9777,7 +10130,7 @@ qstr_bit_position (const unsigned char *sub_string, int sub_length, const unsign db_private_free_and_init (NULL, tmp_string); - /* + /* * If a match was found, then return the position * of the match. */ @@ -9865,7 +10218,7 @@ qstr_substring (const unsigned char *src, int src_length, int start, int length, /* Get the size of the source string. */ intl_char_size ((unsigned char *) src, src_length, codeset, &src_size); - /* + /* * Perform some error chaecking. * If the starting position is < 1, then set it to 1. * If the starting position is after the end of the source string, @@ -9891,7 +10244,7 @@ qstr_substring (const unsigned char *src, int src_length, int start, int length, *r_length = length; - /* + /* * Get a pointer to the start of the sub-string and the * size of the sub-string. * @@ -9946,7 +10299,7 @@ qstr_bit_substring (const unsigned char *src, int src_length, int start, int len src_size = QSTR_NUM_BYTES (src_length); - /* + /* * Perform some error checking. * If the starting position is < 1, then set it to 1. * If the starting position is after the end of the source @@ -9983,7 +10336,7 @@ qstr_bit_substring (const unsigned char *src, int src_length, int start, int len trailing_mask = 0xff << (BYTE_SIZE - rem); } - /* + /* * Allocate storage for the sub-string. * Copy the sub-string. */ @@ -12385,7 +12738,7 @@ db_sys_datetime (DB_VALUE * result_datetime) } /* - * db_sys_date_and_epoch_time () - This function returns current + * db_sys_date_and_epoch_time () - This function returns current * datetime and timestamp. * * return: status of the error @@ -17764,7 +18117,7 @@ lob_length (const DB_VALUE * src_value, DB_VALUE * result_value) elo = db_get_elo (src_value); if (elo) { - /* + /* * Hack: * In order to check the existence of the file, * it is required to make to invoke real file operation. 
@@ -19066,7 +19419,7 @@ make_number (char *src, char *last_src, INTL_CODESET codeset, char *token, int * /* This line needs to be modified to reflect appropriate error */ } - /* + /* * modify result_str to contain correct string value with respect to * the given precision and scale. */ @@ -19098,7 +19451,7 @@ make_number (char *src, char *last_src, INTL_CODESET codeset, char *token, int * { convert_locale_number (result_str, strlen (result_str), number_lang_id, INTL_LANG_ENGLISH); } - /* + /* * modify result_str to contain correct string value with respect to * the given precision and scale. */ @@ -19800,13 +20153,13 @@ db_string_reverse (const DB_VALUE * src_str, DB_VALUE * result_str) DB_TYPE str_type; char *res = NULL; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (src_str != (DB_VALUE *) NULL); assert (result_str != (DB_VALUE *) NULL); - /* + /* * Categorize the two input parameters and check for errors. * Verify that the parameters are both character strings. */ @@ -19820,7 +20173,7 @@ db_string_reverse (const DB_VALUE * src_str, DB_VALUE * result_str) { error_status = ER_QSTR_INVALID_DATA_TYPE; } - /* + /* * If the input parameters have been properly validated, then * we are ready to operate. */ @@ -20764,7 +21117,7 @@ copy_and_shift_values (int shift, int n, DB_BIGINT * first, ...) DB_BIGINT *v[16]; /* will contain max 5 elements */ int i, count = 0, cnt_src = 0; - /* + /* * numeric arguments from interval expression have a delimiter read also * as argument so out of N arguments there are actually (N + 1)/2 numeric * values (ex: 1:2:3:4 or 1:2 or 1:2:3) @@ -20886,7 +21239,7 @@ db_date_add_sub_interval_expr (DB_VALUE * result, const DB_VALUE * date, const D /* 1. 
Prepare the input: convert expr to char */ - /* + /* * expr is converted to char because it may contain a more complicated form * for the multiple unit formats, for example: * 'DAYS HOURS:MINUTES:SECONDS.MILLISECONDS' @@ -22639,7 +22992,7 @@ db_str_to_date (const DB_VALUE * str, const DB_VALUE * format, const DB_VALUE * } } - /* + /* * 1. Get information according to format specifiers * iterate simultaneously through each string and sscanf when * it is a format specifier. @@ -23226,7 +23579,7 @@ db_str_to_date (const DB_VALUE * str, const DB_VALUE * format, const DB_VALUE * break; case 'X': - /* %X Year for the week where Sunday is the first day of the week, numeric, four digits; used with %V + /* %X Year for the week where Sunday is the first day of the week, numeric, four digits; used with %V */ k = parse_digits (sstr + i, &y, 4); if (k <= 0) @@ -23345,13 +23698,13 @@ db_str_to_date (const DB_VALUE * str, const DB_VALUE * format, const DB_VALUE * days[2] += LEAP (y); - /* + /* * validations are done here because they are done just on the last memorized * values (ie: if you supply a month 99 then a month 12 the 99 isn't validated * because it's overwritten by 12 which is correct). 
*/ - /* + /* * check only upper bounds, lower bounds will be checked later and * will return error */ @@ -23442,7 +23795,7 @@ db_str_to_date (const DB_VALUE * str, const DB_VALUE * format, const DB_VALUE * } /* the year is fixed, compute the day and month from dow, doy, etc */ - /* + /* * the day and month can be supplied specifically which supress all other * informations or can be computed from dow and week or from doy */ @@ -26792,7 +27145,7 @@ db_conv (const DB_VALUE * num, const DB_VALUE * from_base, const DB_VALUE * to_b num_p_str = db_get_bit (num, &num_size); num_size = QSTR_NUM_BYTES (num_size); - /* convert to hex; NOTE: qstr_bin_to_hex returns number of converted bytes, not the size of the hex string; also, + /* convert to hex; NOTE: qstr_bin_to_hex returns number of converted bytes, not the size of the hex string; also, * we convert at most 64 digits even if we need only 16 in order to let strtoll handle overflow (weird stuff * happens there ...) */ num_size = qstr_bin_to_hex (num_str, UINT64_MAX_BIN_DIGITS, num_p_str, num_size); @@ -28022,7 +28375,7 @@ db_string_extract_dbval (const MISC_OPERAND extr_operand, DB_VALUE * dbval_p, DB * * return error code or NO_ERROR * time_val (in) : time value (datetime or time) - * tz_source (in) : source timezone string + * tz_source (in) : source timezone string * tz_dest (in) : dest timezone string * result_time (out) : result */ @@ -28034,7 +28387,7 @@ db_new_time (DB_VALUE * time_val, DB_VALUE * tz_source, DB_VALUE * tz_dest, DB_V DB_TIME *time = NULL; char *t_source, *t_dest; - /* + /* * Assert that DB_VALUE structures have been allocated. 
*/ assert (time_val != (DB_VALUE *) NULL); @@ -28113,7 +28466,7 @@ db_new_time (DB_VALUE * time_val, DB_VALUE * tz_source, DB_VALUE * tz_dest, DB_V /* * db_tz_offset () - retrieve the timezone offset for src_str source timezone * - * return: error or no error + * return: error or no error * src_str(in): source DB_VALUE timezone string or offset * date_time(in): current UTC datetime * result_str(out): result DB_VALUE string @@ -28126,13 +28479,13 @@ db_tz_offset (const DB_VALUE * src_str, DB_VALUE * result_str, DB_DATETIME * dat DB_TYPE str_type; char *res; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (src_str != (DB_VALUE *) NULL); assert (result_str != (DB_VALUE *) NULL); - /* + /* * Categorize the two input parameters and check for errors. * Verify that the parameters are both character strings. */ @@ -28147,7 +28500,7 @@ db_tz_offset (const DB_VALUE * src_str, DB_VALUE * result_str, DB_DATETIME * dat error_status = ER_QSTR_INVALID_DATA_TYPE; er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error_status, 0); } - /* + /* * If the input parameters have been properly validated, then * we are ready to operate. */ @@ -28187,7 +28540,7 @@ db_tz_offset (const DB_VALUE * src_str, DB_VALUE * result_str, DB_DATETIME * dat * db_from_tz () - adds timezone information to the time_val * Return: error code or NO_ERROR * time_val (in) : time value (datetime, time or timestamp) - * tz (in) : timezone string + * tz (in) : timezone string * time_val_with_tz (out) : timeval with timezone information */ @@ -28198,7 +28551,7 @@ db_from_tz (DB_VALUE * time_val, DB_VALUE * tz, DB_VALUE * time_val_with_tz) int len_timezone, error = NO_ERROR; DB_DATETIME *datetime = NULL; - /* + /* * Assert that DB_VALUE structures have been allocated. 
*/ assert (time_val != (DB_VALUE *) NULL); @@ -28296,7 +28649,7 @@ db_conv_tz (DB_VALUE * time_val, DB_VALUE * result_time) DB_TIMESTAMPTZ *timestamptz = NULL; DB_TIMESTAMP *timestamp = NULL; - /* + /* * Assert that DB_VALUE structures have been allocated. */ assert (time_val != (DB_VALUE *) NULL); diff --git a/src/query/string_opfunc.h b/src/query/string_opfunc.h index a7663b5a925..6e76bbb4f1b 100644 --- a/src/query/string_opfunc.h +++ b/src/query/string_opfunc.h @@ -183,9 +183,11 @@ extern int db_string_chr (DB_VALUE * res, DB_VALUE * dbval1, DB_VALUE * dbval2); extern int db_string_instr (const DB_VALUE * src_string, const DB_VALUE * sub_string, const DB_VALUE * start_pos, DB_VALUE * result); extern int db_string_position (const DB_VALUE * sub_string, const DB_VALUE * src_string, DB_VALUE * result); +extern int db_json_search_dbval (DB_VALUE * result, DB_VALUE * args[], const int num_args); extern int db_string_substring (const MISC_OPERAND substr_operand, const DB_VALUE * src_string, const DB_VALUE * start_position, const DB_VALUE * extraction_length, DB_VALUE * sub_string); +extern int db_string_quote (const DB_VALUE * src_string, DB_VALUE * res); extern int db_string_repeat (const DB_VALUE * src_string, const DB_VALUE * count, DB_VALUE * result); extern int db_string_substring_index (DB_VALUE * src_string, DB_VALUE * delim_string, const DB_VALUE * count, DB_VALUE * result); @@ -206,8 +208,12 @@ extern int db_json_set (DB_VALUE * result, DB_VALUE * arg[], const int num_args) extern int db_json_keys (DB_VALUE * result, DB_VALUE * arg[], const int num_args); extern int db_json_remove (DB_VALUE * result, DB_VALUE * arg[], int const num_args); extern int db_json_array_append (DB_VALUE * result, DB_VALUE * arg[], int const num_args); +extern int db_json_array_insert (DB_VALUE * result, DB_VALUE * arg[], int const num_args); +extern int db_json_contains_path (DB_VALUE * result, DB_VALUE * arg[], const int num_args); extern int db_json_merge (DB_VALUE * result, 
DB_VALUE * arg[], int const num_args); +extern int db_json_merge_patch (DB_VALUE * result, DB_VALUE * arg[], int const num_args); extern int db_json_get_all_paths (DB_VALUE * result, DB_VALUE * arg[], int const num_args); +extern int db_json_pretty (DB_VALUE * result, DB_VALUE * arg[], int const num_args); #if defined (ENABLE_UNUSED_FUNCTION) extern int db_string_byte_length (const DB_VALUE * string, DB_VALUE * byte_count); diff --git a/src/query/vacuum.c b/src/query/vacuum.c index cdacfc2c72d..1e005f65251 100644 --- a/src/query/vacuum.c +++ b/src/query/vacuum.c @@ -2649,7 +2649,7 @@ vacuum_push_task (THREAD_ENTRY * thread_p, const VACUUM_DATA_ENTRY & data_entry, vacuum_convert_thread_to_worker (thread_p, worker_p, save_type); assert (save_type == thread_type::TT_VACUUM_MASTER); #endif // SA_MODE - (void) thread_p; // not used + (void) thread_p; // not used if (vacuum_Data.shutdown_requested) { // stop pushing tasks; worker pool may be stopped already @@ -4165,7 +4165,6 @@ vacuum_data_load_and_recover (THREAD_ENTRY * thread_p) else { /* Get last_blockid from last vacuum data entry. */ - assert (vacuum_Data.last_page->index_free > 0); INT16 last_block_index = (vacuum_Data.last_page->index_free <= 0) ? 0 : vacuum_Data.last_page->index_free - 1; vacuum_Data.set_last_blockid (vacuum_Data.last_page->data[last_block_index].blockid); @@ -5088,8 +5087,7 @@ vacuum_consume_buffer_log_blocks (THREAD_ENTRY * thread_p) if (is_sysop) { - // not really expected, but... - assert (false); + // more than one page in one iteration, now that's a performance log_sysop_commit (thread_p); } @@ -5140,6 +5138,7 @@ vacuum_consume_buffer_log_blocks (THREAD_ENTRY * thread_p) { /* Page is empty. We don't want to add a new block that does not require vacuum. 
*/ assert (data_page->index_unvacuumed == 0); + next_blockid = consumed_data.blockid - 1; // for will increment it to consumed_data.blockid continue; } diff --git a/src/query/xasl.h b/src/query/xasl.h index 3500bb9626e..42be488ee1b 100644 --- a/src/query/xasl.h +++ b/src/query/xasl.h @@ -28,20 +28,21 @@ #include -#include "storage_common.h" +#include "access_json_table.hpp" #include "memory_hash.h" -#include "string_opfunc.h" #include "query_list.h" #include "regu_var.h" +#include "storage_common.h" +#include "string_opfunc.h" #if defined (SERVER_MODE) || defined (SA_MODE) +#include "external_sort.h" +#include "heap_file.h" #if defined (ENABLE_COMPOSITE_LOCK) #include "lock_manager.h" #endif /* defined (ENABLE_COMPOSITE_LOCK) */ -#include "external_sort.h" #include "object_representation_sr.h" #include "scan_manager.h" -#include "heap_file.h" #endif /* defined (SERVER_MODE) || defined (SA_MODE) */ #if defined (SERVER_MODE) || defined (SA_MODE) @@ -49,26 +50,7 @@ struct binary_heap; #endif // SERVER_MODE || SA_MODE -/* - * COMPILE_CONTEXT cover from user input query string to generated xasl - */ -typedef struct compile_context COMPILE_CONTEXT; -struct compile_context -{ - XASL_NODE *xasl; - char *sql_user_text; /* original query statement that user input */ - int sql_user_text_len; /* length of sql_user_text */ - - char *sql_hash_text; /* rewrited query string which is used as hash key */ - - char *sql_plan_text; /* plans for this query */ - int sql_plan_alloc_size; /* query_plan alloc size */ - bool is_xasl_pinned_reference; /* to pin xasl cache entry */ - bool recompile_xasl_pinned; /* whether recompile again after xasl cache entry has been pinned */ - bool recompile_xasl; - SHA1Hash sha1; -}; /* XASL HEADER */ /* @@ -500,7 +482,7 @@ struct cte_proc_node #define XASL_RETURN_GENERATED_KEYS 0x2000 /* return generated keys */ #define XASL_NO_FIXED_SCAN 0x4000 /* disable fixed scan for this proc */ -#define XASL_IS_FLAGED(x, f) ((x)->flag & (int) (f)) +#define 
XASL_IS_FLAGED(x, f) (((x)->flag & (int) (f)) != 0) #define XASL_SET_FLAG(x, f) (x)->flag |= (int) (f) #define XASL_CLEAR_FLAG(x, f) (x)->flag &= (int) ~(f) @@ -705,6 +687,7 @@ typedef enum TARGET_CLASS_ATTR, TARGET_LIST, TARGET_SET, + TARGET_JSON_TABLE, TARGET_METHOD, TARGET_REGUVAL_LIST, TARGET_SHOWSTMT @@ -714,6 +697,7 @@ typedef enum { ACCESS_METHOD_SEQUENTIAL, /* sequential scan access */ ACCESS_METHOD_INDEX, /* indexed access */ + ACCESS_METHOD_JSON_TABLE, /* json table scan access */ ACCESS_METHOD_SCHEMA, /* schema access */ ACCESS_METHOD_SEQUENTIAL_RECORD_INFO, /* sequential scan that will read record info */ ACCESS_METHOD_SEQUENTIAL_PAGE_SCAN, /* sequential scan access that only scans pages without accessing record data */ @@ -775,12 +759,6 @@ struct list_spec_node XASL_NODE *xasl_node; /* the XASL node that contains the list file identifier */ }; -typedef enum -{ - KILLSTMT_TRAN = 0, - KILLSTMT_QUERY = 1, -} KILLSTMT_TYPE; - struct showstmt_spec_node { SHOWSTMT_TYPE show_type; /* show statement type */ @@ -834,6 +812,7 @@ union hybrid_node SET_SPEC_TYPE set_node; /* set specification */ METHOD_SPEC_TYPE method_node; /* method specification */ REGUVAL_LIST_SPEC_TYPE reguval_list_node; /* reguval_list specification */ + json_table_spec_node json_table_node; /* json_table specification */ }; /* class/list access specification */ /* @@ -885,6 +864,9 @@ union hybrid_node #define ACCESS_SPEC_METHOD_SPEC(ptr) \ ((ptr)->s.method_node) +#define ACCESS_SPEC_JSON_TABLE_SPEC(ptr) \ + ((ptr)->s.json_table_node) + #define ACCESS_SPEC_METHOD_XASL_NODE(ptr) \ ((ptr)->s.method_node.xasl_node) @@ -897,6 +879,15 @@ union hybrid_node #define ACCESS_SPEC_METHOD_LIST_ID(ptr) \ (ACCESS_SPEC_METHOD_XASL_NODE(ptr)->list_id) +#define ACCESS_SPEC_JSON_TABLE_ROOT_NODE(ptr) \ + ((ptr)->s.json_table_node.m_root_node) + +#define ACCESS_SPEC_JSON_TABLE_REGU_VAR(ptr) \ + ((ptr)->s.json_table_node.m_json_reguvar) + +#define ACCESS_SPEC_JSON_TABLE_M_NODE_COUNT(ptr) \ + 
((ptr)->s.json_table_node.m_node_count) + #if defined (SERVER_MODE) || defined (SA_MODE) struct orderby_stat { @@ -1002,7 +993,7 @@ struct xasl_node ACCESS_SPEC_TYPE *merge_spec; /* merge spec. node */ VAL_LIST *val_list; /* output-value list */ VAL_LIST *merge_val_list; /* value list for the merge spec */ - XASL_NODE *aptr_list; /* first uncorrelated subquery */ + XASL_NODE *aptr_list; /* CTEs and uncorrelated subquery. CTEs are guaranteed always before the subqueries */ XASL_NODE *bptr_list; /* OBJFETCH_PROC list */ XASL_NODE *dptr_list; /* corr. subquery list */ PRED_EXPR *after_join_pred; /* after-join predicate */ @@ -1040,7 +1031,7 @@ struct xasl_node * UPDATE/DELETE in MVCC */ #if defined (ENABLE_COMPOSITE_LOCK) /* note: upon reactivation, you may face header cross reference issues */ - LK_COMPOSITE_LOCK composite_lock; /* flag and lock block for composite locking for queries which obtain candidate + LK_COMPOSITE_LOCK composite_lock; /* flag and lock block for composite locking for queries which obtain candidate * rows for updates/deletes. 
*/ #endif /* defined (ENABLE_COMPOSITE_LOCK) */ union diff --git a/src/query/xasl_cache.c b/src/query/xasl_cache.c index 20a0e9ef7d9..64ca1edecd1 100644 --- a/src/query/xasl_cache.c +++ b/src/query/xasl_cache.c @@ -23,21 +23,22 @@ #ident "$Id$" -#include +#include "xasl_cache.h" +#include "binaryheap.h" +#include "compile_context.h" #include "config.h" - -#include "xasl_cache.h" +#include "list_file.h" #include "perf_monitor.h" #include "query_executor.h" -#include "list_file.h" -#include "binaryheap.h" -#include "statistics_sr.h" #include "query_manager.h" +#include "statistics_sr.h" #include "stream_to_xasl.h" #include "thread_entry.hpp" #include "thread_manager.hpp" +#include + #define XCACHE_ENTRY_MARK_DELETED ((INT32) 0x80000000) #define XCACHE_ENTRY_TO_BE_RECOMPILED ((INT32) 0x40000000) #define XCACHE_ENTRY_WAS_RECOMPILED ((INT32) 0x20000000) @@ -1321,7 +1322,7 @@ xcache_need_cleanup (void) * xcache_entry (out) : XASL cache entry. */ int -xcache_insert (THREAD_ENTRY * thread_p, const COMPILE_CONTEXT * context, XASL_STREAM * stream, +xcache_insert (THREAD_ENTRY * thread_p, const compile_context * context, XASL_STREAM * stream, int n_oid, const OID * class_oids, const int *class_locks, const int *tcards, XASL_CACHE_ENTRY ** xcache_entry) { diff --git a/src/query/xasl_cache.h b/src/query/xasl_cache.h index 2f2dae5341e..24841d86182 100644 --- a/src/query/xasl_cache.h +++ b/src/query/xasl_cache.h @@ -32,6 +32,9 @@ #include "xasl.h" +// forward definitions +struct compile_context; + /* Objects related to XASL cache entries. The information includes the object OID, the lock required to use the XASL * cache entry and the heap file cardinality. * Objects can be classes or serials. The heap file cardinality is only relevant for classes. 
@@ -141,7 +144,7 @@ extern int xcache_find_sha1 (THREAD_ENTRY * thread_p, const SHA1Hash * sha1, con extern int xcache_find_xasl_id (THREAD_ENTRY * thread_p, const XASL_ID * xid, XASL_CACHE_ENTRY ** xcache_entry, XASL_CLONE * xclone); extern void xcache_unfix (THREAD_ENTRY * thread_p, XASL_CACHE_ENTRY * xcache_entry); -extern int xcache_insert (THREAD_ENTRY * thread_p, const COMPILE_CONTEXT * context, XASL_STREAM * stream, +extern int xcache_insert (THREAD_ENTRY * thread_p, const compile_context * context, XASL_STREAM * stream, int n_oid, const OID * class_oids, const int *class_locks, const int *tcards, XASL_CACHE_ENTRY ** xcache_entry); extern void xcache_remove_by_oid (THREAD_ENTRY * thread_p, OID * oid); diff --git a/src/query/xasl_to_stream.c b/src/query/xasl_to_stream.c index ae6586c3d84..54001e4a1f1 100644 --- a/src/query/xasl_to_stream.c +++ b/src/query/xasl_to_stream.c @@ -39,23 +39,7 @@ #include "object_primitive.h" #include "work_space.h" #include "memory_alloc.h" - -/* memory alignment unit - to align stored XASL tree nodes */ -#define ALIGN_UNIT sizeof(double) -#define ALIGN_MASK (ALIGN_UNIT - 1) -#define MAKE_ALIGN(x) (((x) & ~ALIGN_MASK) + (((x) & ALIGN_MASK) ? 
ALIGN_UNIT : 0)) - -/* to limit size of XASL trees */ -#define OFFSETS_PER_BLOCK 4096 -#define START_PTR_PER_BLOCK 15 -#define MAX_PTR_BLOCKS 256 -#define PTR_BLOCK(ptr) (((UINTPTR) ptr) / __WORDSIZE) % MAX_PTR_BLOCKS - -/* - * the linear byte stream for store the given XASL tree is allocated - * and expanded dynamically on demand by the following amount of bytes - */ -#define STREAM_EXPANSION_UNIT (OFFSETS_PER_BLOCK * sizeof(int)) +#include "xasl_stream.hpp" #define BYTE_SIZE OR_INT_SIZE #define LONG_SIZE OR_INT_SIZE @@ -64,8 +48,8 @@ #define pack_long or_pack_int /* structure of a visited pointer constant */ -typedef struct visited_ptr VISITED_PTR; -struct visited_ptr +typedef struct xts_visited_ptr XTS_VISITED_PTR; +struct xts_visited_ptr { const void *ptr; /* a pointer constant */ int offset; /* offset where the node pointed by 'ptr' is stored */ @@ -77,7 +61,7 @@ static int xts_Stream_size = 0; /* # of bytes allocated */ static int xts_Free_offset_in_stream = 0; /* blocks of visited pointer constants */ -static VISITED_PTR *xts_Ptr_blocks[MAX_PTR_BLOCKS] = { 0 }; +static XTS_VISITED_PTR *xts_Ptr_blocks[MAX_PTR_BLOCKS] = { 0 }; /* low-water-mark of visited pointers */ static int xts_Ptr_lwm[MAX_PTR_BLOCKS] = { 0 }; @@ -118,6 +102,15 @@ static int xts_save_update_info (const UPDATE_PROC_NODE * ptr); static int xts_save_delete_info (const DELETE_PROC_NODE * ptr); static int xts_save_insert_info (const INSERT_PROC_NODE * ptr); #endif + +// *INDENT-OFF* +template +static int xts_save (const T &t); + +template +static void xts_debug_check (const T &t, char *pack_start, const char *pack_end); +// *INDENT-ON* + static int xts_save_db_value_array (DB_VALUE ** ptr, int size); static int xts_save_int_array (int *ptr, int size); static int xts_save_hfid_array (HFID * ptr, int size); @@ -129,6 +122,9 @@ static int xts_save_upddel_class_info_array (const UPDDEL_CLASS_INFO * classes, static int xts_save_update_assignment_array (const UPDATE_ASSIGNMENT * assigns, int 
nelements); static int xts_save_odku_info (const ODKU_INFO * odku_info); +static char *xts_process (char *ptr, const json_table_column & json_table_col); +static char *xts_process (char *ptr, const json_table_node & json_table_node); +static char *xts_process (char *ptr, const json_table_spec_node & set_spec); static char *xts_process_xasl_node (char *ptr, const XASL_NODE * xasl); static char *xts_process_xasl_header (char *ptr, const XASL_NODE_HEADER header); static char *xts_process_filter_pred_node (char *ptr, const PRED_EXPR_WITH_CONTEXT * pred); @@ -164,6 +160,7 @@ static char *xts_process_cls_spec_type (char *ptr, const CLS_SPEC_TYPE * cls_spe static char *xts_process_list_spec_type (char *ptr, const LIST_SPEC_TYPE * list_spec); static char *xts_process_showstmt_spec_type (char *ptr, const SHOWSTMT_SPEC_TYPE * list_spec); static char *xts_process_set_spec_type (char *ptr, const SET_SPEC_TYPE * set_spec); +static char *xts_process_json_table_column_behavior (char *ptr, const json_table_column_behavior * behavior); static char *xts_process_method_spec_type (char *ptr, const METHOD_SPEC_TYPE * method_spec); static char *xts_process_rlist_spec_type (char *ptr, const LIST_SPEC_TYPE * list_spec); static char *xts_process_list_id (char *ptr, const QFILE_LIST_ID * list_id); @@ -185,6 +182,9 @@ static char *xts_process_method_sig (char *ptr, const METHOD_SIG * method_sig, i static char *xts_process_connectby_proc (char *ptr, const CONNECTBY_PROC_NODE * connectby_proc); static char *xts_process_regu_value_list (char *ptr, const REGU_VALUE_LIST * regu_value_list); +static int xts_sizeof (const json_table_column & ptr); +static int xts_sizeof (const json_table_node & ptr); +static int xts_sizeof (const json_table_spec_node & ptr); static int xts_sizeof_xasl_node (const XASL_NODE * ptr); static int xts_sizeof_filter_pred_node (const PRED_EXPR_WITH_CONTEXT * ptr); static int xts_sizeof_func_pred (const FUNC_PRED * ptr); @@ -219,6 +219,7 @@ static int 
xts_sizeof_list_spec_type (const LIST_SPEC_TYPE * ptr); static int xts_sizeof_showstmt_spec_type (const SHOWSTMT_SPEC_TYPE * ptr); static int xts_sizeof_set_spec_type (const SET_SPEC_TYPE * ptr); static int xts_sizeof_method_spec_type (const METHOD_SPEC_TYPE * ptr); +static int xts_sizeof_json_table_column_behavior (const json_table_column_behavior * behavior); static int xts_sizeof_list_id (const QFILE_LIST_ID * ptr); static int xts_sizeof_val_list (const VAL_LIST * ptr); static int xts_sizeof_regu_variable (const REGU_VARIABLE * ptr); @@ -289,7 +290,7 @@ xts_map_xasl_to_stream (const XASL_NODE * xasl_tree, XASL_STREAM * stream) + sizeof (int); /* [size of body data] */ org_offset = offset; - offset = MAKE_ALIGN (offset); + offset = xasl_stream_make_align (offset); xts_reserve_location_in_stream (offset); @@ -379,7 +380,7 @@ xts_map_filter_pred_to_stream (const PRED_EXPR_WITH_CONTEXT * pred, char **pred_ offset = sizeof (int) /* [size of header data] */ + header_size /* [header data] */ + sizeof (int); /* [size of body data] */ - offset = MAKE_ALIGN (offset); + offset = xasl_stream_make_align (offset); xts_reserve_location_in_stream (offset); @@ -445,7 +446,7 @@ xts_map_func_pred_to_stream (const FUNC_PRED * xasl_tree, char **xasl_stream, in offset = sizeof (int) /* [size of header data] */ + header_size /* [header data] */ + sizeof (int); /* [size of body data] */ - offset = MAKE_ALIGN (offset); + offset = xasl_stream_make_align (offset); xts_reserve_location_in_stream (offset); @@ -4357,7 +4358,7 @@ xts_process_access_spec_type (char *ptr, const ACCESS_SPEC_TYPE * access_spec) ptr = or_pack_int (ptr, access_spec->access); if (access_spec->access == ACCESS_METHOD_SEQUENTIAL || access_spec->access == ACCESS_METHOD_SEQUENTIAL_RECORD_INFO - || access_spec->access == ACCESS_METHOD_SEQUENTIAL_PAGE_SCAN) + || access_spec->access == ACCESS_METHOD_SEQUENTIAL_PAGE_SCAN || access_spec->access == ACCESS_METHOD_JSON_TABLE) { ptr = or_pack_int (ptr, 0); } @@ -4420,6 +4421,10 
@@ xts_process_access_spec_type (char *ptr, const ACCESS_SPEC_TYPE * access_spec) ptr = xts_process_method_spec_type (ptr, &ACCESS_SPEC_METHOD_SPEC (access_spec)); break; + case TARGET_JSON_TABLE: + ptr = xts_process (ptr, ACCESS_SPEC_JSON_TABLE_SPEC (access_spec)); + break; + default: xts_Xasl_errcode = ER_QPROC_INVALID_XASLNODE; return NULL; @@ -4755,6 +4760,138 @@ xts_process_set_spec_type (char *ptr, const SET_SPEC_TYPE * set_spec) return ptr; } +static char * +xts_process_json_table_column_behavior (char *ptr, const json_table_column_behavior * behavior) +{ + ptr = or_pack_int (ptr, behavior->m_behavior); + + if (behavior->m_behavior == JSON_TABLE_DEFAULT_VALUE) + { + ptr = xts_process_db_value (ptr, behavior->m_default_value); + } + + return ptr; +} + +static char * +xts_process (char *ptr, const json_table_column & jtc) +{ + int offset; + char *start_ptr = ptr; + + // save json function + ptr = or_pack_int (ptr, jtc.m_function); + + // save output db_value pointer + offset = xts_save_db_value (jtc.m_output_value_pointer); + if (offset == ER_FAILED) + { + return NULL; + } + ptr = or_pack_int (ptr, offset); + + if (jtc.m_function == JSON_TABLE_ORDINALITY) + { + // nothing else required + xts_debug_check (jtc, start_ptr, ptr); + return ptr; + } + + // save domain + ptr = or_pack_domain (ptr, jtc.m_domain, 0, 0); + + // save path + assert (jtc.m_path != NULL && strlen (jtc.m_path) > 0); + offset = xts_save_string (jtc.m_path); + if (offset == ER_FAILED) + { + return NULL; + } + ptr = or_pack_int (ptr, offset); + + // save column_name + assert (jtc.m_column_name != NULL && strlen (jtc.m_column_name) > 0); + offset = xts_save_string (jtc.m_column_name); + if (offset == ER_FAILED) + { + return NULL; + } + ptr = or_pack_int (ptr, offset); + + if (jtc.m_function == JSON_TABLE_EXISTS) + { + xts_debug_check (jtc, start_ptr, ptr); + return ptr; + } + + // save on_error_behavior + ptr = xts_process_json_table_column_behavior (ptr, &jtc.m_on_error); + ptr = 
xts_process_json_table_column_behavior (ptr, &jtc.m_on_empty); + + xts_debug_check (jtc, start_ptr, ptr); + + return ptr; +} + +static char * +xts_process (char *ptr, const json_table_node & jtn) +{ + int offset; + char *start_ptr = ptr; + + // save string + offset = xts_save_string (jtn.m_path); + if (offset == ER_FAILED) + { + return NULL; + } + ptr = or_pack_int (ptr, offset); + + // save m_output_columns + ptr = or_pack_int (ptr, (int) jtn.m_output_columns_size); + for (size_t i = 0; i < jtn.m_output_columns_size; ++i) + { + ptr = xts_process (ptr, jtn.m_output_columns[i]); + } + + // save nested nodes + ptr = or_pack_int (ptr, (int) jtn.m_nested_nodes_size); + for (size_t i = 0; i < jtn.m_nested_nodes_size; ++i) + { + ptr = xts_process (ptr, jtn.m_nested_nodes[i]); + } + + ptr = or_pack_int (ptr, (int) jtn.m_id); + + ptr = or_pack_int (ptr, (int) jtn.m_expand_type); + + xts_debug_check (jtn, start_ptr, ptr); + + return ptr; +} + +static char * +xts_process (char *ptr, const json_table_spec_node & json_table_spec) +{ + int offset; + char *start_ptr = ptr; + + ptr = or_pack_int (ptr, (int) json_table_spec.m_node_count); + + offset = xts_save_regu_variable (json_table_spec.m_json_reguvar); + if (offset == ER_FAILED) + { + return NULL; + } + ptr = or_pack_int (ptr, offset); + + ptr = xts_process (ptr, *json_table_spec.m_root_node); + + xts_debug_check (json_table_spec, start_ptr, ptr); + + return ptr; +} + static char * xts_process_method_spec_type (char *ptr, const METHOD_SPEC_TYPE * method_spec) { @@ -5086,7 +5223,7 @@ xts_process_aggregate_type (char *ptr, const AGGREGATE_TYPE * aggregate) ptr = or_pack_int (ptr, (int) aggregate->opr_dbtype); - ptr = xts_process_regu_variable (ptr, &aggregate->operand); + ptr = xts_process_regu_variable_list (ptr, aggregate->operands); if (ptr == NULL) { return NULL; @@ -6394,6 +6531,15 @@ xts_sizeof_access_spec_type (const ACCESS_SPEC_TYPE * access_spec) size += tmp_size; break; + case TARGET_JSON_TABLE: + tmp_size = 
xts_sizeof (ACCESS_SPEC_JSON_TABLE_SPEC (access_spec)); + if (tmp_size == ER_FAILED) + { + return ER_FAILED; + } + size += tmp_size; + break; + default: xts_Xasl_errcode = ER_QPROC_INVALID_XASLNODE; return ER_FAILED; @@ -6572,6 +6718,94 @@ xts_sizeof_method_spec_type (const METHOD_SPEC_TYPE * method_spec) return size; } +static int +xts_sizeof_json_table_column_behavior (const json_table_column_behavior * behavior) +{ + int size = OR_INT_SIZE; // json_table_column_behavior_type + + if (behavior->m_behavior == JSON_TABLE_DEFAULT_VALUE) + { + size += OR_VALUE_ALIGNED_SIZE (behavior->m_default_value); + } + + return size; +} + +static int +xts_sizeof (const json_table_column & json_table_column) +{ + int size = 0; + + size += OR_INT_SIZE; /* m_function */ + size += PTR_SIZE; /* m_output_value_pointer */ + + if (json_table_column.m_function == JSON_TABLE_ORDINALITY) + { + // that's all + return size; + } + + size += or_packed_domain_size (json_table_column.m_domain, 0); /* m_domain */ + size += PTR_SIZE; /* m_path */ + size += PTR_SIZE; /* m_column_name */ + + if (json_table_column.m_function == JSON_TABLE_EXISTS) + { + return size; + } + + size += xts_sizeof_json_table_column_behavior (&json_table_column.m_on_error); + size += xts_sizeof_json_table_column_behavior (&json_table_column.m_on_empty); + + return size; +} + +static int +xts_sizeof (const json_table_node & jtn) +{ + int size = 0; + + size += PTR_SIZE; // m_path + + size += OR_INT_SIZE; // m_output_columns_size + for (size_t i = 0; i < jtn.m_output_columns_size; ++i) + { + size += xts_sizeof (jtn.m_output_columns[i]); // m_output_columns + } + + size += OR_INT_SIZE; // m_nested_nodes_size + for (size_t i = 0; i < jtn.m_nested_nodes_size; ++i) + { + size += xts_sizeof (jtn.m_nested_nodes[i]); // m_nested_nodes + } + + size += OR_INT_SIZE; // m_id + + size += OR_INT_SIZE; // expand type + + return size; +} + +/* + * xts_sizeof_json_table_spec_type () - + * return: + * ptr(in) : + */ +static int +xts_sizeof 
(const json_table_spec_node & json_table_spec) +{ + int size = 0; + + // reguvar needs to be set + size += (PTR_SIZE /* regu_var */ + + OR_INT_SIZE /* json_table_node number */ + ); /* json_table_node */ + + size += xts_sizeof (*json_table_spec.m_root_node); + + return size; +} + /* * xts_sizeof_list_id () - * return:xts_process_db_value @@ -6801,7 +7035,7 @@ xts_sizeof_aggregate_type (const AGGREGATE_TYPE * aggregate) + OR_INT_SIZE /* option */ + OR_INT_SIZE); /* opr_dbtype */ - tmp_size = xts_sizeof_regu_variable (&aggregate->operand); + tmp_size = xts_sizeof_regu_variable_list (aggregate->operands); if (tmp_size == ER_FAILED) { return ER_FAILED; @@ -7099,23 +7333,23 @@ xts_mark_ptr_visited (const void *ptr, int offset) int new_lwm; int block_no; - block_no = PTR_BLOCK (ptr); + block_no = xasl_stream_get_ptr_block (ptr); new_lwm = xts_Ptr_lwm[block_no]; if (xts_Ptr_max[block_no] == 0) { xts_Ptr_max[block_no] = START_PTR_PER_BLOCK; - xts_Ptr_blocks[block_no] = (VISITED_PTR *) malloc (sizeof (VISITED_PTR) * xts_Ptr_max[block_no]); + xts_Ptr_blocks[block_no] = (XTS_VISITED_PTR *) malloc (sizeof (XTS_VISITED_PTR) * xts_Ptr_max[block_no]); } else if (xts_Ptr_max[block_no] <= new_lwm) { xts_Ptr_max[block_no] *= 2; xts_Ptr_blocks[block_no] = - (VISITED_PTR *) realloc (xts_Ptr_blocks[block_no], sizeof (VISITED_PTR) * xts_Ptr_max[block_no]); + (XTS_VISITED_PTR *) realloc (xts_Ptr_blocks[block_no], sizeof (XTS_VISITED_PTR) * xts_Ptr_max[block_no]); } - if (xts_Ptr_blocks[block_no] == (VISITED_PTR *) NULL) + if (xts_Ptr_blocks[block_no] == (XTS_VISITED_PTR *) NULL) { xts_Xasl_errcode = ER_OUT_OF_VIRTUAL_MEMORY; return ER_FAILED; @@ -7145,7 +7379,7 @@ xts_get_offset_visited_ptr (const void *ptr) int block_no; int element_no; - block_no = PTR_BLOCK (ptr); + block_no = xasl_stream_get_ptr_block (ptr); if (xts_Ptr_lwm[block_no] <= 0) { @@ -7196,7 +7430,7 @@ xts_reserve_location_in_stream (int size) int grow; int org_size = size; - size = MAKE_ALIGN (size); + size = 
xasl_stream_make_align (size); needed = size - (xts_Stream_size - xts_Free_offset_in_stream); if (needed >= 0) @@ -7269,3 +7503,85 @@ xts_process_regu_variable_list (char *ptr, const REGU_VARIABLE_LIST regu_var_lis return ptr; } + +// *INDENT-OFF* +// +// xts_save () - template function to pack structure into XASL stream buffer and save its offset +// +// template T - type having an overload of xts_sizeof and xts_process functions +// +// return : offset +// t (in) : +// +template +int static +xts_save (const T &t) +{ + int packed_length; + char *ptr; + + int offset = xts_get_offset_visited_ptr (&t); + if (offset != ER_FAILED) + { + return offset; + } + + packed_length = xts_reserve_location_in_stream (xts_sizeof (t)); + + offset = xts_reserve_location_in_stream (packed_length); + if (offset == ER_FAILED || xts_mark_ptr_visited (&t, offset) == ER_FAILED) + { + return ER_FAILED; + } + ptr = &xts_Stream_buffer[offset]; + ptr = xts_process (ptr, t); + + return offset; +} + +template +static void +xts_debug_check (const T &t, char *pack_start, const char *pack_end) +{ +#if !defined (NDEBUG) + + // check for common mistakes: + // + // 1. size underestimation: + // buffer overflow may occur + // + // 2. pack_end does not match unpack_end + // if unpack_end is not same as pack_end, building next structure will start at the wrong offset + // + // 3. 
data consistency + // check original data is same as resulted data after pack/unpack + // + + stx_init_xasl_unpack_info (NULL, xts_Stream_buffer, xts_Stream_size); + + // check sizeof is correct + std::size_t buf_size = pack_end - pack_start; + std::size_t estimate_size = xts_sizeof (t); + assert (buf_size <= estimate_size); // estimation should be accurate or pessimistic + + // build object from packed data + T unpack_t; + char * unpack_end = stx_build (NULL, pack_start, unpack_t); + if (unpack_end != pack_end) + { + // this leads to build corruption + assert (false); + } + + if (!xasl_stream_compare (t, unpack_t)) + { + // data is not consistent + assert (false); + } + + xasl_unpack_info* unpack_info = stx_get_xasl_unpack_info_ptr (NULL); + db_private_free_and_init (NULL, unpack_info); + stx_set_xasl_unpack_info_ptr (NULL, NULL); +#endif // DEBUG +} +// *INDENT-ON* diff --git a/src/storage/btree.c b/src/storage/btree.c index a54e14136f9..bbc314e8859 100644 --- a/src/storage/btree.c +++ b/src/storage/btree.c @@ -72,7 +72,6 @@ #define BTREE_SPLIT_DEFAULT_PIVOT 0.5f #define DISK_PAGE_BITS (DB_PAGESIZE * CHAR_BIT) /* Num of bits per page */ -#define RESERVED_SIZE_IN_PAGE sizeof(FILEIO_PAGE_RESERVED) #define BTREE_NODE_MAX_SPLIT_SIZE(thread_p, page_ptr) \ (db_page_size() - spage_header_size() - spage_get_space_for_record(thread_p, (page_ptr), HEADER)) @@ -887,103 +886,116 @@ struct btree_delete_helper #define BTREE_DELETE_MVCC_INFO(helper) \ (&((helper)->object_info.mvcc_info)) -/* Performance tracking macro's. 
*/ -#define BTREE_PERF_TRACK_TIME(thread_p, helper) \ - do \ - { \ - PERF_UTIME_TRACKER_TIME (thread_p, &(helper)->time_track, PSTAT_BT_LEAF); \ - switch ((helper)->purpose) \ - { \ - case BTREE_OP_INSERT_NEW_OBJECT: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_INSERT); \ - break; \ - case BTREE_OP_INSERT_MVCC_DELID: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_MVCC_DELETE); \ - break; \ - case BTREE_OP_INSERT_MARK_DELETED: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_MARK_DELETE); \ - break; \ - case BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_UNDO_DELETE); \ - break; \ - case BTREE_OP_DELETE_OBJECT_PHYSICAL: \ - case BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_DELETE); \ - break; \ - case BTREE_OP_DELETE_UNDO_INSERT: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_UNDO_INSERT); \ - break; \ - case BTREE_OP_DELETE_UNDO_INSERT_DELID: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_UNDO_MVCC_DELETE); \ - break; \ - case BTREE_OP_DELETE_VACUUM_OBJECT: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_VACUUM); \ - break; \ - case BTREE_OP_DELETE_VACUUM_INSID: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_VACUUM_INSID); \ - break; \ - default: \ - assert (false); \ - } \ - } \ - while (false) +// Performance tracking template functions +// Helper is either BTREE_INSERT_HELPER or BTREE_DELETE_HELPER +template < typename Helper > static inline void +btree_perf_track_time (THREAD_ENTRY * thread_p, Helper * helper) +{ + PERF_UTIME_TRACKER_TIME (thread_p, &helper->time_track, PSTAT_BT_LEAF); + switch (helper->purpose) + { + case BTREE_OP_INSERT_NEW_OBJECT: + case 
BTREE_OP_ONLINE_INDEX_IB_INSERT: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_INSERT); + break; + case BTREE_OP_INSERT_MVCC_DELID: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_MVCC_DELETE); + break; + case BTREE_OP_INSERT_MARK_DELETED: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_MARK_DELETE); + break; + case BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_UNDO_DELETE); + break; + case BTREE_OP_DELETE_OBJECT_PHYSICAL: + case BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED: + case BTREE_OP_ONLINE_INDEX_TRAN_DELETE: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_DELETE); + break; + case BTREE_OP_DELETE_UNDO_INSERT: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_UNDO_INSERT); + break; + case BTREE_OP_DELETE_UNDO_INSERT_DELID: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_UNDO_MVCC_DELETE); + break; + case BTREE_OP_DELETE_VACUUM_OBJECT: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_VACUUM); + break; + case BTREE_OP_DELETE_VACUUM_INSID: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_VACUUM_INSID); + break; + default: + assert (false); + } +} -#define BTREE_PERF_TRACK_TRAVERSE_TIME(thread_p, helper) \ - do \ - { \ - PERF_UTIME_TRACKER_TIME (thread_p, &(helper)->time_track, PSTAT_BT_TRAVERSE); \ - switch ((helper)->purpose) \ - { \ - case BTREE_OP_INSERT_NEW_OBJECT: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_INSERT_TRAVERSE); \ - break; \ - case BTREE_OP_INSERT_MVCC_DELID: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, 
&(helper)->time_track, PSTAT_BT_MVCC_DELETE_TRAVERSE); \ - break; \ - case BTREE_OP_INSERT_MARK_DELETED: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_MARK_DELETE_TRAVERSE); \ - break; \ - case BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_UNDO_DELETE_TRAVERSE); \ - break; \ - case BTREE_OP_DELETE_OBJECT_PHYSICAL: \ - case BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_DELETE_TRAVERSE); \ - break; \ - case BTREE_OP_DELETE_UNDO_INSERT: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_UNDO_INSERT_TRAVERSE); \ - break; \ - case BTREE_OP_DELETE_UNDO_INSERT_DELID: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_UNDO_MVCC_DELETE_TRAVERSE); \ - break; \ - case BTREE_OP_DELETE_VACUUM_OBJECT: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_VACUUM_TRAVERSE); \ - break; \ - case BTREE_OP_DELETE_VACUUM_INSID: \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, PSTAT_BT_VACUUM_INSID_TRAVERSE); \ - break; \ - default: \ - assert (false); \ - } \ - } \ - while (false) +template < typename Helper > static inline void +btree_perf_track_traverse_time (THREAD_ENTRY * thread_p, Helper * helper) +{ + PERF_UTIME_TRACKER_TIME (thread_p, &helper->time_track, PSTAT_BT_TRAVERSE); + switch (helper->purpose) + { + case BTREE_OP_INSERT_NEW_OBJECT: + case BTREE_OP_ONLINE_INDEX_IB_INSERT: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_INSERT_TRAVERSE); + break; + case BTREE_OP_INSERT_MVCC_DELID: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_MVCC_DELETE_TRAVERSE); + break; + case 
BTREE_OP_INSERT_MARK_DELETED: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_MARK_DELETE_TRAVERSE); + break; + case BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_UNDO_DELETE_TRAVERSE); + break; + case BTREE_OP_DELETE_OBJECT_PHYSICAL: + case BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED: + case BTREE_OP_ONLINE_INDEX_TRAN_DELETE: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_DELETE_TRAVERSE); + break; + case BTREE_OP_DELETE_UNDO_INSERT: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_UNDO_INSERT_TRAVERSE); + break; + case BTREE_OP_DELETE_UNDO_INSERT_DELID: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_UNDO_MVCC_DELETE_TRAVERSE); + break; + case BTREE_OP_DELETE_VACUUM_OBJECT: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_VACUUM_TRAVERSE); + break; + case BTREE_OP_DELETE_VACUUM_INSID: + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &helper->time_track, PSTAT_BT_VACUUM_INSID_TRAVERSE); + break; + default: + assert (false); + } +} -#define BTREE_PERF_OVF_OIDS_FIX_TIME(thread_p, track) \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, track, PSTAT_BT_FIX_OVF_OIDS) +static inline void +btree_perf_ovf_oids_fix_time (THREAD_ENTRY * thread_p, PERF_UTIME_TRACKER * track) +{ + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, track, PSTAT_BT_FIX_OVF_OIDS); +} -#define BTREE_PERF_UNIQUE_LOCK_TIME(thread_p, track, lock) \ - do \ - { \ - if ((lock) == S_LOCK) \ - { \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, track, PSTAT_BT_UNIQUE_RLOCKS); \ - } \ - else \ - { \ - PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, track, PSTAT_BT_UNIQUE_WLOCKS ); \ - } \ - } \ - while (false) +static inline void +btree_perf_unique_lock_time (THREAD_ENTRY * thread_p, PERF_UTIME_TRACKER * track, LOCK lock) +{ + if (lock == 
S_LOCK) + { + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, track, PSTAT_BT_UNIQUE_RLOCKS); + } + else + { + PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, track, PSTAT_BT_UNIQUE_WLOCKS); + } +} /* B-tree redo recovery flags. They are additional to * LOG_RV_RECORD_MODIFY_MASK. @@ -1220,6 +1232,28 @@ typedef enum btree_rv_debug_id BTREE_RV_DEBUG_ID; new_size, \ BTID_AS_ARGS (btid) +/* + * Online index loading + */ + +/* Online index states */ +/* Include MVCCID_ALL_VISIBLE when we set a flag. */ +const MVCCID BTREE_ONLINE_INDEX_NORMAL_FLAG_STATE = MVCCID_ALL_VISIBLE; +const MVCCID BTREE_ONLINE_INDEX_INSERT_FLAG_STATE = 0x4000000000000000 | MVCCID_ALL_VISIBLE; +const MVCCID BTREE_ONLINE_INDEX_DELETE_FLAG_STATE = 0x8000000000000000 | MVCCID_ALL_VISIBLE; +const MVCCID BTREE_ONLINE_INDEX_FLAG_MASK = 0xC000000000000000; +const MVCCID BTREE_ONLINE_INDEX_MVCCID_MASK = ~0xC000000000000000; + +typedef struct btree_helper BTREE_HELPER; +struct btree_helper +{ + BTREE_INSERT_HELPER insert_helper; + BTREE_DELETE_HELPER delete_helper; +}; + +#define BTREE_HELPER_INITIALIZER \ + { BTREE_INSERT_HELPER_INITIALIZER, BTREE_DELETE_HELPER_INITIALIZER } + /* * Static functions */ @@ -1245,12 +1279,12 @@ static void btree_read_fixed_portion_of_non_leaf_record (RECDES * rec, NON_LEAF_ static void btree_write_fixed_portion_of_non_leaf_record_to_orbuf (OR_BUF * buf, NON_LEAF_REC * nlf_rec); static int btree_read_fixed_portion_of_non_leaf_record_from_orbuf (OR_BUF * buf, NON_LEAF_REC * nlf_rec); static void btree_append_oid (RECDES * rec, OID * oid); -STATIC_INLINE void btree_add_mvcc_delid (RECDES * rec, int oid_offset, int mvcc_delid_offset, MVCCID * p_mvcc_delid, - char **rv_undo_data_ptr, char **rv_redo_data_ptr) - __attribute__ ((ALWAYS_INLINE)); -STATIC_INLINE void btree_set_mvcc_delid (RECDES * rec, int mvcc_delid_offset, MVCCID * p_mvcc_delid, - char **rv_undo_data_ptr, char **rv_redo_data_ptr) - __attribute__ ((ALWAYS_INLINE)); +STATIC_INLINE void btree_add_mvccid (RECDES * rec, 
int oid_offset, int mvccid_offset, MVCCID mvccid, short flag, + char **rv_undo_data_ptr, char **rv_redo_data_ptr) __attribute__ ((ALWAYS_INLINE)); +STATIC_INLINE void btree_set_mvccid (RECDES * rec, int mvccid_offset, MVCCID * p_mvccid, + char **rv_undo_data_ptr, char **rv_redo_data_ptr) __attribute__ ((ALWAYS_INLINE)); +static inline void btree_remove_mvccid (RECDES * record, int oid_offset, int mvccid_offset, short flag, + char **rv_undo_data_ptr, char **rv_redo_data_ptr); static void btree_record_append_object (THREAD_ENTRY * thread_p, BTID_INT * btid_int, RECDES * record, BTREE_NODE_TYPE node_type, BTREE_OBJECT_INFO * object_info, char **rv_undo_data_ptr, char **rv_redo_data_ptr); @@ -1548,6 +1582,9 @@ static int btree_get_max_new_data_size (THREAD_ENTRY * thread_p, BTID_INT * btid static int btree_key_insert_new_object (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE * key, PAGE_PTR * leaf_page, BTREE_SEARCH_KEY_HELPER * search_key, bool * restart, void *other_args); +static int btree_key_online_index_IB_insert (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE * key, + PAGE_PTR * leaf_page, BTREE_SEARCH_KEY_HELPER * search_key, bool * restart, + void *other_args); static int btree_key_insert_new_key (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE * key, PAGE_PTR leaf_page, BTREE_INSERT_HELPER * insert_helper, BTREE_SEARCH_KEY_HELPER * search_key); static int btree_key_find_and_insert_delete_mvccid (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE * key, @@ -1692,6 +1729,57 @@ STATIC_INLINE void btree_insert_sysop_end (THREAD_ENTRY * thread_p, BTREE_INSERT STATIC_INLINE const char *btree_purpose_to_string (BTREE_OP_PURPOSE purpose) __attribute__ ((ALWAYS_INLINE)); STATIC_INLINE const char *btree_op_type_to_string (int op_type) __attribute__ ((ALWAYS_INLINE)); +static bool btree_is_class_oid_packed (BTID_INT * btid_int, RECDES * record, BTREE_NODE_TYPE node_type, bool is_first); +static bool btree_is_fixed_size (BTID_INT * btid_int, 
RECDES * record, BTREE_NODE_TYPE node_type, bool is_first); +static bool btree_is_insert_data_purpose (BTREE_OP_PURPOSE purpose); +static bool btree_is_insert_object_purpose (BTREE_OP_PURPOSE purpose); +static bool btree_is_insert_delid_purpose (BTREE_OP_PURPOSE purpose); +static bool btree_is_delete_data_purpose (BTREE_OP_PURPOSE purpose); +static bool btree_is_delete_object_purpose (BTREE_OP_PURPOSE purpose); +static void btree_rv_log_delete_object (THREAD_ENTRY * thread_p, const BTREE_DELETE_HELPER & delete_helper, + LOG_DATA_ADDR & addr, int undo_length, int redo_length, const char *undo_data, + const char *redo_data); +static void btree_rv_log_insert_object (THREAD_ENTRY * thread_p, const BTREE_INSERT_HELPER & insert_helper, + LOG_DATA_ADDR & addr, int undo_length, int redo_length, const char *undo_data, + const char *redo_data); + +static inline void btree_online_index_check_state (MVCCID state); +static inline bool btree_online_index_is_insert_flag_state (MVCCID state); +static inline bool btree_online_index_is_delete_flag_state (MVCCID state); +static inline bool btree_online_index_is_normal_state (MVCCID state); +static inline void btree_online_index_set_insert_flag_state (MVCCID & state); +static inline void btree_online_index_set_delete_flag_state (MVCCID & state); +static inline void btree_online_index_set_normal_state (MVCCID & state); +static void btree_online_index_change_state (THREAD_ENTRY * thread_p, BTID_INT * btid_int, RECDES * record, + BTREE_NODE_TYPE node_type, int offset_to_object, MVCCID new_state, + char **rv_undo_data, char **rv_redo_data); + +static int btree_find_oid_with_page_and_record (THREAD_ENTRY * thread_p, BTID_INT * btid_int, OID * oid, + PAGE_PTR leaf_page, BTREE_OP_PURPOSE purpose, + BTREE_MVCC_INFO * match_mvccinfo, RECDES * record, LEAF_REC * leaf_info, + int offset_after_key, PAGE_PTR * found_page, PAGE_PTR * prev_page, + int *offset_to_object, BTREE_MVCC_INFO * object_mvcc_info, + RECDES * new_record); + +static int 
btree_key_online_index_tran_insert (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE * key, + PAGE_PTR * leaf_page, BTREE_SEARCH_KEY_HELPER * search_key, + bool * restart, void *other_args); + +static int btree_key_online_index_tran_insert_DF (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE * key, + PAGE_PTR * leaf_page, BTREE_SEARCH_KEY_HELPER * search_key, + bool * restart, void *other_args); + +static int btree_key_online_index_tran_delete (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE * key, + PAGE_PTR * leaf_page, BTREE_SEARCH_KEY_HELPER * search_key, + bool * restart, void *other_args); + +static inline void btree_insert_helper_to_delete_helper (BTREE_INSERT_HELPER * insert_helper, + BTREE_DELETE_HELPER * delete_helper); +static inline void btree_delete_helper_to_insert_helper (BTREE_DELETE_HELPER * delete_helper, + BTREE_INSERT_HELPER * insert_helper); + +static inline bool btree_is_online_index_loading (BTREE_OP_PURPOSE purpose); + /* * btree_fix_root_with_info () - Fix b-tree root page and output its VPID, header and b-tree info if requested. 
* @@ -1834,10 +1922,24 @@ btree_clear_key_value (bool * clear_flag, DB_VALUE * key_value) pr_clear_value (key_value); *clear_flag = false; } - + // also set null + db_make_null (key_value); return *clear_flag; } +/* + * btree_init_temp_key_value () - + * return: void + * clear_flag (in/out): + * key_value (in/out): + */ +void +btree_init_temp_key_value (bool * clear_flag, DB_VALUE * key_value) +{ + db_make_null (key_value); + *clear_flag = false; +} + /* * btree_create_overflow_key_file () - Create file for overflow keyes * @@ -2806,8 +2908,9 @@ btree_leaf_change_first_object (THREAD_ENTRY * thread_p, RECDES * recp, BTID_INT /* Add MVCC info */ if (new_has_insid) { - assert (MVCCID_IS_VALID (mvcc_info->insert_mvccid) - && MVCC_ID_PRECEDES (mvcc_info->insert_mvccid, log_Gl.hdr.mvcc_next_id)); + assert (!btree_online_index_is_normal_state (mvcc_info->insert_mvccid) + || (MVCCID_IS_VALID (mvcc_info->insert_mvccid) + && MVCC_ID_PRECEDES (mvcc_info->insert_mvccid, log_Gl.hdr.mvcc_next_id))); /* Add insert MVCCID */ if (or_put_mvccid (&buffer, mvcc_info->insert_mvccid) != NO_ERROR) { @@ -3497,30 +3600,30 @@ btree_append_oid (RECDES * rec, OID * oid) } /* - * btree_add_mvcc_delid () - Add delete MVCCID in b-tree record. + * btree_add_mvccid () - Add insert/delete MVCCID in b-tree record. * * return : Void. * rec (in) : B-tree record. * oid_offset (in) : Offset to object (where MVCC flag is set). - * mvcc_delid_offset (in) : Add MVCCID at this offset. - * p_mvcc_delid (in) : Pointer to MVCCID value. + * mvccid_offset (in) : Add MVCCID at this offset + * mvccid (in) : MVCCID value + * flag (in) : MVCCID flag for has insert or delete * rv_undo_data_ptr (out) : Outputs undo recovery data for changing the record. * rv_redo_data_ptr (out) : Outputs redo recovery data for changing the record. 
*/ STATIC_INLINE void -btree_add_mvcc_delid (RECDES * rec, int oid_offset, int mvcc_delid_offset, MVCCID * p_mvcc_delid, - char **rv_undo_data_ptr, char **rv_redo_data_ptr) +btree_add_mvccid (RECDES * rec, int oid_offset, int mvccid_offset, MVCCID mvccid, short flag, + char **rv_undo_data_ptr, char **rv_redo_data_ptr) { int dest_offset; - char *mvcc_delid_ptr = NULL; + char *mvccid_dest_ptr = NULL; char *oid_ptr = NULL; - assert (rec != NULL && p_mvcc_delid != NULL && oid_offset >= 0 && mvcc_delid_offset > 0 - && oid_offset < mvcc_delid_offset); - assert (!btree_record_object_is_flagged (rec->data + oid_offset, BTREE_OID_HAS_MVCC_DELID)); + assert (rec != NULL && oid_offset >= 0 && mvccid_offset > 0 && oid_offset < mvccid_offset); + assert (!btree_record_object_is_flagged (rec->data + oid_offset, flag)); assert (rec->length + OR_MVCCID_SIZE < rec->area_size); - dest_offset = mvcc_delid_offset + OR_MVCCID_SIZE; + dest_offset = mvccid_offset + OR_MVCCID_SIZE; if (rv_undo_data_ptr != NULL && *rv_undo_data_ptr != NULL) { @@ -3529,19 +3632,18 @@ btree_add_mvcc_delid (RECDES * rec, int oid_offset, int mvcc_delid_offset, MVCCI log_rv_pack_undo_record_changes (*rv_undo_data_ptr, oid_offset + OR_OID_VOLID, OR_SHORT_SIZE, OR_SHORT_SIZE, rec->data + oid_offset + OR_OID_VOLID); /* Undo logging: added MVCCID. */ - *rv_undo_data_ptr = - log_rv_pack_undo_record_changes (*rv_undo_data_ptr, mvcc_delid_offset, 0, OR_MVCCID_SIZE, NULL); + *rv_undo_data_ptr = log_rv_pack_undo_record_changes (*rv_undo_data_ptr, mvccid_offset, 0, OR_MVCCID_SIZE, NULL); } - RECORD_MOVE_DATA (rec, dest_offset, mvcc_delid_offset); + RECORD_MOVE_DATA (rec, dest_offset, mvccid_offset); /* Set MVCC flag. */ oid_ptr = rec->data + oid_offset; - btree_record_object_set_mvcc_flags (oid_ptr, BTREE_OID_HAS_MVCC_DELID); + btree_record_object_set_mvcc_flags (oid_ptr, flag); - /* Set delete MVCCID. */ - mvcc_delid_ptr = rec->data + mvcc_delid_offset; - OR_PUT_MVCCID (mvcc_delid_ptr, p_mvcc_delid); + /* Set MVCCID. 
*/ + mvccid_dest_ptr = rec->data + mvccid_offset; + OR_PUT_MVCCID (mvccid_dest_ptr, &mvccid); if (rv_redo_data_ptr != NULL && *rv_redo_data_ptr != NULL) { @@ -3551,46 +3653,88 @@ btree_add_mvcc_delid (RECDES * rec, int oid_offset, int mvcc_delid_offset, MVCCI rec->data + oid_offset + OR_OID_VOLID); /* Redo logging: added MVCCID. */ *rv_redo_data_ptr = - log_rv_pack_redo_record_changes (*rv_redo_data_ptr, mvcc_delid_offset, 0, OR_MVCCID_SIZE, mvcc_delid_ptr); + log_rv_pack_redo_record_changes (*rv_redo_data_ptr, mvccid_offset, 0, OR_MVCCID_SIZE, mvccid_dest_ptr); } } /* - * btree_set_mvcc_delid () - Set delete MVCCID instead of existing one. + * btree_set_mvccid () - Set MVCCID instead of existing one. This one works for insid and delid. * * return : Error code. * rec (in) : Record data. - * mvcc_delid_offset (in) : Offset of old delete MVCCID. - * p_mvcc_delid (in) : New delete MVCCID. + * mvccid_offset (in) : Offset of old MVCCID. + * p_mvccid (in) : New MVCCID. * rv_undo_data_ptr (in) : Outputs undo recovery data for changing the record. * rv_redo_data_ptr (in) : Outputs redo recovery data for changing the record. */ STATIC_INLINE void -btree_set_mvcc_delid (RECDES * rec, int mvcc_delid_offset, MVCCID * p_mvcc_delid, char **rv_undo_data_ptr, - char **rv_redo_data_ptr) +btree_set_mvccid (RECDES * rec, int mvccid_offset, MVCCID * p_mvccid, char **rv_undo_data_ptr, char **rv_redo_data_ptr) { - char *mvcc_delid_ptr = NULL; + char *mvccid_ptr = NULL; - assert (rec != NULL && mvcc_delid_offset > 0 && p_mvcc_delid != NULL); + assert (rec != NULL && mvccid_offset > 0 && p_mvccid != NULL); - mvcc_delid_ptr = rec->data + mvcc_delid_offset; + mvccid_ptr = rec->data + mvccid_offset; if (rv_undo_data_ptr != NULL && *rv_undo_data_ptr != NULL) { /* Redo logging: replace MVCCID. 
*/ *rv_undo_data_ptr = - log_rv_pack_undo_record_changes (*rv_undo_data_ptr, mvcc_delid_offset, OR_MVCCID_SIZE, OR_MVCCID_SIZE, - mvcc_delid_ptr); + log_rv_pack_undo_record_changes (*rv_undo_data_ptr, mvccid_offset, OR_MVCCID_SIZE, OR_MVCCID_SIZE, mvccid_ptr); } - OR_PUT_MVCCID (mvcc_delid_ptr, p_mvcc_delid); + OR_PUT_MVCCID (mvccid_ptr, p_mvccid); if (rv_redo_data_ptr != NULL && *rv_redo_data_ptr != NULL) { /* Redo logging: replace MVCCID. */ *rv_redo_data_ptr = - log_rv_pack_redo_record_changes (*rv_redo_data_ptr, mvcc_delid_offset, OR_MVCCID_SIZE, OR_MVCCID_SIZE, - mvcc_delid_ptr); + log_rv_pack_redo_record_changes (*rv_redo_data_ptr, mvccid_offset, OR_MVCCID_SIZE, OR_MVCCID_SIZE, mvccid_ptr); + } +} + +// +// btree_remove_mvccid () - remove insert or delete MVCCID from record and generate incremental logging +// +// record (in/out) : b-tree record +// oid_offset (in) : offset to object OID +// mvccid_offset (in) : offset to MVCCID being removed +// flag (in) : has insert or has delete flag +// rv_undo_data_ptr (in/out) : if not null, output undo logging +// rv_redo_data_ptr (in/out) : if not null, output redo logging +// +static inline void +btree_remove_mvccid (RECDES * record, int oid_offset, int mvccid_offset, short flag, char **rv_undo_data_ptr, + char **rv_redo_data_ptr) +{ + char *oid_ptr = record->data + oid_offset; + char *mvccid_ptr = record->data + mvccid_offset; + + if (rv_undo_data_ptr != NULL && *rv_undo_data_ptr != NULL) + { + /* Undo logging: remove MVCCID. */ + *rv_undo_data_ptr = + log_rv_pack_undo_record_changes (*rv_undo_data_ptr, mvccid_offset, OR_MVCCID_SIZE, 0, mvccid_ptr); + + /* Undo logging: clear flag. */ + *rv_undo_data_ptr = + log_rv_pack_undo_record_changes (*rv_undo_data_ptr, oid_offset + OR_OID_VOLID, OR_SHORT_SIZE, + OR_SHORT_SIZE, oid_ptr + OR_OID_VOLID); + } + + /* Remove. 
*/ + RECORD_MOVE_DATA (record, mvccid_offset, mvccid_offset + OR_MVCCID_SIZE); + btree_record_object_clear_mvcc_flags (oid_ptr, flag); + + if (rv_redo_data_ptr != NULL && *rv_redo_data_ptr != NULL) + { + /* Redo logging: remove MVCCID. */ + *rv_redo_data_ptr = log_rv_pack_redo_record_changes (*rv_redo_data_ptr, mvccid_offset, OR_MVCCID_SIZE, 0, NULL); + + /* Redo logging: clear flag. */ + *rv_redo_data_ptr = + log_rv_pack_redo_record_changes (*rv_redo_data_ptr, oid_offset + OR_OID_VOLID, OR_SHORT_SIZE, + OR_SHORT_SIZE, oid_ptr + OR_OID_VOLID); } } @@ -4111,6 +4255,7 @@ btree_read_record (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR pgptr, REC LEAF_REC leaf_pnt; int dummy_offset; + btree_init_temp_key_value (&lf_clear_key, &lf_key); (void) spage_get_record (thread_p, pgptr, 1, &peek_rec, PEEK); error = btree_read_record_without_decompression (thread_p, btid, &peek_rec, &lf_key, &leaf_pnt, BTREE_LEAF_NODE, &lf_clear_key, &dummy_offset, @@ -4173,7 +4318,7 @@ btree_read_record_without_decompression (THREAD_ENTRY * thread_p, BTID_INT * bti if (key != NULL) { - db_make_null (key); + btree_clear_key_value (clear_key, key); } *clear_key = false; @@ -4513,10 +4658,7 @@ static void btree_dump_leaf_record (THREAD_ENTRY * thread_p, FILE * fp, BTID_INT * btid, RECDES * rec, int depth) { OR_BUF buf; - LEAF_REC leaf_record = { - { - NULL_PAGEID, NULL_VOLID}, 0 - }; + LEAF_REC leaf_record = { {NULL_PAGEID, NULL_VOLID}, 0 }; int i, k, oid_cnt; OID class_oid; OID oid; @@ -4529,6 +4671,8 @@ btree_dump_leaf_record (THREAD_ENTRY * thread_p, FILE * fp, BTID_INT * btid, REC int error; BTREE_MVCC_INFO mvcc_info; + btree_init_temp_key_value (&clear_key, &key); + if (BTREE_IS_UNIQUE (btid->unique_pk)) { oid_size = (2 * OR_OID_SIZE); @@ -4780,6 +4924,8 @@ btree_dump_non_leaf_record (THREAD_ENTRY * thread_p, FILE * fp, BTID_INT * btid, VPID_SET_NULL (&(non_leaf_record.pnt)); + btree_init_temp_key_value (&clear_key, &key); + /* output the non_leaf record structure content */ error = 
btree_read_record_without_decompression (thread_p, btid, rec, &key, &non_leaf_record, BTREE_NON_LEAF_NODE, @@ -4889,6 +5035,8 @@ btree_search_nonleaf_page (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR pa /* initialize child page identifier */ VPID_SET_NULL (child_vpid); + btree_init_temp_key_value (&clear_key, &temp_key); + #if !defined(NDEBUG) if (!page_ptr || !key || DB_IS_NULL (key)) { @@ -5063,7 +5211,7 @@ btree_leaf_is_key_between_min_max (THREAD_ENTRY * thread_p, BTID_INT * btid_int, * Compare with first key in page. */ /* Read record and get key. */ - db_make_null (&border_key); + btree_init_temp_key_value (&clear_key, &border_key); if (spage_get_record (thread_p, leaf, 1, &border_record, PEEK) != S_SUCCESS) { @@ -5208,6 +5356,8 @@ btree_search_leaf_page (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR page_ assert (page_ptr != NULL); assert (search_key != NULL); + btree_init_temp_key_value (&clear_key, &temp_key); + /* Initialize search results. */ search_key->result = BTREE_KEY_NOTFOUND; search_key->slotid = NULL_SLOTID; @@ -6150,6 +6300,8 @@ btree_get_subtree_stats (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR page stats_env->stat_info->keys += key_cnt; stats_env->stat_info->height = 1; /* init */ + btree_init_temp_key_value (&clear_key, &key); + if (stats_env->pkeys) { if (TP_DOMAIN_TYPE (key_type) != DB_TYPE_MIDXKEY) @@ -6343,7 +6495,7 @@ btree_get_stats_key (THREAD_ENTRY * thread_p, BTREE_STATS_ENV * env, MVCC_SNAPSH assert (env != NULL); - db_make_null (&key_value); + btree_init_temp_key_value (&clear_key, &key_value); if (mvcc_snapshot != NULL) { @@ -6934,6 +7086,24 @@ btree_get_stats (THREAD_ENTRY * thread_p, BTREE_STATS * stat_info_p, bool with_f } } + if (npages < env->stat_info->height) + { + // this is a corner case. if b-tree had only one page when npages was read, but its root was split immediately + // after, we'd have this awkward situation. 
+ // + // but we may read npages again, and this time it should be better (we rely also on the fact that one root is + // split, it is never merged back to one page again). + // + ret = file_get_num_user_pages (thread_p, &(stat_info_p->btid.vfid), &npages); + if (ret != NO_ERROR) + { + ASSERT_ERROR (); + return ret; + } + assert_release (npages >= 1); + assert_release (npages >= env->stat_info->height); + } + /* check for leaf pages */ env->stat_info->leafs = MAX (1, env->stat_info->leafs); env->stat_info->leafs = MIN (env->stat_info->leafs, npages - (env->stat_info->height - 1)); @@ -7043,8 +7213,8 @@ btree_check_page_key (THREAD_ENTRY * thread_p, const OID * class_oid_p, BTID_INT nleaf_pnt.key_len = 0; VPID_SET_NULL (&nleaf_pnt.pnt); - db_make_null (&key1); - db_make_null (&key2); + btree_init_temp_key_value (&clear_key1, &key1); + btree_init_temp_key_value (&clear_key2, &key2); key_cnt = btree_node_number_of_keys (thread_p, page_ptr); @@ -7246,6 +7416,7 @@ btree_verify_subtree (THREAD_ENTRY * thread_p, const OID * class_oid_p, BTID_INT char err_buf[LINE_MAX]; db_make_null (&INFO2.max_key); + btree_init_temp_key_value (&clear_key, &curr_key); /* test the page for the order of the keys within the page and get the biggest key of this page */ valid = btree_check_page_key (thread_p, class_oid_p, btid, btname, pg_ptr, pg_vpid); @@ -8302,6 +8473,8 @@ btree_get_subtree_capacity (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR p leaf_pnt.key_len = 0; VPID_SET_NULL (&leaf_pnt.ovfl); + btree_init_temp_key_value (&clear_key, &key1); + /* initialize capacity structure */ cpc->dis_key_cnt = 0; cpc->tot_val_cnt = 0; @@ -9012,39 +9185,8 @@ btree_delete_key_from_leaf (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR l assert (!BTREE_RV_HAS_DEBUG_INFO (delete_helper->leaf_addr.offset)); /* Add logging. */ - if (delete_helper->is_system_op_started) - { - /* We need undoredo logging. */ - - /* TODO: Add debugging info for undo. 
*/ - log_append_undoredo_data (thread_p, RVBT_RECORD_MODIFY_UNDOREDO, &delete_helper->leaf_addr, leaf_record.length, 0, - leaf_record.data, NULL); - } - else if (delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL) - { - /* Undo-redo logging. No actual redo data, since the record is removed completely (and only a flag saved in - * leaf_addr.offset is used. */ - log_append_undoredo_data (thread_p, RVBT_DELETE_OBJECT_PHYSICAL, &delete_helper->leaf_addr, - delete_helper->rv_keyval_data_length, 0, delete_helper->rv_keyval_data, NULL); - } - else if (delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT) - { - log_append_compensate_with_undo_nxlsa (thread_p, RVBT_RECORD_MODIFY_COMPENSATE, pgbuf_get_vpid_ptr (leaf_pg), - delete_helper->leaf_addr.offset, leaf_pg, 0, NULL, - LOG_FIND_CURRENT_TDES (thread_p), &delete_helper->reference_lsa); - } - else if (delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED) - { - log_append_run_postpone (thread_p, RVBT_DELETE_OBJECT_PHYSICAL, &delete_helper->leaf_addr, - pgbuf_get_vpid_ptr (leaf_pg), 0, NULL, &delete_helper->reference_lsa); - } - else /* BTREE_OP_DELETE_VACUUM_OBJECT */ - { - /* We now know everything is successfully executed. Vacuum no longer needs undo logging. 
*/ - assert (delete_helper->purpose == BTREE_OP_DELETE_VACUUM_OBJECT); - log_append_redo_data (thread_p, RVBT_DELETE_OBJECT_PHYSICAL, &delete_helper->leaf_addr, 0, NULL); - } - pgbuf_set_dirty (thread_p, leaf_pg, DONT_FREE); + btree_rv_log_delete_object (thread_p, *delete_helper, delete_helper->leaf_addr, leaf_record.length, 0, + leaf_record.data, NULL); btree_delete_log (delete_helper, BTREE_DELETE_MODIFY_MSG ("removed key"), BTREE_DELETE_MODIFY_ARGS (thread_p, delete_helper, leaf_pg, &prev_lsa, true, search_key->slotid, 0, @@ -9121,11 +9263,7 @@ btree_replace_first_oid_with_ovfl_oid (THREAD_ENTRY * thread_p, BTID_INT * btid, assert (delete_helper != NULL); assert (leaf_rec != NULL); assert (ovfl_vpid != NULL); - assert (delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD - || delete_helper->purpose == BTREE_OP_DELETE_VACUUM_OBJECT); + assert (btree_is_delete_object_purpose (delete_helper->purpose)); assert (delete_helper->rv_redo_data != NULL); assert (delete_helper->leaf_addr.offset != 0 && delete_helper->leaf_addr.pgptr == leaf_page); @@ -9960,8 +10098,8 @@ btree_merge_node (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR P, PAGE_PTR right_header = btree_get_node_header (thread_p, right_pg); assert (left_header != NULL && right_header != NULL); - db_make_null (&left_fence_key); - db_make_null (&right_fence_key); + btree_init_temp_key_value (&left_fence_key_clear, &left_fence_key); + btree_init_temp_key_value (&right_fence_key_clear, &right_fence_key); left_used = btree_node_size_uncompressed (thread_p, btid, left_pg); if (left_used < 0) @@ -10388,6 +10526,8 @@ btree_node_size_uncompressed (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR LEAF_REC leaf_pnt; int error; + btree_init_temp_key_value (&clear_key, &key); + used_size = DB_PAGESIZE - 
spage_get_free_space (thread_p, page_ptr); prefix = btree_node_common_prefix (thread_p, btid, page_ptr); @@ -10635,8 +10775,7 @@ btree_key_append_object_as_new_overflow (THREAD_ENTRY * thread_p, BTID_INT * bti assert (leaf_rec != NULL); assert (first_ovfl_vpid != NULL); assert (insert_helper->rv_redo_data != NULL && insert_helper->rv_redo_data_ptr != NULL); - assert (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT - || insert_helper->purpose == BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE); + assert (btree_is_insert_object_purpose (insert_helper->purpose)); save_sysop_started = insert_helper->is_system_op_started; if (!insert_helper->is_system_op_started) @@ -10709,7 +10848,7 @@ btree_key_append_object_as_new_overflow (THREAD_ENTRY * thread_p, BTID_INT * bti if (!save_sysop_started && insert_helper->is_system_op_started) { /* This might be a problem since compensate was not successfully executed. */ - assert (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT); + assert (insert_helper->purpose != BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE); log_sysop_abort (thread_p); insert_helper->is_system_op_started = false; } @@ -10745,7 +10884,7 @@ btree_key_append_object_to_overflow (THREAD_ENTRY * thread_p, BTID_INT * btid_in char rv_undo_data_buffer[BTREE_RV_BUFFER_SIZE + BTREE_MAX_ALIGN]; char *rv_undo_data = PTR_ALIGN (rv_undo_data_buffer, BTREE_MAX_ALIGN); char *rv_undo_data_ptr = NULL; - int rv_undo_data_length; + int rv_undo_data_length = 0; char rv_redo_data_buffer[BTREE_RV_BUFFER_SIZE + BTREE_MAX_ALIGN]; char *rv_redo_data = PTR_ALIGN (rv_redo_data_buffer, BTREE_MAX_ALIGN); char *rv_redo_data_ptr = rv_redo_data; @@ -10756,8 +10895,7 @@ btree_key_append_object_to_overflow (THREAD_ENTRY * thread_p, BTID_INT * btid_in assert (ovfl_page != NULL); assert (object_info != NULL); assert (insert_helper != NULL); - assert (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT - || insert_helper->purpose == BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE); + assert 
(btree_is_insert_object_purpose (insert_helper->purpose)); /* Prepare record. */ ovfl_rec.type = REC_HOME; @@ -10804,29 +10942,12 @@ btree_key_append_object_to_overflow (THREAD_ENTRY * thread_p, BTID_INT * btid_in LSA_COPY (&prev_lsa, pgbuf_get_lsa (ovfl_page)); BTREE_RV_GET_DATA_LENGTH (rv_redo_data_ptr, rv_redo_data, rv_redo_data_length); - if (insert_helper->is_system_op_started) + if (rv_undo_data_ptr != NULL) { - /* Physical logging. */ - assert (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT && BTREE_IS_UNIQUE (btid_int->unique_pk)); BTREE_RV_GET_DATA_LENGTH (rv_undo_data_ptr, rv_undo_data, rv_undo_data_length); - log_append_undoredo_data (thread_p, RVBT_RECORD_MODIFY_UNDOREDO, &addr, rv_undo_data_length, rv_redo_data_length, - rv_undo_data, rv_redo_data); - } - else if (insert_helper->purpose == BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE) - { - /* Compensate. */ - log_append_compensate_with_undo_nxlsa (thread_p, RVBT_RECORD_MODIFY_COMPENSATE, pgbuf_get_vpid_ptr (ovfl_page), - addr.offset, ovfl_page, rv_redo_data_length, rv_redo_data, - LOG_FIND_CURRENT_TDES (thread_p), &insert_helper->compensate_undo_nxlsa); - } - else /* BTREE_OP_INSERT_NEW_OBJECT */ - { - /* Logical undo logging. 
*/ - assert (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT); - assert (!BTREE_IS_UNIQUE (btid_int->unique_pk)); - log_append_undoredo_data (thread_p, insert_helper->rcvindex, &addr, insert_helper->rv_keyval_data_length, - rv_redo_data_length, insert_helper->rv_keyval_data, rv_redo_data); } + btree_rv_log_insert_object (thread_p, *insert_helper, addr, rv_undo_data_length, rv_redo_data_length, rv_undo_data, + rv_redo_data); btree_insert_log (insert_helper, BTREE_INSERT_MODIFY_MSG ("append object at the end of record"), BTREE_INSERT_MODIFY_ARGS (thread_p, insert_helper, ovfl_page, &prev_lsa, false, 1, ovfl_rec.length, @@ -10909,7 +11030,7 @@ btree_find_free_overflow_oids_page (THREAD_ENTRY * thread_p, BTID_INT * btid, VP if (spage_max_space_for_new_record (thread_p, *overflow_page) > space_needed) { - BTREE_PERF_OVF_OIDS_FIX_TIME (thread_p, &ovf_fix_time_track); + btree_perf_ovf_oids_fix_time (thread_p, &ovf_fix_time_track); return NO_ERROR; } @@ -10918,7 +11039,7 @@ btree_find_free_overflow_oids_page (THREAD_ENTRY * thread_p, BTID_INT * btid, VP pgbuf_unfix_and_init (thread_p, *overflow_page); } - BTREE_PERF_OVF_OIDS_FIX_TIME (thread_p, &ovf_fix_time_track); + btree_perf_ovf_oids_fix_time (thread_p, &ovf_fix_time_track); return NO_ERROR; } @@ -10940,6 +11061,8 @@ btree_find_free_overflow_oids_page (THREAD_ENTRY * thread_p, BTID_INT * btid, VP * will output NULL. If object is in first overflow, it will output leaf page. * If argument is NULL, previous overflow page is unfixed. * offset_to_object (out) : Offset to object in the record of leaf/overflow. 
+ * + * TODO: output overflow record */ static int btree_find_oid_and_its_page (THREAD_ENTRY * thread_p, BTID_INT * btid_int, OID * oid, PAGE_PTR leaf_page, @@ -10990,7 +11113,7 @@ btree_find_oid_and_its_page (THREAD_ENTRY * thread_p, BTID_INT * btid_int, OID * { PERF_UTIME_TRACKER_START (thread_p, &ovf_fix_time_track); overflow_page = pgbuf_fix (thread_p, &overflow_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_UNCONDITIONAL_LATCH); - BTREE_PERF_OVF_OIDS_FIX_TIME (thread_p, &ovf_fix_time_track); + btree_perf_ovf_oids_fix_time (thread_p, &ovf_fix_time_track); if (overflow_page == NULL) { ASSERT_ERROR_AND_SET (error_code); @@ -11039,6 +11162,7 @@ btree_find_oid_and_its_page (THREAD_ENTRY * thread_p, BTID_INT * btid_int, OID * { assert (overflow_page != NULL); *found_page = overflow_page; + if (prev_page != NULL) { *prev_page = prev_overflow_page != NULL ? prev_overflow_page : leaf_page; @@ -11953,7 +12077,7 @@ btree_find_split_point (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR page_ goto error; } - db_make_null (mid_key); + btree_init_temp_key_value (&m_clear_key, mid_key); if (*mid_slot == (slot_id - 1) && is_key_added_to_left && !found) { @@ -12011,7 +12135,7 @@ btree_find_split_point (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR page_ goto error; } - db_make_null (next_key); + btree_init_temp_key_value (&n_clear_key, next_key); if (*mid_slot == key_cnt && slot_id == (key_cnt + 1)) { @@ -12247,6 +12371,9 @@ btree_node_common_prefix (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR pag LEAF_REC leaf_pnt; int error = NO_ERROR; + btree_init_temp_key_value (&lf_clear_key, &lf_key); + btree_init_temp_key_value (&uf_clear_key, &uf_key); + if (btree_node_is_compressed (thread_p, btid, page_ptr) == false) { return 0; @@ -12327,6 +12454,8 @@ btree_recompress_record (THREAD_ENTRY * thread_p, BTID_INT * btid_int, RECDES * assert (btid_int != NULL); assert (record != NULL); + btree_init_temp_key_value (&clear_key, &key); + if (old_prefix == new_prefix) { /* 
Recompression is not needed. */ @@ -12415,6 +12544,8 @@ btree_compress_node (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR page_ptr return diff_column; } + btree_init_temp_key_value (&clear_key, &key); + /* compress prefix */ for (i = 2; i < key_cnt; i++) { @@ -13024,7 +13155,7 @@ btree_set_split_point (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR page_p assert (false); } - db_make_null (mid_key); + btree_init_temp_key_value (&m_clear_key, mid_key); /* the split key is one of the keys on the page */ assert (mid_slot > 0); @@ -13058,7 +13189,7 @@ btree_set_split_point (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR page_p assert (false); } - db_make_null (next_key); + btree_init_temp_key_value (&n_clear_key, next_key); if (mid_slot == key_cnt && slot_id == (key_cnt + 1)) { @@ -16170,6 +16301,7 @@ btree_find_min_or_max_key (THREAD_ENTRY * thread_p, BTID * btid, DB_VALUE * key, } db_make_null (key); + btree_init_temp_key_value (&clear_key, &key_value); BTS = &btree_scan; BTREE_INIT_SCAN (BTS); @@ -16243,11 +16375,7 @@ btree_find_min_or_max_key (THREAD_ENTRY * thread_p, BTID * btid, DB_VALUE * key, (void) pr_clone_value (&key_value, key); - if (clear_key) - { - pr_clear_value (&key_value); - clear_key = false; - } + btree_clear_key_value (&clear_key, &key_value); } end: @@ -16272,11 +16400,7 @@ btree_find_min_or_max_key (THREAD_ENTRY * thread_p, BTID * btid, DB_VALUE * key, pgbuf_unfix_and_init (thread_p, root_page_ptr); } - if (clear_key) - { - pr_clear_value (&key_value); - clear_key = false; - } + btree_clear_key_value (&clear_key, &key_value); return ret; @@ -16484,6 +16608,7 @@ btree_rv_save_keyval_for_undo (BTID_INT * btid, DB_VALUE * key, OID * cls_oid, O break; case BTREE_OP_DELETE_OBJECT_PHYSICAL: + case BTREE_OP_ONLINE_INDEX_TRAN_DELETE: /* Object is being physically removed. Since on rollback we should also recover MVCC information, it must be * packed. 
*/ assert (mvcc_info != NULL); @@ -19419,12 +19544,14 @@ btree_verify_nonleaf_node (THREAD_ENTRY * thread_p, BTID_INT * btid_int, PAGE_PT assert_release (btid_int != NULL); assert_release (page_ptr != NULL); - clear_prev_key = clear_curr_key = false; key_domain = btid_int->key_type; key_cnt = btree_node_number_of_keys (thread_p, page_ptr); assert_release (key_cnt >= 1); + btree_init_temp_key_value (&clear_prev_key, &prev_key); + btree_init_temp_key_value (&clear_curr_key, &curr_key); + /* check key order; exclude neg-inf separator */ for (i = 1; i < key_cnt; i++) { @@ -19546,9 +19673,9 @@ btree_verify_leaf_node (THREAD_ENTRY * thread_p, BTID_INT * btid_int, PAGE_PTR p prev_vpid = header->prev_vpid; next_vpid = header->next_vpid; - db_make_null (&curr_key); - db_make_null (&prev_key); - db_make_null (&lower_fence_key); + btree_init_temp_key_value (&clear_curr_key, &curr_key); + btree_init_temp_key_value (&clear_prev_key, &prev_key); + btree_init_temp_key_value (&clear_lower_fence_key, &lower_fence_key); db_make_null (&uncompressed_value); common_prefix = btree_node_common_prefix (thread_p, btid_int, page_ptr); @@ -20065,7 +20192,6 @@ btree_get_next_node_info (THREAD_ENTRY * thread_p, BTID * btid, BTREE_NODE_SCAN node_type = (node_header->node_level > 1) ? BTREE_NON_LEAF_NODE : BTREE_LEAF_NODE; key_cnt = btree_node_number_of_keys (thread_p, btns->crt_page); - rec_header = (node_type == BTREE_NON_LEAF_NODE) ? 
(void *) &nleaf : (void *) &leaf_pnt; if (node_type == BTREE_NON_LEAF_NODE) @@ -20108,6 +20234,8 @@ btree_get_next_node_info (THREAD_ENTRY * thread_p, BTID * btid, BTREE_NODE_SCAN if (key_cnt > 0) { + btree_init_temp_key_value (&clear_key, &key_value); + /* Get first key */ if (spage_get_record (thread_p, btns->crt_page, 1, &rec, PEEK) != S_SUCCESS) { @@ -20118,8 +20246,10 @@ btree_get_next_node_info (THREAD_ENTRY * thread_p, BTID * btid, BTREE_NODE_SCAN { goto error; } + pr_clear_value (node_info[BTREE_NODE_INFO_FIRST_KEY]); - *node_info[BTREE_NODE_INFO_FIRST_KEY] = key_value; /* just copy. it will be cleared later */ + pr_clone_value (&key_value, node_info[BTREE_NODE_INFO_FIRST_KEY]); + btree_clear_key_value (&clear_key, &key_value); /* Get last key */ if (spage_get_record (thread_p, btns->crt_page, key_cnt, &rec, PEEK) != S_SUCCESS) @@ -20131,8 +20261,10 @@ btree_get_next_node_info (THREAD_ENTRY * thread_p, BTID * btid, BTREE_NODE_SCAN { goto error; } + pr_clear_value (node_info[BTREE_NODE_INFO_LAST_KEY]); - *node_info[BTREE_NODE_INFO_LAST_KEY] = key_value; /* just copy. 
it will be cleared later */ + pr_clone_value (&key_value, node_info[BTREE_NODE_INFO_LAST_KEY]); + btree_clear_key_value (&clear_key, &key_value); } else { @@ -20884,22 +21016,19 @@ btree_key_find_first_visible_row (THREAD_ENTRY * thread_p, BTID_INT * btid_int, /* Clear flags */ BTREE_OID_CLEAR_ALL_FLAGS (oid); - if (BTREE_IS_UNIQUE (btid_int->unique_pk)) + if (btree_is_class_oid_packed (btid_int, rec, node_type, is_first)) { - if (node_type == BTREE_OVERFLOW_NODE || !is_first || btree_leaf_is_flaged (rec, BTREE_LEAF_RECORD_CLASS_OID)) - { - /* Read class OID */ - if (or_get_oid (&buf, class_oid) != NO_ERROR) - { - goto error; - } - } - else + /* Read class OID */ + if (or_get_oid (&buf, class_oid) != NO_ERROR) { - /* Class OID is top class OID */ - COPY_OID (class_oid, &btid_int->topclass_oid); + goto error; } } + else if (BTREE_IS_UNIQUE (btid_int->unique_pk)) + { + /* Class OID is top class OID */ + COPY_OID (class_oid, &btid_int->topclass_oid); + } /* Get MVCC information */ if (btree_or_get_mvccinfo (&buf, &mvcc_info, mvcc_flags) != NO_ERROR) @@ -21743,9 +21872,7 @@ btree_check_valid_record (THREAD_ENTRY * thread_p, BTID_INT * btid, RECDES * rec assert (false); return ER_FAILED; } - if (BTREE_IS_UNIQUE (btid->unique_pk) - && (node_type == BTREE_OVERFLOW_NODE || !is_first_oid - || btree_leaf_is_flaged (recp, BTREE_LEAF_RECORD_CLASS_OID))) + if (btree_is_class_oid_packed (btid, recp, node_type, is_first_oid)) { /* Get and check class OID */ if (or_get_oid (&buffer, &class_oid) != NO_ERROR) @@ -21768,6 +21895,9 @@ btree_check_valid_record (THREAD_ENTRY * thread_p, BTID_INT * btid, RECDES * rec assert (false); return ER_FAILED; } + + /* Remove any possible online_index flags. */ + mvccid &= BTREE_ONLINE_INDEX_MVCCID_MASK; if (!MVCCID_IS_VALID (mvccid)) { assert (false); @@ -23187,7 +23317,7 @@ btree_key_find_and_lock_unique_of_non_unique (THREAD_ENTRY * thread_p, BTID_INT /* Fix next overflow page. 
*/ overflow_page = pgbuf_fix (thread_p, &next_overflow_vpid, OLD_PAGE, PGBUF_LATCH_READ, PGBUF_UNCONDITIONAL_LATCH); - BTREE_PERF_OVF_OIDS_FIX_TIME (thread_p, &ovf_fix_time_track); + btree_perf_ovf_oids_fix_time (thread_p, &ovf_fix_time_track); if (overflow_page == NULL) { ASSERT_ERROR_AND_SET (error_code); @@ -23470,7 +23600,7 @@ btree_key_lock_object (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE * /* Lock object. */ PERF_UTIME_TRACKER_START (thread_p, &lock_time_track); lock_result = lock_object (thread_p, oid, class_oid, lock_mode, LK_UNCOND_LOCK); - BTREE_PERF_UNIQUE_LOCK_TIME (thread_p, &lock_time_track, lock_mode); + btree_perf_unique_lock_time (thread_p, &lock_time_track, lock_mode); if (lock_result != LK_GRANTED) { ASSERT_ERROR_AND_SET (error_code); @@ -23680,7 +23810,7 @@ btree_key_process_objects (THREAD_ENTRY * thread_p, BTID_INT * btid_int, RECDES /* Fix overflow page. */ PERF_UTIME_TRACKER_START (thread_p, &ovf_fix_time_track); ovf_page = pgbuf_fix (thread_p, &ovf_vpid, OLD_PAGE, PGBUF_LATCH_READ, PGBUF_UNCONDITIONAL_LATCH); - BTREE_PERF_OVF_OIDS_FIX_TIME (thread_p, &ovf_fix_time_track); + btree_perf_ovf_oids_fix_time (thread_p, &ovf_fix_time_track); if (ovf_page == NULL) { ASSERT_ERROR_AND_SET (error_code); @@ -25165,7 +25295,7 @@ btree_range_scan_select_visible_oids (THREAD_ENTRY * thread_p, BTREE_SCAN * bts) /* Fix next overflow page. 
*/ PERF_UTIME_TRACKER_START (thread_p, &ovf_fix_time_track); overflow_page = pgbuf_fix (thread_p, &overflow_vpid, OLD_PAGE, PGBUF_LATCH_READ, PGBUF_UNCONDITIONAL_LATCH); - BTREE_PERF_OVF_OIDS_FIX_TIME (thread_p, &ovf_fix_time_track); + btree_perf_ovf_oids_fix_time (thread_p, &ovf_fix_time_track); if (overflow_page == NULL) { ASSERT_ERROR_AND_SET (error_code); @@ -25469,7 +25599,7 @@ btree_select_visible_object_for_range_scan (THREAD_ENTRY * thread_p, BTID_INT * ASSERT_ERROR (); return error_code; } - db_make_null (&bts->cur_key); + btree_clear_key_value (&bts->clear_cur_key, &bts->cur_key); } else { @@ -25913,9 +26043,6 @@ btree_mvcc_delete (THREAD_ENTRY * thread_p, BTID * btid, DB_VALUE * key, OID * c * mvcc_info (in) : B-tree MVCC information. * undo_nxlsa (in) : UNDO next lsa for logical compensate. * purpose (in) : B-tree insert purpose - * BTREE_OP_INSERT_NEW_OBJECT - * BTREE_OP_INSERT_MVCC_DELID - * BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE. */ static int btree_insert_internal (THREAD_ENTRY * thread_p, BTID * btid, DB_VALUE * key, OID * class_oid, OID * oid, int op_type, @@ -25934,9 +26061,8 @@ btree_insert_internal (THREAD_ENTRY * thread_p, BTID * btid, DB_VALUE * key, OID /* Assert expected arguments. */ assert (btid != NULL); assert (oid != NULL); - /* Assert class OID is valid or not required. 
*/ - assert ((purpose != BTREE_OP_INSERT_NEW_OBJECT && purpose != BTREE_OP_INSERT_MVCC_DELID - && purpose != BTREE_OP_INSERT_MARK_DELETED) || (class_oid != NULL && !OID_ISNULL (class_oid))); + /* Assert class OID is valid or not required; not required for undo delete */ + assert (purpose == BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE || (class_oid != NULL && !OID_ISNULL (class_oid))); PERF_UTIME_TRACKER_START (thread_p, &insert_helper.time_track); @@ -26064,9 +26190,9 @@ btree_insert_internal (THREAD_ENTRY * thread_p, BTID * btid, DB_VALUE * key, OID BTREE_INSERT_HELPER_MSG ("\t") "\t" BTREE_ID_MSG "\n" "\t" "%s: new stats = %d keys, %d objects, %d nulls.", - insert_helper.purpose == BTREE_OP_INSERT_NEW_OBJECT ? "Insert" : "MVCC Delete", + (btree_is_insert_object_purpose (insert_helper.purpose)) ? "Insert" : "MVCC Delete", BTREE_INSERT_HELPER_AS_ARGS (&insert_helper), BTID_AS_ARGS (btid_int.sys_btid), - insert_helper.purpose == BTREE_OP_INSERT_NEW_OBJECT ? + (btree_is_insert_object_purpose (insert_helper.purpose)) ? (insert_helper.is_unique_key_added_or_deleted ? "Added new key" : "Did not add new key") : (insert_helper.is_unique_key_added_or_deleted) ? "Removed key" : "Did not remove key", unique_stat_info->num_keys, unique_stat_info->num_oids, unique_stat_info->num_nulls); @@ -26118,9 +26244,7 @@ btree_fix_root_for_insert (THREAD_ENTRY * thread_p, BTID * btid, BTID_INT * btid * insert MVCCID. 2. Undo of physical delete. If an object is physically removed from b-tree and operation must be * undone, the object with all its additional information existing before delete must be inserted. 3. Logical delete, * which inserts a delete MVCCID. */ - assert (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT || insert_helper->purpose == BTREE_OP_INSERT_MVCC_DELID - || insert_helper->purpose == BTREE_OP_INSERT_MARK_DELETED - || insert_helper->purpose == BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE); + assert (btree_is_insert_data_purpose (insert_helper->purpose)); /* Fixing root page. 
*/ insert_helper->is_root = true; @@ -26173,7 +26297,8 @@ btree_fix_root_for_insert (THREAD_ENTRY * thread_p, BTID * btid, BTID_INT * btid &insert_helper->printed_key_sha1); } - if (insert_helper->purpose == BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE) + if (insert_helper->purpose == BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE + || insert_helper->purpose == BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE) { /* Stop here. */ /* Code after this: 1. Update unique statistics. In this case, they are updated by undone log records. 2. Create @@ -26241,12 +26366,15 @@ btree_fix_root_for_insert (THREAD_ENTRY * thread_p, BTID * btid, BTID_INT * btid else { /* Update transactions collected statistics. */ - error_code = - logtb_tran_update_unique_stats (thread_p, btid, increment_keys, increment_oids, increment_nulls, true); - if (error_code != NO_ERROR) + if (!btree_is_online_index_loading (insert_helper->purpose)) { - ASSERT_ERROR (); - goto error; + error_code = + logtb_tran_update_unique_stats (thread_p, btid, increment_keys, increment_oids, increment_nulls, true); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + goto error; + } } } } @@ -26271,8 +26399,7 @@ btree_fix_root_for_insert (THREAD_ENTRY * thread_p, BTID * btid, BTID_INT * btid return NO_ERROR; } - /* Purpose is BTREE_OP_INSERT_NEW_OBJECT. */ - assert (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT); + assert (btree_is_insert_object_purpose (insert_helper->purpose)); /* Check if key length is too big and if an overflow key file needs to be created. */ key_len = btree_get_disk_size_of_key (key); @@ -26390,6 +26517,10 @@ btree_get_max_new_data_size (THREAD_ENTRY * thread_p, BTID_INT * btid_int, PAGE_ { case BTREE_OP_INSERT_NEW_OBJECT: case BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE: + case BTREE_OP_ONLINE_INDEX_IB_INSERT: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF: if (known_to_be_found) { /* Possible inserted data: 1. 
New object (consider maximum size including all info). 2. Link to overflow page @@ -26514,8 +26645,7 @@ btree_split_node_and_advance (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_V assert (key_count > 0 || node_type == BTREE_LEAF_NODE); /* Is new key possible? True if inserting new object or if undoing the removal of some key/object. */ - is_new_key_possible = (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT - || insert_helper->purpose == BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE); + is_new_key_possible = btree_is_insert_object_purpose (insert_helper->purpose); /* Split algorithm: There are two types of splits: root split and normal split. 1. Root split: If there is not enough * space for new data in root, split it into three nodes: two nodes containing all previous entries and a new root @@ -27081,8 +27211,7 @@ btree_key_insert_new_object (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VA assert (search_key->slotid > 0 && search_key->slotid <= btree_node_number_of_keys (thread_p, *leaf_page) + 1); assert (restart != NULL); assert (insert_helper != NULL); - assert (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT - || insert_helper->purpose == BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE); + assert (btree_is_insert_object_purpose (insert_helper->purpose)); /* Do not allow inserting a deleted object. It should never happen Insert new object should insert objects with no * delete MVCCID. Rollback of object physical removal, cannot reach here with a deleted object. There are three @@ -27090,7 +27219,7 @@ btree_key_insert_new_object (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VA * delete MVCCID). - Vacuum (deleted object). However, vacuum is not rollbacked. 
*/ assert (!BTREE_MVCC_INFO_IS_DELID_VALID (BTREE_INSERT_MVCC_INFO (insert_helper))); - BTREE_PERF_TRACK_TRAVERSE_TIME (thread_p, insert_helper); + btree_perf_track_traverse_time (thread_p, insert_helper); /* Prepare log data */ insert_helper->leaf_addr.offset = search_key->slotid; @@ -27099,7 +27228,9 @@ btree_key_insert_new_object (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VA /* Based on recovery index it is know if this is MVCC-like operation or not. Particularly important for vacuum. */ /* Undo physical delete will add a compensate record and doesn't require undo recovery data. */ /* Prepare undo data. */ - if (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT) + if (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT + || insert_helper->purpose == BTREE_OP_ONLINE_INDEX_TRAN_INSERT + || insert_helper->purpose == BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF) { insert_helper->rcvindex = BTREE_MVCC_INFO_IS_INSID_NOT_ALL_VISIBLE (BTREE_INSERT_MVCC_INFO (insert_helper)) ? RVBT_MVCC_INSERT_OBJECT : @@ -27137,7 +27268,7 @@ btree_key_insert_new_object (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VA { db_private_free_and_init (thread_p, insert_helper->rv_keyval_data); } - BTREE_PERF_TRACK_TIME (thread_p, insert_helper); + btree_perf_track_time (thread_p, insert_helper); return NO_ERROR; } /* Key was found. Append new object to existing key. 
*/ @@ -27215,7 +27346,7 @@ btree_key_insert_new_object (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VA } insert_helper->rv_keyval_data = NULL; - BTREE_PERF_TRACK_TIME (thread_p, insert_helper); + btree_perf_track_time (thread_p, insert_helper); return error_code; error: @@ -27273,7 +27404,8 @@ btree_key_insert_new_key (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE assert (insert_helper->rv_redo_data != NULL && insert_helper->rv_redo_data_ptr != NULL); assert (insert_helper->is_system_op_started == false); #if defined (SERVER_MODE) - assert (!BTREE_IS_UNIQUE (btid_int->unique_pk) || log_is_in_crash_recovery () + assert ((btree_is_online_index_loading (insert_helper->purpose)) || !BTREE_IS_UNIQUE (btid_int->unique_pk) + || log_is_in_crash_recovery () || lock_has_lock_on_object (BTREE_INSERT_OID (insert_helper), BTREE_INSERT_CLASS_OID (insert_helper), logtb_get_current_tran_index (), X_LOCK) > 0); #endif /* SERVER_MODE */ @@ -27359,6 +27491,7 @@ btree_key_insert_new_key (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE node_header->max_key_len = key_len; } + assert (node_header->split_info.pivot >= 0 && key_cnt > 0); btree_split_next_pivot (&node_header->split_info, (float) search_key->slotid / key_cnt, key_cnt); @@ -27381,35 +27514,21 @@ btree_key_insert_new_key (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE memcpy (rv_redo_data_ptr, record.data, record.length); rv_redo_data_ptr += record.length; - /* We need to log previous lsa. */ LSA_COPY (&prev_lsa, pgbuf_get_lsa (leaf_page)); /* Add logging. */ rv_redo_data_length = CAST_BUFLEN (rv_redo_data_ptr - rv_redo_data); assert (rv_redo_data_length < DB_PAGESIZE); + + btree_rv_log_insert_object (thread_p, *insert_helper, insert_helper->leaf_addr, 0, rv_redo_data_length, NULL, + rv_redo_data); + if (insert_helper->is_system_op_started) { - /* undo/redo physical. 
*/ - log_append_undoredo_data (thread_p, RVBT_RECORD_MODIFY_UNDOREDO, &insert_helper->leaf_addr, 0, - rv_redo_data_length, NULL, rv_redo_data); - + // also end sysop btree_insert_sysop_end (thread_p, insert_helper); } - else if (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT) - { - /* Undo/redo logging. */ - log_append_undoredo_data (thread_p, insert_helper->rcvindex, &insert_helper->leaf_addr, - insert_helper->rv_keyval_data_length, rv_redo_data_length, - insert_helper->rv_keyval_data, rv_redo_data); - } - else /* BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE */ - { - log_append_compensate_with_undo_nxlsa (thread_p, RVBT_RECORD_MODIFY_COMPENSATE, pgbuf_get_vpid_ptr (leaf_page), - insert_helper->leaf_addr.offset, leaf_page, rv_redo_data_length, - rv_redo_data, LOG_FIND_CURRENT_TDES (thread_p), - &insert_helper->compensate_undo_nxlsa); - } if (insert_helper->log_operations) { @@ -27468,7 +27587,6 @@ btree_key_insert_new_key (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE return error_code; } -#if defined (SERVER_MODE) /* * btree_key_insert_does_leaf_need_split () - Check if there is not enough space in leaf node to handle new object. * @@ -27503,7 +27621,6 @@ btree_key_insert_does_leaf_need_split (THREAD_ENTRY * thread_p, BTID_INT * btid_ return (max_new_data_size > spage_max_space_for_new_record (thread_p, leaf_page)); } } -#endif /* SERVER_MODE */ /* * btree_key_lock_and_append_object_unique () - Append new object into an existing unique index key. 
@@ -27900,8 +28017,7 @@ btree_key_append_object_non_unique (THREAD_ENTRY * thread_p, BTID_INT * btid_int assert (leaf_record != NULL); assert (leaf_info != NULL); assert (btree_obj != NULL); - assert (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT - || insert_helper->purpose == BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE); + assert (btree_is_insert_object_purpose (insert_helper->purpose)); assert (insert_helper->rv_redo_data != NULL && insert_helper->rv_redo_data_ptr != NULL); if (BTREE_IS_UNIQUE (btid_int->unique_pk)) @@ -27948,20 +28064,8 @@ btree_key_append_object_non_unique (THREAD_ENTRY * thread_p, BTID_INT * btid_int /* Log changes. */ BTREE_RV_GET_DATA_LENGTH (insert_helper->rv_redo_data_ptr, insert_helper->rv_redo_data, rv_redo_data_length); - if (insert_helper->purpose == BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE) - { - log_append_compensate_with_undo_nxlsa (thread_p, RVBT_RECORD_MODIFY_COMPENSATE, pgbuf_get_vpid_ptr (leaf), - insert_helper->leaf_addr.offset, leaf, rv_redo_data_length, - insert_helper->rv_redo_data, LOG_FIND_CURRENT_TDES (thread_p), - &insert_helper->compensate_undo_nxlsa); - } - else /* BTREE_OP_INSERT_NEW_OBJECT */ - { - /* Add logging. 
*/ - log_append_undoredo_data (thread_p, insert_helper->rcvindex, &insert_helper->leaf_addr, - insert_helper->rv_keyval_data_length, rv_redo_data_length, - insert_helper->rv_keyval_data, insert_helper->rv_redo_data); - } + btree_rv_log_insert_object (thread_p, *insert_helper, insert_helper->leaf_addr, 0, rv_redo_data_length, + NULL, insert_helper->rv_redo_data); pgbuf_set_dirty (thread_p, leaf, DONT_FREE); btree_insert_log (insert_helper, BTREE_INSERT_MODIFY_MSG ("append object at the end"), @@ -28022,11 +28126,12 @@ btree_key_append_object_unique (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB assert (leaf_record != NULL); assert (leaf_record_info != NULL); assert (offset_after_key > 0); - assert (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT); + assert (btree_is_insert_object_purpose (insert_helper->purpose)); assert (insert_helper->rv_redo_data != NULL); assert (insert_helper->rv_keyval_data != NULL && insert_helper->rv_keyval_data_length > 0); assert (insert_helper->leaf_addr.offset != 0 && insert_helper->leaf_addr.pgptr == leaf); - assert (insert_helper->rcvindex == RVBT_MVCC_INSERT_OBJECT || insert_helper->rcvindex == RVBT_NON_MVCC_INSERT_OBJECT + assert (insert_helper->rcvindex == RVBT_MVCC_INSERT_OBJECT + || insert_helper->rcvindex == RVBT_NON_MVCC_INSERT_OBJECT || insert_helper->rcvindex == RVBT_MVCC_INSERT_OBJECT_UNQ); assert (first_object != NULL && !OID_ISNULL (&first_object->oid)); @@ -28145,7 +28250,7 @@ btree_key_relocate_last_into_ovf (THREAD_ENTRY * thread_p, BTID_INT * btid_int, assert (leaf_record_info != NULL); assert (offset_after_key > 0); assert (insert_helper != NULL); - assert (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT); + assert (btree_is_insert_object_purpose (insert_helper->purpose)); assert (insert_helper->leaf_addr.offset != 0 && insert_helper->leaf_addr.pgptr == leaf); /* Relocate last object object in leaf record into an overflow page. 
*/ @@ -28286,8 +28391,7 @@ btree_key_append_object_into_ovf (THREAD_ENTRY * thread_p, BTID_INT * btid_int, assert (leaf_record != NULL); assert (leaf_record_info != NULL); assert (insert_helper != NULL); - assert (insert_helper->purpose == BTREE_OP_INSERT_NEW_OBJECT - || insert_helper->purpose == BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE); + assert (btree_is_insert_object_purpose (insert_helper->purpose)); assert (append_object != NULL); /* Is there enough space in existing overflow pages? */ @@ -28383,7 +28487,7 @@ btree_key_find_and_insert_delete_mvccid (THREAD_ENTRY * thread_p, BTID_INT * bti assert (insert_helper->purpose == BTREE_OP_INSERT_MVCC_DELID || insert_helper->purpose == BTREE_OP_INSERT_MARK_DELETED); - BTREE_PERF_TRACK_TRAVERSE_TIME (thread_p, insert_helper); + btree_perf_track_traverse_time (thread_p, insert_helper); if (search_key->result != BTREE_KEY_FOUND) { @@ -28490,7 +28594,7 @@ btree_key_find_and_insert_delete_mvccid (THREAD_ENTRY * thread_p, BTID_INT * bti exit: - BTREE_PERF_TRACK_TIME (thread_p, insert_helper); + btree_perf_track_time (thread_p, insert_helper); return error_code; } @@ -28997,7 +29101,7 @@ btree_rv_record_modify_internal (THREAD_ENTRY * thread_p, LOG_RCV * rcv, bool is bool clear_key; char *printed_key = NULL; - db_make_null (&key); + btree_init_temp_key_value (&clear_key, &key); (void) btree_read_record (thread_p, &btid_int_for_debug, rcv->pgptr, &update_record, &key, &leaf_rec_info, node_type, &clear_key, &offset_after_key, PEEK_KEY_VALUE, NULL); @@ -29101,7 +29205,7 @@ btree_rv_record_modify_internal (THREAD_ENTRY * thread_p, LOG_RCV * rcv, bool is * crash). 
*/ if (node_type == BTREE_LEAF_NODE && !btree_leaf_is_flaged (&update_record, BTREE_LEAF_RECORD_OVERFLOW_KEY)) { - db_make_null (&key); + btree_init_temp_key_value (&clear_key, &key); (void) btree_read_record (thread_p, &btid_int_for_debug, rcv->pgptr, &update_record, &key, &leaf_rec_info, node_type, &clear_key, &offset_after_key, PEEK_KEY_VALUE, NULL); printed_key = pr_valstring (thread_p, &key); @@ -29653,13 +29757,7 @@ btree_fix_root_for_delete (THREAD_ENTRY * thread_p, BTID * btid, BTID_INT * btid assert (btid_int != NULL); assert (root_page != NULL && *root_page == NULL); assert (delete_helper != NULL); - assert (delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT_DELID - || delete_helper->purpose == BTREE_OP_DELETE_VACUUM_INSID - || delete_helper->purpose == BTREE_OP_DELETE_VACUUM_OBJECT - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD); + assert (btree_is_delete_data_purpose (delete_helper->purpose)); /* Root node is being fixed. */ delete_helper->is_root = true; @@ -29740,7 +29838,9 @@ btree_fix_root_for_delete (THREAD_ENTRY * thread_p, BTID * btid, BTID_INT * btid } /* Safe guard: key cannot always be NULL. 
*/ - assert (!is_null || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL); + assert (!is_null || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL + || delete_helper->purpose == BTREE_OP_ONLINE_INDEX_TRAN_DELETE + || delete_helper->purpose == BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT); if (delete_helper->purpose == BTREE_OP_DELETE_VACUUM_INSID || delete_helper->purpose == BTREE_OP_DELETE_VACUUM_OBJECT) { @@ -29750,7 +29850,8 @@ btree_fix_root_for_delete (THREAD_ENTRY * thread_p, BTID * btid, BTID_INT * btid if (delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT_DELID - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED) + || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED + || delete_helper->purpose == BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT) { if (BTREE_IS_UNIQUE (btid_int->unique_pk)) { @@ -29779,7 +29880,7 @@ btree_fix_root_for_delete (THREAD_ENTRY * thread_p, BTID * btid, BTID_INT * btid return NO_ERROR; } - assert (delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL); + assert (btree_is_delete_object_purpose (delete_helper->purpose)); /* Update unique statistics. */ if (BTREE_IS_UNIQUE (btid_int->unique_pk) && delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL) @@ -29816,12 +29917,15 @@ btree_fix_root_for_delete (THREAD_ENTRY * thread_p, BTID * btid, BTID_INT * btid else { /* Save and log statistics changes. 
*/ - error_code = - logtb_tran_update_unique_stats (thread_p, btid, increment_keys, increment_oids, increment_nulls, true); - if (error_code != NO_ERROR) + if (!btree_is_online_index_loading (delete_helper->purpose)) { - ASSERT_ERROR (); - return error_code; + error_code = + logtb_tran_update_unique_stats (thread_p, btid, increment_keys, increment_oids, increment_nulls, true); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } } } } @@ -30233,7 +30337,6 @@ btree_merge_node_and_advance (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_V { /* this can cause a lot of problems. but we need it to know that page was not fixed because not in buffer and not * because some error occurred. */ - assert (!er_has_error ()); er_clear (); /* Check merges. */ @@ -30480,12 +30583,10 @@ btree_key_delete_remove_object (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB assert (leaf_page != NULL && *leaf_page != NULL && pgbuf_get_latch_mode (*leaf_page) >= PGBUF_LATCH_WRITE); assert (search_key != NULL); assert (delete_helper != NULL); - assert (delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL - || delete_helper->purpose == BTREE_OP_DELETE_VACUUM_OBJECT - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED); + assert (btree_is_delete_object_purpose (delete_helper->purpose) + && delete_helper->purpose != BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD); - BTREE_PERF_TRACK_TRAVERSE_TIME (thread_p, delete_helper); + btree_perf_track_traverse_time (thread_p, delete_helper); if (search_key->result == BTREE_KEY_FOUND) { @@ -30691,7 +30792,7 @@ btree_key_delete_remove_object (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB db_private_free_and_init (thread_p, delete_helper->rv_keyval_data); } - BTREE_PERF_TRACK_TIME (thread_p, delete_helper); + btree_perf_track_time (thread_p, delete_helper); return error_code; } @@ -31027,10 +31128,8 @@ btree_leaf_record_replace_first_with_last 
(THREAD_ENTRY * thread_p, BTID_INT * b assert (last_class_oid != NULL); assert (last_mvcc_info != NULL); assert (offset_to_last_object > 0 && offset_to_last_object < leaf_record->length); - assert (delete_helper->purpose == BTREE_OP_DELETE_VACUUM_OBJECT - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT); + assert (btree_is_delete_object_purpose (delete_helper->purpose) + && delete_helper->purpose != BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD); assert (delete_helper->rv_redo_data != NULL && delete_helper->rv_redo_data_ptr != NULL); #if !defined (NDEBUG) @@ -31063,38 +31162,9 @@ btree_leaf_record_replace_first_with_last (THREAD_ENTRY * thread_p, BTID_INT * b /* Log changes. */ BTREE_RV_GET_DATA_LENGTH (delete_helper->rv_redo_data_ptr, delete_helper->rv_redo_data, rv_redo_data_length); - if (delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL) - { - /* Add undoredo log. 
*/ - assert (!delete_helper->is_system_op_started); - log_append_undoredo_data (thread_p, RVBT_DELETE_OBJECT_PHYSICAL, &delete_helper->leaf_addr, - delete_helper->rv_keyval_data_length, rv_redo_data_length, - delete_helper->rv_keyval_data, delete_helper->rv_redo_data); - } - else if (delete_helper->is_system_op_started) - { - BTREE_RV_GET_DATA_LENGTH (rv_undo_data_ptr, rv_undo_data, rv_undo_data_length); - log_append_undoredo_data (thread_p, RVBT_RECORD_MODIFY_UNDOREDO, &delete_helper->leaf_addr, rv_undo_data_length, - rv_redo_data_length, rv_undo_data, delete_helper->rv_redo_data); - } - else if (delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT) - { - log_append_compensate_with_undo_nxlsa (thread_p, RVBT_RECORD_MODIFY_COMPENSATE, pgbuf_get_vpid_ptr (leaf_page), - delete_helper->leaf_addr.offset, leaf_page, rv_redo_data_length, - delete_helper->rv_redo_data, LOG_FIND_CURRENT_TDES (thread_p), - &delete_helper->reference_lsa); - } - else if (delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED) - { - log_append_run_postpone (thread_p, RVBT_RECORD_MODIFY_NO_UNDO, &delete_helper->leaf_addr, - pgbuf_get_vpid_ptr (leaf_page), rv_redo_data_length, delete_helper->rv_redo_data, - &delete_helper->reference_lsa); - } - else /* BTREE_OP_DELETE_VACUUM_OBJECT */ - { - log_append_redo_data (thread_p, RVBT_DELETE_OBJECT_PHYSICAL, &delete_helper->leaf_addr, rv_redo_data_length, - delete_helper->rv_redo_data); - } + assert (!delete_helper->is_system_op_started || delete_helper->purpose != BTREE_OP_DELETE_OBJECT_PHYSICAL); + btree_rv_log_delete_object (thread_p, *delete_helper, delete_helper->leaf_addr, rv_undo_data_length, + rv_redo_data_length, rv_undo_data_ptr, delete_helper->rv_redo_data); FI_TEST (thread_p, FI_TEST_BTREE_MANAGER_RANDOM_EXIT, 0); @@ -31144,11 +31214,7 @@ btree_record_remove_object (THREAD_ENTRY * thread_p, BTID_INT * btid_int, BTREE_ assert (record != NULL); assert (search_key != NULL && search_key->result == BTREE_KEY_FOUND && 
search_key->slotid > 0); assert (addr != NULL && addr->offset != 0 && addr->pgptr == page); - assert (delete_helper->purpose == BTREE_OP_DELETE_VACUUM_OBJECT - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD); + assert (btree_is_delete_object_purpose (delete_helper->purpose)); assert (delete_helper->rv_redo_data != NULL && delete_helper->rv_redo_data_ptr != NULL); /* Safe guard: first object in leaf record cannot be handled here. */ @@ -31191,38 +31257,9 @@ btree_record_remove_object (THREAD_ENTRY * thread_p, BTID_INT * btid_int, BTREE_ /* Add logging. */ BTREE_RV_GET_DATA_LENGTH (delete_helper->rv_redo_data_ptr, delete_helper->rv_redo_data, rv_redo_data_length); assert (rv_redo_data_length > 0); - if (delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL) - { - /* Add undo/redo logging. 
*/ - assert (!delete_helper->is_system_op_started); - log_append_undoredo_data (thread_p, RVBT_DELETE_OBJECT_PHYSICAL, addr, delete_helper->rv_keyval_data_length, - rv_redo_data_length, delete_helper->rv_keyval_data, delete_helper->rv_redo_data); - } - else if (delete_helper->is_system_op_started) - { - BTREE_RV_GET_DATA_LENGTH (rv_undo_data_ptr, rv_undo_data, rv_undo_data_length); - log_append_undoredo_data (thread_p, RVBT_RECORD_MODIFY_UNDOREDO, addr, rv_undo_data_length, rv_redo_data_length, - rv_undo_data, delete_helper->rv_redo_data); - } - else if (delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD) - { - log_append_compensate_with_undo_nxlsa (thread_p, RVBT_RECORD_MODIFY_COMPENSATE, pgbuf_get_vpid_ptr (page), - addr->offset, page, rv_redo_data_length, delete_helper->rv_redo_data, - LOG_FIND_CURRENT_TDES (thread_p), &delete_helper->reference_lsa); - } - else if (delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED) - { - log_append_run_postpone (thread_p, RVBT_RECORD_MODIFY_NO_UNDO, addr, pgbuf_get_vpid_ptr (page), - rv_redo_data_length, delete_helper->rv_redo_data, &delete_helper->reference_lsa); - } - else /* BTREE_OP_DELETE_VACUUM_OBJECT */ - { - assert (delete_helper->purpose == BTREE_OP_DELETE_VACUUM_OBJECT); - log_append_redo_data (thread_p, RVBT_RECORD_MODIFY_NO_UNDO, addr, rv_redo_data_length, - delete_helper->rv_redo_data); - } - + assert (!delete_helper->is_system_op_started || delete_helper->purpose != BTREE_OP_DELETE_OBJECT_PHYSICAL); + btree_rv_log_delete_object (thread_p, *delete_helper, *addr, rv_undo_data_length, rv_redo_data_length, + rv_undo_data, delete_helper->rv_redo_data); FI_TEST (thread_p, FI_TEST_BTREE_MANAGER_RANDOM_EXIT, 0); /* Set page dirty. 
*/ @@ -31388,11 +31425,7 @@ btree_overflow_remove_object (THREAD_ENTRY * thread_p, DB_VALUE * key, BTID_INT assert (leaf_page != NULL); assert (leaf_record != NULL); assert (search_key != NULL && search_key->result == BTREE_KEY_FOUND && search_key->slotid > 0); - assert (delete_helper->purpose == BTREE_OP_DELETE_VACUUM_OBJECT - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD); + assert (btree_is_delete_object_purpose (delete_helper->purpose)); /* Read overflow record. */ overflow_record.area_size = DB_PAGESIZE; @@ -31559,11 +31592,7 @@ btree_leaf_remove_object (THREAD_ENTRY * thread_p, DB_VALUE * key, BTID_INT * bt assert (leaf_page != NULL); assert (leaf_record != NULL); assert (search_key != NULL && search_key->result == BTREE_KEY_FOUND && search_key->slotid > 0); - assert (delete_helper->purpose == BTREE_OP_DELETE_VACUUM_OBJECT - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD); + assert (btree_is_delete_object_purpose (delete_helper->purpose)); #if !defined (NDEBUG) (void) btree_check_valid_record (thread_p, btid_int, leaf_record, BTREE_LEAF_NODE, NULL); @@ -31703,7 +31732,7 @@ btree_key_remove_insert_mvccid (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB assert (delete_helper->purpose == BTREE_OP_DELETE_VACUUM_INSID); assert (VACUUM_IS_THREAD_VACUUM_WORKER (thread_p)); - BTREE_PERF_TRACK_TRAVERSE_TIME (thread_p, delete_helper); + btree_perf_track_traverse_time (thread_p, delete_helper); if (search_key->result == BTREE_KEY_FOUND) { @@ -31830,7 +31859,7 @@ btree_key_remove_insert_mvccid (THREAD_ENTRY * thread_p, BTID_INT * btid_int, 
DB pgbuf_unfix_and_init (thread_p, found_page); } - BTREE_PERF_TRACK_TIME (thread_p, delete_helper); + btree_perf_track_time (thread_p, delete_helper); return error_code; } @@ -31880,7 +31909,7 @@ btree_key_remove_delete_mvccid (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB assert (delete_helper != NULL); assert (delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT_DELID); - BTREE_PERF_TRACK_TRAVERSE_TIME (thread_p, delete_helper); + btree_perf_track_traverse_time (thread_p, delete_helper); if (search_key->result == BTREE_KEY_FOUND) { @@ -31994,7 +32023,7 @@ btree_key_remove_delete_mvccid (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB pgbuf_unfix_and_init (thread_p, found_page); } - BTREE_PERF_TRACK_TIME (thread_p, delete_helper); + btree_perf_track_time (thread_p, delete_helper); return error_code; } @@ -32386,11 +32415,7 @@ btree_overflow_record_replace_object (THREAD_ENTRY * thread_p, BTID_INT * btid_i /* Assert expected arguments. */ assert (btid_int != NULL); assert (delete_helper != NULL); - assert (delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED - || delete_helper->purpose == BTREE_OP_DELETE_OBJECT_PHYSICAL - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT - || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD - || delete_helper->purpose == BTREE_OP_DELETE_VACUUM_OBJECT + assert (btree_is_delete_object_purpose (delete_helper->purpose) || delete_helper->purpose == BTREE_OP_DELETE_UNDO_INSERT_DELID); assert (overflow_page != NULL); assert (overflow_record != NULL); @@ -32453,88 +32478,37 @@ btree_record_remove_insid (THREAD_ENTRY * thread_p, BTID_INT * btid_int, RECDES int offset_to_object, char **rv_undo_data, char **rv_redo_data, int *displacement) { int insert_mvccid_offset; - char *mvccid_ptr; - char *oid_ptr; bool has_fixed_size = false; MVCCID all_visible_mvccid = MVCCID_ALL_VISIBLE; - bool undo_logging = rv_undo_data != NULL && *rv_undo_data != NULL; - bool redo_logging = rv_redo_data != NULL && 
*rv_redo_data != NULL; - /* Assert expected arguments. */ assert (btid_int != NULL); assert (record != NULL); assert (node_type == BTREE_LEAF_NODE || node_type == BTREE_OVERFLOW_NODE); assert (offset_to_object >= 0 && offset_to_object < record->length); - has_fixed_size = (node_type == BTREE_OVERFLOW_NODE) || (offset_to_object > 0 && BTREE_IS_UNIQUE (btid_int->unique_pk)) - || (offset_to_object == 0 && btree_leaf_is_flaged (record, BTREE_LEAF_RECORD_OVERFLOW_OIDS)); + has_fixed_size = ((node_type == BTREE_OVERFLOW_NODE) + || (offset_to_object > 0 && BTREE_IS_UNIQUE (btid_int->unique_pk)) + || (offset_to_object == 0 && btree_leaf_is_flaged (record, BTREE_LEAF_RECORD_OVERFLOW_OIDS))); /* Where is insert MVCCID. */ /* Skip object OID. */ insert_mvccid_offset = offset_to_object + OR_OID_SIZE; - if (BTREE_IS_UNIQUE (btid_int->unique_pk) - && (node_type == BTREE_OVERFLOW_NODE || offset_to_object > 0 - || btree_leaf_is_flaged (record, BTREE_LEAF_RECORD_CLASS_OID))) + if (btree_is_class_oid_packed (btid_int, record, node_type, (offset_to_object == 0))) { /* Also class OID is stored. */ insert_mvccid_offset += OR_OID_SIZE; } - /* Set insert MVCCID pointer. */ - mvccid_ptr = record->data + insert_mvccid_offset; - /* Set object pointer */ - oid_ptr = record->data + offset_to_object; - if (has_fixed_size) { - /* Undo log replace. */ - if (undo_logging) - { - *rv_undo_data = - log_rv_pack_undo_record_changes (*rv_undo_data, insert_mvccid_offset, OR_MVCCID_SIZE, OR_MVCCID_SIZE, - mvccid_ptr); - } - - /* Replace. */ - OR_PUT_MVCCID (mvccid_ptr, &all_visible_mvccid); - - /* Redo log replace. */ - if (redo_logging) - { - *rv_redo_data = - log_rv_pack_redo_record_changes (*rv_redo_data, insert_mvccid_offset, OR_MVCCID_SIZE, OR_MVCCID_SIZE, - mvccid_ptr); - } + btree_set_mvccid (record, insert_mvccid_offset, &all_visible_mvccid, rv_undo_data, rv_redo_data); } else { - if (undo_logging) - { - /* Undo log remove MVCCID. 
*/ - *rv_undo_data = - log_rv_pack_undo_record_changes (*rv_undo_data, insert_mvccid_offset, OR_MVCCID_SIZE, 0, mvccid_ptr); - /* Undo log clear flag. */ - *rv_undo_data = - log_rv_pack_undo_record_changes (*rv_undo_data, offset_to_object + OR_OID_VOLID, OR_SHORT_SIZE, - OR_SHORT_SIZE, oid_ptr + OR_OID_VOLID); - } - - /* Remove. */ - RECORD_MOVE_DATA (record, insert_mvccid_offset, insert_mvccid_offset + OR_MVCCID_SIZE); - btree_record_object_clear_mvcc_flags (oid_ptr, BTREE_OID_HAS_MVCC_INSID); - - if (redo_logging) - { - /* Redo log remove MVCCID. */ - *rv_redo_data = - log_rv_pack_redo_record_changes (*rv_redo_data, insert_mvccid_offset, OR_MVCCID_SIZE, 0, NULL); - /* Redo log clear flag. */ - *rv_redo_data = - log_rv_pack_redo_record_changes (*rv_redo_data, offset_to_object + OR_OID_VOLID, OR_SHORT_SIZE, - OR_SHORT_SIZE, oid_ptr + OR_OID_VOLID); - } + btree_remove_mvccid (record, offset_to_object, insert_mvccid_offset, BTREE_OID_HAS_MVCC_INSID, rv_undo_data, + rv_redo_data); if (displacement != NULL) { @@ -32564,19 +32538,15 @@ btree_record_remove_delid (THREAD_ENTRY * thread_p, BTID_INT * btid_int, RECDES int offset_to_object, char **rv_undo_data, char **rv_redo_data) { int offset_to_delete_mvccid; - char *oid_ptr = NULL; - char *mvccid_ptr = NULL; bool has_fixed_size; MVCCID null_mvccid = MVCCID_NULL; - bool undo_logging = rv_undo_data != NULL && *rv_undo_data != NULL; - bool redo_logging = rv_redo_data != NULL && *rv_redo_data != NULL; - /* Assert expected arguments. */ assert (btid_int != NULL); assert (record != NULL); assert (node_type == BTREE_LEAF_NODE || node_type == BTREE_OVERFLOW_NODE); assert (offset_to_object >= 0 && offset_to_object < record->length); + /* Safe guard: unique indexes are not allowed to remove delete MVCCID unless it is the first object. Otherwise, * object should be relocated to first position. 
*/ assert (!BTREE_IS_UNIQUE (btid_int->unique_pk) || (node_type == BTREE_LEAF_NODE && offset_to_object == 0)); @@ -32584,9 +32554,6 @@ btree_record_remove_delid (THREAD_ENTRY * thread_p, BTID_INT * btid_int, RECDES has_fixed_size = (node_type == BTREE_OVERFLOW_NODE || (offset_to_object == 0 && btree_leaf_is_flaged (record, BTREE_LEAF_RECORD_OVERFLOW_OIDS))); - /* Set object OID pointer (to change MVCC flags). */ - oid_ptr = record->data + offset_to_object; - /* Compute offset to delete MVCCID. */ /* Start with offset_to_object. */ /* OID is always saved. */ @@ -32597,64 +32564,21 @@ btree_record_remove_delid (THREAD_ENTRY * thread_p, BTID_INT * btid_int, RECDES /* Class OID is also saved. */ offset_to_delete_mvccid += OR_OID_SIZE; } - if (has_fixed_size || btree_record_object_is_flagged (oid_ptr, BTREE_OID_HAS_MVCC_INSID)) + if (has_fixed_size || btree_record_object_is_flagged (record->data + offset_to_object, BTREE_OID_HAS_MVCC_INSID)) { /* Insert MVCCID is also saved. */ offset_to_delete_mvccid += OR_MVCCID_SIZE; } - /* Set MVCCID pointer. */ - mvccid_ptr = record->data + offset_to_delete_mvccid; - /* Remove or replace delete MVCCID. */ if (has_fixed_size) { - if (undo_logging) - { - /* Undo logging: replace MVCCID. */ - *rv_undo_data = - log_rv_pack_undo_record_changes (*rv_undo_data, offset_to_delete_mvccid, OR_MVCCID_SIZE, OR_MVCCID_SIZE, - mvccid_ptr); - } - - /* Replace. */ - OR_PUT_MVCCID (mvccid_ptr, &null_mvccid); - - if (redo_logging) - { - /* Redo logging: replace MVCCID. */ - *rv_redo_data = - log_rv_pack_redo_record_changes (*rv_redo_data, offset_to_delete_mvccid, OR_MVCCID_SIZE, OR_MVCCID_SIZE, - mvccid_ptr); - } + btree_set_mvccid (record, offset_to_delete_mvccid, &null_mvccid, rv_undo_data, rv_redo_data); } else { - if (undo_logging) - { - /* Undo logging: remove MVCCID. */ - *rv_undo_data = - log_rv_pack_undo_record_changes (*rv_undo_data, offset_to_delete_mvccid, OR_MVCCID_SIZE, 0, mvccid_ptr); - /* Undo logging: clear flag. 
*/ - *rv_undo_data = - log_rv_pack_undo_record_changes (*rv_undo_data, offset_to_object + OR_OID_VOLID, OR_SHORT_SIZE, - OR_SHORT_SIZE, oid_ptr + OR_OID_VOLID); - } - - /* Remove. */ - RECORD_MOVE_DATA (record, offset_to_delete_mvccid, offset_to_delete_mvccid + OR_MVCCID_SIZE); - btree_record_object_clear_mvcc_flags (oid_ptr, BTREE_OID_HAS_MVCC_DELID); - - if (redo_logging) - { - /* Redo logging: remove MVCCID. */ - *rv_redo_data = - log_rv_pack_redo_record_changes (*rv_redo_data, offset_to_delete_mvccid, OR_MVCCID_SIZE, 0, NULL); - /* Redo logging: clear flag. */ - *rv_redo_data = - log_rv_pack_redo_record_changes (*rv_redo_data, offset_to_object + OR_OID_VOLID, OR_SHORT_SIZE, - OR_SHORT_SIZE, oid_ptr + OR_OID_VOLID); - } + btree_remove_mvccid (record, offset_to_object, offset_to_delete_mvccid, BTREE_OID_HAS_MVCC_DELID, rv_undo_data, + rv_redo_data); } #if !defined (NDEBUG) @@ -32695,9 +32619,7 @@ btree_record_add_delid (THREAD_ENTRY * thread_p, BTID_INT * btid_int, RECDES * r /* Compute offset to delete MVCCID. */ /* Instance OID is always packed. */ offset_to_delete_mvccid = offset_to_object + OR_OID_SIZE; - if (BTREE_IS_UNIQUE (btid_int->unique_pk) - && (node_type == BTREE_OVERFLOW_NODE || offset_to_object > 0 - || btree_leaf_is_flaged (record, BTREE_LEAF_RECORD_CLASS_OID))) + if (btree_is_class_oid_packed (btid_int, record, node_type, (offset_to_object == 0))) { /* Class OID is also packed. */ offset_to_delete_mvccid += OR_OID_SIZE; @@ -32713,13 +32635,13 @@ btree_record_add_delid (THREAD_ENTRY * thread_p, BTID_INT * btid_int, RECDES * r if (btree_record_object_is_flagged (oid_ptr, BTREE_OID_HAS_MVCC_DELID)) { /* Just replace the MVCCID. */ - btree_set_mvcc_delid (record, offset_to_delete_mvccid, &delete_mvccid, rv_undo_data, rv_redo_data); + btree_set_mvccid (record, offset_to_delete_mvccid, &delete_mvccid, rv_undo_data, rv_redo_data); } else { /* Insert delete MVCCID. 
*/ - btree_add_mvcc_delid (record, offset_to_object, offset_to_delete_mvccid, &delete_mvccid, rv_undo_data, - rv_redo_data); + btree_add_mvccid (record, offset_to_object, offset_to_delete_mvccid, delete_mvccid, BTREE_OID_HAS_MVCC_DELID, + rv_undo_data, rv_redo_data); } #if !defined (NDEBUG) btree_check_valid_record (thread_p, btid_int, record, node_type, NULL); @@ -32996,6 +32918,7 @@ btree_delete_sysop_end (THREAD_ENTRY * thread_p, BTREE_DELETE_HELPER * helper) switch (helper->purpose) { case BTREE_OP_DELETE_OBJECT_PHYSICAL: + case BTREE_OP_ONLINE_INDEX_TRAN_DELETE: log_sysop_end_logical_undo (thread_p, RVBT_DELETE_OBJECT_PHYSICAL, NULL, helper->rv_keyval_data_length, helper->rv_keyval_data); break; @@ -33007,6 +32930,7 @@ btree_delete_sysop_end (THREAD_ENTRY * thread_p, BTREE_DELETE_HELPER * helper) case BTREE_OP_DELETE_UNDO_INSERT: case BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD: case BTREE_OP_DELETE_UNDO_INSERT_DELID: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT: log_sysop_end_logical_compensate (thread_p, &helper->reference_lsa); break; @@ -33016,6 +32940,7 @@ btree_delete_sysop_end (THREAD_ENTRY * thread_p, BTREE_DELETE_HELPER * helper) /* fall through to commit on release */ case BTREE_OP_DELETE_VACUUM_OBJECT: + case BTREE_OP_ONLINE_INDEX_IB_DELETE: log_sysop_commit (thread_p); break; @@ -33047,14 +32972,21 @@ btree_insert_sysop_end (THREAD_ENTRY * thread_p, BTREE_INSERT_HELPER * helper) switch (helper->purpose) { case BTREE_OP_INSERT_NEW_OBJECT: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF: log_sysop_end_logical_undo (thread_p, helper->rcvindex, helper->leaf_addr.vfid, helper->rv_keyval_data_length, helper->rv_keyval_data); break; case BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE: log_sysop_end_logical_compensate (thread_p, &helper->compensate_undo_nxlsa); break; + case BTREE_OP_ONLINE_INDEX_IB_INSERT: + log_sysop_commit (thread_p); + break; + case BTREE_OP_INSERT_MVCC_DELID: 
case BTREE_OP_INSERT_MARK_DELETED: /* no system ops are expected! */ @@ -33101,6 +33033,20 @@ btree_purpose_to_string (BTREE_OP_PURPOSE purpose) return "BTREE_OP_DELETE_VACUUM_INSID"; case BTREE_OP_DELETE_VACUUM_OBJECT: return "BTREE_OP_DELETE_VACUUM_OBJECT"; + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT: + return "BTREE_OP_ONLINE_INDEX_TRAN_INSERT"; + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF: + return "BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF"; + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE: + return "BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE"; + case BTREE_OP_ONLINE_INDEX_TRAN_DELETE: + return "case BTREE_OP_ONLINE_INDEX_TRAN_DELETE"; + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT: + return "BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT"; + case BTREE_OP_ONLINE_INDEX_IB_INSERT: + return "BTREE_OP_ONLINE_INDEX_IB_INSERT"; + case BTREE_OP_ONLINE_INDEX_IB_DELETE: + return "BTREE_OP_ONLINE_INDEX_IB_DELETE"; default: assert (false); return "** UNKNOWN PURPOSE **"; @@ -33194,3 +33140,1836 @@ btree_get_perf_btree_page_type (THREAD_ENTRY * thread_p, PAGE_PTR page_ptr) } return PERF_PAGE_BTREE_ROOT; } + +// +// btree_online_index_check_state () - check online index state is valid +// +// state (in) : state +// +static inline void +btree_online_index_check_state (MVCCID state) +{ + assert (state == BTREE_ONLINE_INDEX_NORMAL_FLAG_STATE + || state == BTREE_ONLINE_INDEX_INSERT_FLAG_STATE || state == BTREE_ONLINE_INDEX_DELETE_FLAG_STATE); +} + +static inline bool +btree_online_index_is_insert_flag_state (MVCCID state) +{ + return state == BTREE_ONLINE_INDEX_INSERT_FLAG_STATE; +} + +static inline bool +btree_online_index_is_delete_flag_state (MVCCID state) +{ + return state == BTREE_ONLINE_INDEX_DELETE_FLAG_STATE; +} + +static inline bool +btree_online_index_is_normal_state (MVCCID state) +{ + return state == BTREE_ONLINE_INDEX_NORMAL_FLAG_STATE; +} + +static inline void +btree_online_index_set_insert_flag_state (MVCCID & state) +{ + state = BTREE_ONLINE_INDEX_INSERT_FLAG_STATE; +} + +static 
inline void +btree_online_index_set_delete_flag_state (MVCCID & state) +{ + state = BTREE_ONLINE_INDEX_DELETE_FLAG_STATE; +} + +static inline void +btree_online_index_set_normal_state (MVCCID & state) +{ + state = BTREE_ONLINE_INDEX_NORMAL_FLAG_STATE; +} + +// +// btree_online_index_dispatcher () - dispatch online index operation: populate insert/delete helper and choose +// appropriate root/traversal/leaf functions +// +// return : error code +// thread_p (in) : thread entry +// btid_int (in) : b-tree info +// key (in) : key +// class_oid (in) : class OID +// oid (in) : instance OID +// unique (in) : ... todo +// purpose (in) : function purpose +// +int +btree_online_index_dispatcher (THREAD_ENTRY * thread_p, BTID * btid, DB_VALUE * key, OID * class_oid, OID * oid, + int unique, BTREE_OP_PURPOSE purpose, LOG_LSA * undo_nxlsa) +{ + int error_code = NO_ERROR; + /* Search key helper which will point to where data should inserted. */ + BTREE_SEARCH_KEY_HELPER search_key = BTREE_SEARCH_KEY_HELPER_INITIALIZER; + /* Processing key function: can insert an object or just a delete MVCCID. */ + BTREE_ROOT_WITH_KEY_FUNCTION *root_function = NULL; + BTREE_ADVANCE_WITH_KEY_FUNCTION *advance_function = NULL; + BTREE_PROCESS_KEY_FUNCTION *key_function = NULL; + BTREE_HELPER helper; + BTID_INT btid_int; + + helper.insert_helper = BTREE_INSERT_HELPER_INITIALIZER; + helper.delete_helper = BTREE_DELETE_HELPER_INITIALIZER; + + /* Safe guards */ + assert (oid != NULL); + assert (class_oid != NULL); + assert (purpose == BTREE_OP_ONLINE_INDEX_IB_INSERT || purpose == BTREE_OP_ONLINE_INDEX_TRAN_INSERT + || purpose == BTREE_OP_ONLINE_INDEX_TRAN_DELETE || purpose == BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE + || purpose == BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT); + + /* Check for null keys. */ + if (DB_IS_NULL (key) || btree_multicol_key_is_null (key)) + { + /* We do not store NULL keys but we track them for unique indexes. 
*/ + if (BTREE_IS_UNIQUE (unique)) + { + /* In this scenario, we have to write log for the update of local statistics, since we do not + * log the physical operation of a NULL key. + */ + if (purpose == BTREE_OP_ONLINE_INDEX_TRAN_DELETE || purpose == BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT) + { + /* DELETE operation, we decrement oids and nulls. */ + logtb_tran_update_unique_stats (thread_p, btid, 0, -1, -1, true); + } + else + { + /* Insert operation, we increment oids and nulls. */ + logtb_tran_update_unique_stats (thread_p, btid, 0, 1, 1, true); + } + } + + return NO_ERROR; + } + + /* Save OID, class OID and MVCC info in insert helper. */ + COPY_OID (BTREE_INSERT_OID (&helper.insert_helper), oid); + COPY_OID (BTREE_DELETE_OID (&helper.delete_helper), oid); + if (class_oid != NULL) + { + COPY_OID (BTREE_INSERT_CLASS_OID (&helper.insert_helper), class_oid); + COPY_OID (BTREE_DELETE_CLASS_OID (&helper.delete_helper), class_oid); + } + else + { + OID_SET_NULL (BTREE_INSERT_CLASS_OID (&helper.insert_helper)); + OID_SET_NULL (BTREE_DELETE_CLASS_OID (&helper.delete_helper)); + } + + if (undo_nxlsa != NULL) + { + LSA_COPY (&helper.insert_helper.compensate_undo_nxlsa, undo_nxlsa); + LSA_COPY (&helper.delete_helper.reference_lsa, undo_nxlsa); + } + + helper.insert_helper.log_operations = prm_get_bool_value (PRM_ID_LOG_BTREE_OPS); + helper.delete_helper.log_operations = prm_get_bool_value (PRM_ID_LOG_BTREE_OPS); + + switch (purpose) + { + case BTREE_OP_ONLINE_INDEX_IB_INSERT: + /* This is an insert done by the index builder. 
*/ + helper.insert_helper.op_type = SINGLE_ROW_INSERT; + helper.insert_helper.purpose = purpose; + root_function = btree_fix_root_for_insert; + advance_function = btree_split_node_and_advance; + key_function = btree_key_online_index_IB_insert; + break; + + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE: + helper.insert_helper.op_type = SINGLE_ROW_INSERT; + helper.insert_helper.purpose = purpose; + root_function = btree_fix_root_for_insert; + advance_function = btree_split_node_and_advance; + key_function = btree_key_online_index_tran_insert; + break; + + case BTREE_OP_ONLINE_INDEX_TRAN_DELETE: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT: + helper.delete_helper.op_type = SINGLE_ROW_DELETE; + helper.delete_helper.purpose = purpose; + root_function = btree_fix_root_for_delete; + advance_function = btree_merge_node_and_advance; + key_function = btree_key_online_index_tran_delete; + + error_code = + btree_search_key_and_apply_functions (thread_p, btid, &btid_int, key, root_function, &helper.delete_helper, + advance_function, &helper.delete_helper, key_function, &helper, + &search_key, NULL); + + if (error_code == NO_ERROR && search_key.result == BTREE_KEY_NOTFOUND) + { + /* We failed to find the object in the index. We must traverse again the btree and treat the operation + * as an insert with DELETE_FLAG set. + */ + helper.insert_helper.purpose = purpose; + helper.insert_helper.op_type = SINGLE_ROW_INSERT; + if (helper.delete_helper.purpose == BTREE_OP_ONLINE_INDEX_TRAN_DELETE) + { + helper.insert_helper.purpose = BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF; + } + root_function = btree_fix_root_for_insert; + advance_function = btree_split_node_and_advance; + key_function = btree_key_online_index_tran_insert_DF; + break; // Fall through. + } + else + { + goto end; + } + + default: + /* This should never happen. 
*/ + assert (false); + return ER_FAILED; + } + + error_code = + btree_search_key_and_apply_functions (thread_p, btid, &btid_int, key, root_function, &helper.insert_helper, + advance_function, &helper.insert_helper, key_function, &helper, &search_key, + NULL); + +end: + + if (helper.insert_helper.printed_key != NULL) + { + db_private_free (thread_p, helper.insert_helper.printed_key); + } + + if (helper.delete_helper.printed_key != NULL && helper.delete_helper.printed_key != helper.insert_helper.printed_key) + { + db_private_free (thread_p, helper.delete_helper.printed_key); + } + + return error_code; +} + +/* + * btree_key_online_index_IB_insert () - BTREE_PROCESS_KEY_FUNCTION used for inserting a new object in b-tree during + * online index loading. + * + * return : Error code. + * thread_p (in) : Thread entry. + * btid_int (in) : B-tree info. + * key (int) : Key info + * leaf_page (in) : Pointer to the leaf page. + * search_key (in) : Search helper + * restart (in/out): Restart + * args (in/out) : BTREE_INSERT_HELPER *. + */ +int +btree_key_online_index_IB_insert (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE * key, + PAGE_PTR * leaf_page, BTREE_SEARCH_KEY_HELPER * search_key, bool * restart, + void *other_args) +{ + BTREE_HELPER *helper = (BTREE_HELPER *) other_args; + int error_code = NO_ERROR; /* Error code. */ + RECDES record; /* Record descriptor for leaf key record. */ + LEAF_REC leaf_info; /* Leaf record info. */ + int offset_after_key; /* Offset in record data where packed key is ended. */ + bool dummy_clear_key; /* Dummy field used as argument for btree_read_record. */ + PAGE_PTR page_found = NULL; + int offset_to_object = 0; + BTREE_MVCC_INFO btree_mvcc_info = BTREE_MVCC_INFO_INITIALIZER; + PAGE_PTR prev_page = NULL; + BTREE_NODE_TYPE node_type; + /* Redo recovery structures. 
*/ + char rv_redo_data_buffer[BTREE_RV_BUFFER_SIZE + BTREE_MAX_ALIGN]; + char *rv_redo_data = PTR_ALIGN (rv_redo_data_buffer, BTREE_MAX_ALIGN); + char *rv_redo_data_ptr = rv_redo_data; + int rv_redo_data_length = 0; + LOG_DATA_ADDR addr; + LOG_LSA prev_lsa; + PGSLOTID slotid; + char rec_buf[IO_MAX_PAGE_SIZE + BTREE_MAX_ALIGN]; + char new_rec_buf[IO_MAX_PAGE_SIZE + BTREE_MAX_ALIGN]; + RECDES new_record; + int n_keys = 0; + int n_oids = 0; + + record.data = PTR_ALIGN (rec_buf, BTREE_MAX_ALIGN); + record.area_size = IO_MAX_PAGE_SIZE; + + new_record.data = PTR_ALIGN (new_rec_buf, BTREE_MAX_ALIGN); + new_record.area_size = IO_MAX_PAGE_SIZE; + + /* Redo logging. */ + helper->insert_helper.rv_redo_data = rv_redo_data; + helper->insert_helper.rv_redo_data_ptr = helper->insert_helper.rv_redo_data; + + helper->insert_helper.leaf_addr.offset = search_key->slotid; + helper->insert_helper.leaf_addr.pgptr = *leaf_page; + helper->insert_helper.leaf_addr.vfid = &btid_int->sys_btid->vfid; + + /* We are in leaf level now, and we must inspect if we have found the OID inside the key. */ + if (search_key->result == BTREE_KEY_FOUND) + { + /* Get the record. */ + if (spage_get_record (thread_p, *leaf_page, search_key->slotid, &record, COPY) != S_SUCCESS) + { + assert_release (false); + error_code = ER_FAILED; + return error_code; + } + + /* Read the record. 
*/ + error_code = + btree_read_record (thread_p, btid_int, *leaf_page, &record, NULL, &leaf_info, BTREE_LEAF_NODE, + &dummy_clear_key, &offset_after_key, PEEK_KEY_VALUE, NULL); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + error_code = + btree_find_oid_with_page_and_record (thread_p, btid_int, &helper->insert_helper.obj_info.oid, *leaf_page, + helper->insert_helper.purpose, NULL, &record, &leaf_info, + offset_after_key, &page_found, &prev_page, &offset_to_object, + &btree_mvcc_info, &new_record); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + goto end; + } + + node_type = (page_found == *leaf_page) ? BTREE_LEAF_NODE : BTREE_OVERFLOW_NODE; + + if (node_type == BTREE_OVERFLOW_NODE) + { + slotid = 1; + } + else + { + slotid = search_key->slotid; + } + + if (offset_to_object != NOT_FOUND) + { + /* Inspect the object and its MVCC_INFO. */ + /* This is the index builder, therefore if there is already an OID that matches the one that needs to be + * inserted, then the already inserted one should have either DELETE_FLAG or INSERT_FLAG set. + */ + btree_online_index_check_state (btree_mvcc_info.insert_mvccid); + + if (btree_online_index_is_insert_flag_state (btree_mvcc_info.insert_mvccid)) + { + /* INSERT_FLAG is set. It means we have to remove the flag, according to the state machine. */ + btree_online_index_set_normal_state (btree_mvcc_info.insert_mvccid); + + /* Prepare logging data. */ + addr.offset = slotid; + addr.pgptr = page_found; + addr.vfid = &btid_int->sys_btid->vfid; + + if (node_type == BTREE_OVERFLOW_NODE) + { + BTREE_RV_SET_OVERFLOW_NODE (&addr); + } + LOG_RV_RECORD_SET_MODIFY_MODE (&addr, LOG_RV_RECORD_UPDATE_PARTIAL); + + btree_online_index_change_state (thread_p, btid_int, &new_record, node_type, offset_to_object, + btree_mvcc_info.insert_mvccid, NULL, + &helper->insert_helper.rv_redo_data_ptr); + + /* Add the logged info. */ + /* Update in page. 
*/ + if (spage_update (thread_p, page_found, slotid, &new_record) != SP_SUCCESS) + { + /* Unexpected. */ + assert_release (false); + error_code = ER_FAILED; + goto end; + } + + FI_TEST (thread_p, FI_TEST_BTREE_MANAGER_RANDOM_EXIT, 0); + + /* We need to log previous lsa. */ + LSA_COPY (&prev_lsa, pgbuf_get_lsa (page_found)); + + /* Logging. */ + BTREE_RV_GET_DATA_LENGTH (helper->insert_helper.rv_redo_data_ptr, helper->insert_helper.rv_redo_data, + rv_redo_data_length); + log_append_redo_data (thread_p, RVBT_RECORD_MODIFY_NO_UNDO, &addr, rv_redo_data_length, + helper->insert_helper.rv_redo_data); + + btree_insert_log (&helper->insert_helper, + BTREE_INSERT_MODIFY_MSG ("IB insert change from INSERT_FLAG to NORMAL_STATE"), + BTREE_INSERT_MODIFY_ARGS (thread_p, &helper->insert_helper, page_found, &prev_lsa, + node_type == BTREE_LEAF_NODE, slotid, new_record.length, + btid_int->sys_btid)); + + FI_TEST (thread_p, FI_TEST_BTREE_MANAGER_RANDOM_EXIT, 0); + + pgbuf_set_dirty (thread_p, page_found, DONT_FREE); + + goto end; + } + else + { + assert (btree_online_index_is_delete_flag_state (btree_mvcc_info.insert_mvccid)); + + btree_insert_helper_to_delete_helper (&helper->insert_helper, &helper->delete_helper); + helper->delete_helper.purpose = BTREE_OP_ONLINE_INDEX_IB_DELETE; + helper->delete_helper.op_type = SINGLE_ROW_DELETE; + + if (node_type == BTREE_LEAF_NODE + && (btree_record_get_num_oids (thread_p, btid_int, &new_record, offset_after_key, node_type) == 1)) + { + /* Only one OID in the key, we will remove the key as well. */ + n_keys = -1; + } + n_oids = -1; + + error_code = + btree_key_remove_object (thread_p, key, btid_int, &helper->delete_helper, *leaf_page, &record, + &leaf_info, offset_after_key, search_key, &page_found, prev_page, + node_type, offset_to_object); + goto end; + } + } + else + { + /* Key was found but the object wasn't. We must append the object to the current key. */ + + /* Safeguards. 
*/ + assert (search_key->result == BTREE_KEY_FOUND && offset_to_object == NOT_FOUND); + + n_oids = 1; + + error_code = + btree_key_append_object_non_unique (thread_p, btid_int, key, *leaf_page, search_key, &new_record, + offset_after_key, &leaf_info, &helper->insert_helper.obj_info, + &helper->insert_helper); + } + } + else + { + /* Key was not found, we must insert it. */ + n_keys = 1; + n_oids = 1; + + error_code = btree_key_insert_new_key (thread_p, btid_int, key, *leaf_page, &helper->insert_helper, search_key); + } + +end: + if (error_code == NO_ERROR && BTREE_IS_UNIQUE (btid_int->unique_pk)) + { + logtb_tran_update_unique_stats (thread_p, btid_int->sys_btid, n_keys, n_oids, 0, false); + } + + return error_code; +} + +/* + * btree_key_online_index_tran_insert () - BTREE_PROCESS_KEY_FUNCTION used for inserting a new object + * in b-tree during online index loading. + * + * return : Error code. + * thread_p (in) : Thread entry. + * btid_int (in) : B-tree info. + * key (int) : Key info + * leaf_page (in) : Pointer to the leaf page. + * search_key (in) : Search helper + * restart (in/out): Restart + * args (in/out) : BTREE_INSERT_HELPER *. + */ +static int +btree_key_online_index_tran_insert (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE * key, + PAGE_PTR * leaf_page, BTREE_SEARCH_KEY_HELPER * search_key, bool * restart, + void *other_args) +{ + BTREE_HELPER *helper = (BTREE_HELPER *) other_args; + int error_code = NO_ERROR; /* Error code. */ + RECDES record; /* Record descriptor for leaf key record. */ + LEAF_REC leaf_info; /* Leaf record info. */ + int offset_after_key; /* Offset in record data where packed key is ended. */ + bool dummy_clear_key; /* Dummy field used as argument for btree_read_record. 
*/ + PAGE_PTR page_found = NULL; + int offset_to_object = 0; + BTREE_MVCC_INFO btree_mvcc_info = BTREE_MVCC_INFO_INITIALIZER; + PAGE_PTR prev_page = NULL; + BTREE_NODE_TYPE node_type; + RECDES new_record; + PGSLOTID slotid; + LOG_LSA prev_lsa; + + LOG_DATA_ADDR addr; + + char rec_buf[IO_MAX_PAGE_SIZE + BTREE_MAX_ALIGN]; + record.data = PTR_ALIGN (rec_buf, BTREE_MAX_ALIGN); + record.area_size = IO_MAX_PAGE_SIZE; + + char new_rec_buf[IO_MAX_PAGE_SIZE + BTREE_MAX_ALIGN]; + new_record.data = PTR_ALIGN (new_rec_buf, BTREE_MAX_ALIGN); + new_record.area_size = IO_MAX_PAGE_SIZE; + + char *rv_undo_data = NULL; + int rv_undo_data_capacity = IO_MAX_PAGE_SIZE; + char rv_undo_data_buffer[IO_MAX_PAGE_SIZE + BTREE_MAX_ALIGN]; + char *rv_undo_data_bufalign = PTR_ALIGN (rv_undo_data_buffer, BTREE_MAX_ALIGN); + + char rv_redo_data_buffer[BTREE_RV_BUFFER_SIZE + BTREE_MAX_ALIGN]; + char *rv_redo_data = PTR_ALIGN (rv_redo_data_buffer, BTREE_MAX_ALIGN); + char *rv_redo_data_ptr = rv_redo_data; + int rv_redo_data_length = 0; + + helper->insert_helper.rv_redo_data = rv_redo_data; + helper->insert_helper.rv_redo_data_ptr = helper->insert_helper.rv_redo_data; + + helper->insert_helper.leaf_addr.offset = search_key->slotid; + helper->insert_helper.leaf_addr.pgptr = *leaf_page; + helper->insert_helper.leaf_addr.vfid = &btid_int->sys_btid->vfid; + + /* Undo logging. 
*/ + if (helper->insert_helper.purpose == BTREE_OP_ONLINE_INDEX_TRAN_INSERT + || helper->insert_helper.purpose == BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF) + { + error_code = + btree_rv_save_keyval_for_undo (btid_int, key, BTREE_INSERT_CLASS_OID (&helper->insert_helper), + BTREE_INSERT_OID (&helper->insert_helper), + BTREE_INSERT_MVCC_INFO (&helper->insert_helper), helper->insert_helper.purpose, + rv_undo_data_bufalign, &helper->insert_helper.rv_keyval_data, + &rv_undo_data_capacity, &helper->insert_helper.rv_keyval_data_length); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + } + + /* We are in leaf level now, and we must inspect if we have found the OID inside the key. */ + if (search_key->result == BTREE_KEY_FOUND) + { + /* We search the key for the OID. If we find it, we should find it with DELETE_FLAG set, therefore we must + * delete it in place. + */ + + /* Get the record. */ + if (spage_get_record (thread_p, *leaf_page, search_key->slotid, &record, COPY) != S_SUCCESS) + { + assert_release (false); + error_code = ER_FAILED; + return error_code; + } + + /* Read the record. */ + error_code = + btree_read_record (thread_p, btid_int, *leaf_page, &record, NULL, &leaf_info, BTREE_LEAF_NODE, + &dummy_clear_key, &offset_after_key, PEEK_KEY_VALUE, NULL); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + error_code = + btree_find_oid_with_page_and_record (thread_p, btid_int, &helper->insert_helper.obj_info.oid, *leaf_page, + helper->insert_helper.purpose, NULL, &record, &leaf_info, offset_after_key, + &page_found, &prev_page, &offset_to_object, &btree_mvcc_info, &new_record); + + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + node_type = (page_found == *leaf_page) ? BTREE_LEAF_NODE : BTREE_OVERFLOW_NODE; + + if (offset_to_object != NOT_FOUND) + { + /* Inspect the key and its MVCC_INFO. 
This is the transactional insert, which means that if we can find the + * object, then the object must have DELETE_FLAG set. + */ + btree_online_index_check_state (btree_mvcc_info.insert_mvccid); + assert (btree_online_index_is_delete_flag_state (btree_mvcc_info.insert_mvccid)); + + /* Here we must change the state to insert flag. */ + if (node_type == BTREE_LEAF_NODE) + { + slotid = search_key->slotid; + } + else + { + slotid = 1; + } + + /* Prepare logging. */ + addr.offset = slotid; + addr.pgptr = page_found; + addr.vfid = &btid_int->sys_btid->vfid; + + /* Redo logging. */ + if (node_type == BTREE_OVERFLOW_NODE) + { + BTREE_RV_SET_OVERFLOW_NODE (&addr); + } + LOG_RV_RECORD_SET_MODIFY_MODE (&addr, LOG_RV_RECORD_UPDATE_PARTIAL); + + /* Set the new state to INSERT_FLAG. */ + btree_online_index_set_insert_flag_state (btree_mvcc_info.insert_mvccid); + + /* Change the state of the record. */ + btree_online_index_change_state (thread_p, btid_int, &new_record, node_type, offset_to_object, + btree_mvcc_info.insert_mvccid, NULL, &rv_redo_data_ptr); + + if (spage_update (thread_p, page_found, slotid, &new_record) != SP_SUCCESS) + { + assert_release (false); + error_code = ER_FAILED; + return error_code; + } + + /* We need to log previous lsa. */ + LSA_COPY (&prev_lsa, pgbuf_get_lsa (page_found)); + + /* Logging. */ + BTREE_RV_GET_DATA_LENGTH (rv_redo_data_ptr, rv_redo_data, rv_redo_data_length); + + btree_insert_log (&helper->insert_helper, + BTREE_INSERT_MODIFY_MSG ("Tran insert change from DELETE_FLAG to INSERT_FLAG"), + BTREE_INSERT_MODIFY_ARGS (thread_p, &helper->insert_helper, page_found, &prev_lsa, + node_type == BTREE_LEAF_NODE, slotid, new_record.length, + btid_int->sys_btid)); + + btree_rv_log_insert_object (thread_p, helper->insert_helper, addr, 0, rv_redo_data_length, NULL, + rv_redo_data); + + pgbuf_set_dirty (thread_p, page_found, DONT_FREE); + + return error_code; + } + else + { + /* Key was found but the object wasn't. 
We must append the object to the current key. */ + /* Safeguards. */ + assert (search_key->result == BTREE_KEY_FOUND && offset_to_object == NOT_FOUND); + + error_code = + btree_key_append_object_non_unique (thread_p, btid_int, key, *leaf_page, search_key, &new_record, + offset_after_key, &leaf_info, &helper->insert_helper.obj_info, + &helper->insert_helper); + if (error_code == NO_ERROR && BTREE_IS_UNIQUE (btid_int->unique_pk)) + { + // Append a single object. + logtb_tran_update_unique_stats (thread_p, btid_int->sys_btid, 0, 1, 0, false); + } + + return error_code; + } + } + else + { + /* Key was not found, we must insert it. */ + error_code = btree_key_insert_new_key (thread_p, btid_int, key, *leaf_page, &helper->insert_helper, search_key); + if (error_code == NO_ERROR && BTREE_IS_UNIQUE (btid_int->unique_pk)) + { + /* Insert a key with an object. */ + logtb_tran_update_unique_stats (thread_p, btid_int->sys_btid, 1, 1, 0, false); + } + return error_code; + } + + return error_code; +} + +/* + * btree_key_online_index_tran_delete () - BTREE_PROCESS_KEY_FUNCTION used for deleting an object + * in b-tree during online index loading. + * + * return : Error code. + * thread_p (in) : Thread entry. + * btid_int (in) : B-tree info. + * key (int) : Key info + * leaf_page (in) : Pointer to the leaf page. + * search_key (in) : Search helper + * restart (in/out): Restart + * args (in/out) : BTREE_INSERT_HELPER *. + */ +static int +btree_key_online_index_tran_delete (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE * key, + PAGE_PTR * leaf_page, BTREE_SEARCH_KEY_HELPER * search_key, bool * restart, + void *other_args) +{ + BTREE_HELPER *helper = (BTREE_HELPER *) other_args; + int error_code = NO_ERROR; /* Error code. */ + RECDES record; /* Record descriptor for leaf key record. */ + LEAF_REC leaf_info; /* Leaf record info. */ + int offset_after_key; /* Offset in record data where packed key is ended. 
*/ + bool dummy_clear_key; /* Dummy field used as argument for btree_read_record. */ + PAGE_PTR page_found = NULL; + int offset_to_object = 0; + BTREE_MVCC_INFO btree_mvcc_info = BTREE_MVCC_INFO_INITIALIZER; + PAGE_PTR prev_page = NULL; + BTREE_NODE_TYPE node_type; + char *rv_dummy_undo_data = NULL; + char rec_buf[IO_MAX_PAGE_SIZE + BTREE_MAX_ALIGN]; + + LOG_DATA_ADDR addr; + LOG_LSA prev_lsa; + PGSLOTID slotid; + RECDES new_record; + + char new_rec_buf[IO_MAX_PAGE_SIZE + BTREE_MAX_ALIGN]; + new_record.data = PTR_ALIGN (new_rec_buf, BTREE_MAX_ALIGN); + new_record.area_size = IO_MAX_PAGE_SIZE; + + record.data = PTR_ALIGN (rec_buf, BTREE_MAX_ALIGN); + record.area_size = IO_MAX_PAGE_SIZE; + + char *rv_undo_data = NULL; + int rv_undo_data_capacity = IO_MAX_PAGE_SIZE; + char rv_undo_data_buffer[IO_MAX_PAGE_SIZE + BTREE_MAX_ALIGN]; + char *rv_undo_data_bufalign = PTR_ALIGN (rv_undo_data_buffer, BTREE_MAX_ALIGN); + + char rv_redo_data_buffer[BTREE_RV_BUFFER_SIZE + BTREE_MAX_ALIGN]; + char *rv_redo_data = PTR_ALIGN (rv_redo_data_buffer, BTREE_MAX_ALIGN); + char *rv_redo_data_ptr = rv_redo_data; + int rv_redo_data_length = 0; + + int n_keys = 0; + int n_oids = 0; + + int key_len; + + helper->delete_helper.rv_keyval_data = rv_undo_data_bufalign; + if (helper->delete_helper.purpose == BTREE_OP_ONLINE_INDEX_TRAN_DELETE) + { + error_code = + btree_rv_save_keyval_for_undo (btid_int, key, BTREE_DELETE_CLASS_OID (&helper->delete_helper), + BTREE_DELETE_OID (&helper->delete_helper), + BTREE_DELETE_MVCC_INFO (&helper->delete_helper), helper->delete_helper.purpose, + rv_undo_data_bufalign, &helper->delete_helper.rv_keyval_data, + &rv_undo_data_capacity, &helper->delete_helper.rv_keyval_data_length); + + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + } + + helper->delete_helper.leaf_addr.offset = search_key->slotid; + helper->delete_helper.leaf_addr.pgptr = *leaf_page; + helper->delete_helper.leaf_addr.vfid = &btid_int->sys_btid->vfid; + + 
helper->delete_helper.rv_redo_data = rv_redo_data; + helper->delete_helper.rv_redo_data_ptr = rv_redo_data_ptr; + + /* We are in leaf level now, and we must inspect if we have found the OID inside the key. */ + if (search_key->result == BTREE_KEY_FOUND) + { + /* We search the key for the OID. If we find it, we should find it with DELETE_FLAG set, therefore we must + * delete it in place. + */ + + /* Get the record. */ + if (spage_get_record (thread_p, *leaf_page, search_key->slotid, &record, COPY) != S_SUCCESS) + { + assert_release (false); + error_code = ER_FAILED; + return error_code; + } + + /* Read the record. */ + error_code = + btree_read_record (thread_p, btid_int, *leaf_page, &record, NULL, &leaf_info, BTREE_LEAF_NODE, + &dummy_clear_key, &offset_after_key, PEEK_KEY_VALUE, NULL); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + error_code = + btree_find_oid_with_page_and_record (thread_p, btid_int, &helper->delete_helper.object_info.oid, *leaf_page, + helper->delete_helper.purpose, NULL, &record, &leaf_info, offset_after_key, + &page_found, &prev_page, &offset_to_object, &btree_mvcc_info, &new_record); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + node_type = (page_found == *leaf_page) ? BTREE_LEAF_NODE : BTREE_OVERFLOW_NODE; + + if (offset_to_object != NOT_FOUND) + { + /* Inspect the key and its MVCC_INFO. If we find the object, then the object should have either INSERT_FLAG + * set, or it should be without any flags set. + */ + btree_online_index_check_state (btree_mvcc_info.insert_mvccid); + + if (node_type == BTREE_LEAF_NODE) + { + slotid = search_key->slotid; + } + else + { + slotid = 1; + } + + if (btree_online_index_is_insert_flag_state (btree_mvcc_info.insert_mvccid)) + { + /* Insert flag set. We must change the flag to DELETE_FLAG. */ + + /* Redo logging. 
*/ + if (node_type == BTREE_OVERFLOW_NODE) + { + BTREE_RV_SET_OVERFLOW_NODE (&helper->delete_helper.leaf_addr); + } + LOG_RV_RECORD_SET_MODIFY_MODE (&addr, LOG_RV_RECORD_UPDATE_PARTIAL); + + /* Logging. */ + addr.pgptr = page_found; + addr.offset = slotid; + addr.vfid = &btid_int->sys_btid->vfid; + + /* Set the new state to DELETE_FLAG. */ + btree_online_index_set_delete_flag_state (btree_mvcc_info.insert_mvccid); + + /* Change the state of the record. */ + btree_online_index_change_state (thread_p, btid_int, &new_record, node_type, offset_to_object, + btree_mvcc_info.insert_mvccid, NULL, &rv_redo_data_ptr); + + if (spage_update (thread_p, page_found, slotid, &new_record) != SP_SUCCESS) + { + assert_release (false); + error_code = ER_FAILED; + return error_code; + } + + /* We need to log previous lsa. */ + LSA_COPY (&prev_lsa, pgbuf_get_lsa (page_found)); + + /* Logging. */ + BTREE_RV_GET_DATA_LENGTH (rv_redo_data_ptr, rv_redo_data, rv_redo_data_length); + + btree_delete_log (&helper->delete_helper, + BTREE_DELETE_MODIFY_MSG ("Tran delete change from INSERT_FLAG to DELETE_FLAG"), + BTREE_DELETE_MODIFY_ARGS (thread_p, &helper->delete_helper, page_found, &prev_lsa, + node_type == BTREE_LEAF_NODE, slotid, new_record.length, + btid_int->sys_btid)); + + btree_rv_log_delete_object (thread_p, helper->delete_helper, helper->delete_helper.leaf_addr, 0, + rv_redo_data_length, NULL, rv_redo_data); + + pgbuf_set_dirty (thread_p, page_found, DONT_FREE); + + return error_code; + } + else + { + /* Normal state. We need to physically delete the object. */ + assert (btree_online_index_is_normal_state (btree_mvcc_info.insert_mvccid)); + if (node_type == BTREE_LEAF_NODE + && (btree_record_get_num_oids (thread_p, btid_int, &new_record, offset_after_key, node_type) == 1)) + { + /* Only one OID in the key, we will remove the key as well. 
*/ + n_keys = -1; + } + n_oids = -1; + + error_code = + btree_key_remove_object (thread_p, key, btid_int, &helper->delete_helper, *leaf_page, &new_record, + &leaf_info, offset_after_key, search_key, &page_found, prev_page, node_type, + offset_to_object); + + if (error_code == NO_ERROR && BTREE_IS_UNIQUE (btid_int->unique_pk)) + { + logtb_tran_update_unique_stats (thread_p, btid_int->sys_btid, n_keys, n_oids, 0, false); + } + + return error_code; + } + } + else + { + ; /* Fall through and do the usual case. */ + } + } + + /* We did not find the object. We have to check if there is enough space in the leaf for the object. If there is, + * we insert it in place without any restarts. + */ + + btree_delete_helper_to_insert_helper (&helper->delete_helper, &helper->insert_helper); + helper->insert_helper.purpose = BTREE_OP_ONLINE_INDEX_TRAN_INSERT; + helper->insert_helper.op_type = SINGLE_ROW_INSERT; + + /* delete_helper does not hold information regarding the length of the key in page. + * We need this information so that we can check whether we have enough space to insert the new object. + */ + + key_len = btree_get_disk_size_of_key (key); + helper->insert_helper.key_len_in_page = BTREE_GET_KEY_LEN_IN_PAGE (key_len); + + if (!btree_key_insert_does_leaf_need_split (thread_p, btid_int, *leaf_page, &helper->insert_helper, search_key)) + { + /* There is enough space. */ + + /* Set DELETE_FLAG in the helper structure. 
*/ + helper->insert_helper.obj_info.mvcc_info.flags |= BTREE_OID_HAS_MVCC_INSID; + btree_online_index_set_delete_flag_state (helper->insert_helper.obj_info.mvcc_info.insert_mvccid); + + helper->insert_helper.purpose = BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF; + if (search_key->result == BTREE_KEY_FOUND) + { + error_code = + btree_key_append_object_non_unique (thread_p, btid_int, key, *leaf_page, search_key, &new_record, + offset_after_key, &leaf_info, &helper->insert_helper.obj_info, + &helper->insert_helper); + } + else + { + error_code = btree_key_insert_new_key (thread_p, btid_int, key, *leaf_page, &helper->insert_helper, + search_key); + n_keys = 1; + } + + n_oids = 1; + + if (error_code == NO_ERROR && BTREE_IS_UNIQUE (btid_int->unique_pk)) + { + logtb_tran_update_unique_stats (thread_p, btid_int->sys_btid, n_keys, n_oids, 0, false); + } + + return error_code; + } + + /* Not enough space. We have to restart the traverse and try to insert the object with DELETE_FLAG set. */ + search_key->result = BTREE_KEY_NOTFOUND; + return error_code; +} + +/* + * btree_key_online_index_tran_insert_DF () - BTREE_PROCESS_KEY_FUNCTION used for inserting a new object + * with DELETE_FLAG set in b-tree during online index loading. + * + * return : Error code. + * thread_p (in) : Thread entry. + * btid_int (in) : B-tree info. + * key (int) : Key info + * leaf_page (in) : Pointer to the leaf page. + * search_key (in) : Search helper + * restart (in/out): Restart + * args (in/out) : BTREE_INSERT_HELPER *. + */ +static int +btree_key_online_index_tran_insert_DF (THREAD_ENTRY * thread_p, BTID_INT * btid_int, DB_VALUE * key, + PAGE_PTR * leaf_page, BTREE_SEARCH_KEY_HELPER * search_key, bool * restart, + void *other_args) +{ + BTREE_HELPER *helper = (BTREE_HELPER *) other_args; + int error_code = NO_ERROR; /* Error code. */ + RECDES record; /* Record descriptor for leaf key record. */ + LEAF_REC leaf_info; /* Leaf record info. 
*/ + int offset_after_key; /* Offset in record data where packed key is ended. */ + bool dummy_clear_key; /* Dummy field used as argument for btree_read_record. */ + PAGE_PTR page_found = NULL; + int offset_to_object = 0; + BTREE_MVCC_INFO btree_mvcc_info = BTREE_MVCC_INFO_INITIALIZER; + PAGE_PTR prev_page = NULL; + BTREE_NODE_TYPE node_type; + + LOG_DATA_ADDR addr; + LOG_LSA prev_lsa; + PGSLOTID slotid; + RECDES new_record; + char rec_buf[IO_MAX_PAGE_SIZE + BTREE_MAX_ALIGN]; + char new_rec_buf[IO_MAX_PAGE_SIZE + BTREE_MAX_ALIGN]; + + new_record.data = PTR_ALIGN (new_rec_buf, BTREE_MAX_ALIGN); + new_record.area_size = IO_MAX_PAGE_SIZE; + + record.data = PTR_ALIGN (rec_buf, BTREE_MAX_ALIGN); + record.area_size = IO_MAX_PAGE_SIZE; + + char *rv_undo_data = NULL; + int rv_undo_data_capacity = IO_MAX_PAGE_SIZE; + char rv_undo_data_buffer[IO_MAX_PAGE_SIZE + BTREE_MAX_ALIGN]; + char *rv_undo_data_bufalign = PTR_ALIGN (rv_undo_data_buffer, BTREE_MAX_ALIGN); + + char rv_redo_data_buffer[BTREE_RV_BUFFER_SIZE + BTREE_MAX_ALIGN]; + char *rv_redo_data = PTR_ALIGN (rv_redo_data_buffer, BTREE_MAX_ALIGN); + char *rv_redo_data_ptr = rv_redo_data; + int rv_redo_data_length = 0; + + int n_keys = 0; + int n_oids = 0; + + /* Save the key for undo process. 
*/ + helper->insert_helper.rv_keyval_data = rv_undo_data_bufalign; + if (helper->insert_helper.purpose == BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF) + { + error_code = + btree_rv_save_keyval_for_undo (btid_int, key, BTREE_INSERT_CLASS_OID (&helper->insert_helper), + BTREE_INSERT_OID (&helper->insert_helper), + BTREE_INSERT_MVCC_INFO (&helper->insert_helper), helper->insert_helper.purpose, + rv_undo_data_bufalign, &helper->insert_helper.rv_keyval_data, + &rv_undo_data_capacity, &helper->insert_helper.rv_keyval_data_length); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + } + + helper->insert_helper.leaf_addr.offset = search_key->slotid; + helper->insert_helper.leaf_addr.pgptr = *leaf_page; + helper->insert_helper.leaf_addr.vfid = &btid_int->sys_btid->vfid; + + /* Redo logging. */ + helper->insert_helper.rv_redo_data = rv_redo_data; + helper->insert_helper.rv_redo_data_ptr = helper->insert_helper.rv_redo_data; + + /* We are in leaf level now, and we must inspect if we have found the OID inside the key. */ + if (search_key->result == BTREE_KEY_FOUND) + { + /* We search the key for the OID. */ + + /* Get the record. */ + if (spage_get_record (thread_p, *leaf_page, search_key->slotid, &record, COPY) != S_SUCCESS) + { + assert_release (false); + error_code = ER_FAILED; + return error_code; + } + + /* Read the record. 
*/ + error_code = + btree_read_record (thread_p, btid_int, *leaf_page, &record, NULL, &leaf_info, BTREE_LEAF_NODE, + &dummy_clear_key, &offset_after_key, PEEK_KEY_VALUE, NULL); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + error_code = + btree_find_oid_with_page_and_record (thread_p, btid_int, &helper->insert_helper.obj_info.oid, *leaf_page, + helper->insert_helper.purpose, NULL, &record, &leaf_info, offset_after_key, + &page_found, &prev_page, &offset_to_object, &btree_mvcc_info, &new_record); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + node_type = (page_found == *leaf_page) ? BTREE_LEAF_NODE : BTREE_OVERFLOW_NODE; + + if (offset_to_object != NOT_FOUND) + { + /* Inspect the key and its MVCC_INFO. This is the transactional insert with DELETE_FLAG, which means + * that if we can find the object, then the object must have either INSERT_FLAG set, or the object + * should be in normal state. + */ + assert (!btree_online_index_is_delete_flag_state (btree_mvcc_info.insert_mvccid)); + + if (node_type == BTREE_LEAF_NODE) + { + slotid = search_key->slotid; + } + else + { + slotid = 1; + } + + if (btree_online_index_is_normal_state (btree_mvcc_info.insert_mvccid)) + { + /* This translates into a physical delete as the object has already been inserted into the btree. */ + /* Normal state. We need to physically delete the object. */ + assert (btree_online_index_is_normal_state (btree_mvcc_info.insert_mvccid)); + + btree_insert_helper_to_delete_helper (&helper->insert_helper, &helper->delete_helper); + + helper->delete_helper.purpose = BTREE_OP_ONLINE_INDEX_TRAN_DELETE; + helper->delete_helper.op_type = SINGLE_ROW_DELETE; + + if (node_type == BTREE_LEAF_NODE + && (btree_record_get_num_oids (thread_p, btid_int, &new_record, offset_after_key, node_type) == 1)) + { + /* Only one OID in the key, we will remove the key as well. 
*/ + n_keys = -1; + } + n_oids = -1; + + error_code = + btree_key_remove_object (thread_p, key, btid_int, &helper->delete_helper, *leaf_page, &record, + &leaf_info, offset_after_key, search_key, &page_found, prev_page, node_type, + offset_to_object); + + if (error_code == NO_ERROR && BTREE_IS_UNIQUE (btid_int->unique_pk)) + { + logtb_tran_update_unique_stats (thread_p, btid_int->sys_btid, n_keys, n_oids, 0, false); + } + return error_code; + } + else + { + /* We must have INSERT_FLAG set. */ + assert (btree_online_index_is_insert_flag_state (btree_mvcc_info.insert_mvccid)); + + /* We have to change the state to DELETE_FLAG. */ + + /* Prepare logging. */ + addr.offset = slotid; + addr.pgptr = page_found; + addr.vfid = &btid_int->sys_btid->vfid; + + /* Redo logging. */ + if (node_type == BTREE_OVERFLOW_NODE) + { + BTREE_RV_SET_OVERFLOW_NODE (&addr); + } + LOG_RV_RECORD_SET_MODIFY_MODE (&addr, LOG_RV_RECORD_UPDATE_PARTIAL); + + /* Set the new state to INSERT_FLAG. */ + btree_online_index_set_delete_flag_state (btree_mvcc_info.insert_mvccid); + + /* Change the state of the record. */ + btree_online_index_change_state (thread_p, btid_int, &new_record, node_type, offset_to_object, + btree_mvcc_info.insert_mvccid, NULL, &rv_redo_data_ptr); + + if (spage_update (thread_p, page_found, slotid, &new_record) != SP_SUCCESS) + { + assert_release (false); + error_code = ER_FAILED; + return error_code; + } + + /* We need to log previous lsa. */ + LSA_COPY (&prev_lsa, pgbuf_get_lsa (page_found)); + + /* Logging. 
*/ + BTREE_RV_GET_DATA_LENGTH (rv_redo_data_ptr, rv_redo_data, rv_redo_data_length); + + btree_insert_log (&helper->insert_helper, + BTREE_INSERT_MODIFY_MSG ("Tran delete change from INSERT_FLAG to DELETE_FLAG"), + BTREE_INSERT_MODIFY_ARGS (thread_p, &helper->insert_helper, page_found, &prev_lsa, + node_type == BTREE_LEAF_NODE, slotid, new_record.length, + btid_int->sys_btid)); + + btree_rv_log_insert_object (thread_p, helper->insert_helper, addr, 0, rv_redo_data_length, NULL, + rv_redo_data); + + pgbuf_set_dirty (thread_p, page_found, DONT_FREE); + + return error_code; + } + } + else + { + /* Key was found but the object wasn't. We must append the object to the current key. */ + /* Safeguards. */ + assert (search_key->result == BTREE_KEY_FOUND && offset_to_object == NOT_FOUND); + + /* We did not find the object. We have to insert it with DELETE_FLAG set. */ + helper->insert_helper.obj_info.mvcc_info.flags |= BTREE_OID_HAS_MVCC_INSID; + btree_online_index_set_delete_flag_state (helper->insert_helper.obj_info.mvcc_info.insert_mvccid); + + error_code = + btree_key_append_object_non_unique (thread_p, btid_int, key, *leaf_page, search_key, &new_record, + offset_after_key, &leaf_info, &helper->insert_helper.obj_info, + &helper->insert_helper); + + if (error_code == NO_ERROR && BTREE_IS_UNIQUE (btid_int->unique_pk)) + { + logtb_tran_update_unique_stats (thread_p, btid_int->sys_btid, 0, 1, 0, false); + } + } + } + else + { + /* Key was not found, we must insert it. */ + /* We have to insert it with DELETE_FLAG set. 
*/ + helper->insert_helper.obj_info.mvcc_info.flags |= BTREE_OID_HAS_MVCC_INSID; + btree_online_index_set_delete_flag_state (helper->insert_helper.obj_info.mvcc_info.insert_mvccid); + + error_code = btree_key_insert_new_key (thread_p, btid_int, key, *leaf_page, &helper->insert_helper, search_key); + if (error_code == NO_ERROR && BTREE_IS_UNIQUE (btid_int->unique_pk)) + { + logtb_tran_update_unique_stats (thread_p, btid_int->sys_btid, 1, 1, 0, false); + } + } + + return error_code; +} + +// +// btree_online_index_change_state () - set new object state during online index +// +// thread_p (in) : thread entry +// btid_int (in) : b-tree info +// record (in) : leaf/overflow record +// node_type (in) : node type +// offset_to_object (in) : offset_to_object +// new_state (in) : new object state +// rv_undo_data (in/out) : buffer to append undo log data +// rv_redo_data (in/out) : buffer to append redo log data +// +void +btree_online_index_change_state (THREAD_ENTRY * thread_p, BTID_INT * btid_int, RECDES * record, + BTREE_NODE_TYPE node_type, int offset_to_object, MVCCID new_state, + char **rv_undo_data, char **rv_redo_data) +{ + int offset_to_insid_mvccid; + char *oid_ptr = NULL; + char *mvccid_ptr = NULL; + + oid_ptr = record->data + offset_to_object; + + offset_to_insid_mvccid = offset_to_object + OR_OID_SIZE; + if (btree_is_class_oid_packed (btid_int, record, node_type, (offset_to_object == 0))) + { + /* Class OID is also packed. */ + offset_to_insid_mvccid += OR_OID_SIZE; + } + /* Set mvccid_ptr. */ + mvccid_ptr = record->data + offset_to_insid_mvccid; + + /* Assign the new mvcc_insid. */ + if (btree_record_object_is_flagged (oid_ptr, BTREE_OID_HAS_MVCC_INSID)) + { + // todo - compare to old state and make sure it changes + /* We have MVCC_INSID. 
*/ + if (!btree_online_index_is_normal_state (new_state) + || btree_is_fixed_size (btid_int, record, node_type, (offset_to_object == 0))) + { + /* If we have any state set, except the normal state, or if it is a fixed size record. */ + btree_set_mvccid (record, offset_to_insid_mvccid, &new_state, rv_undo_data, rv_redo_data); + } + else + { + /* We have normal state of the record and the record is not a fixed size one. */ + /* This translates in removing the state. */ + btree_record_remove_insid (thread_p, btid_int, record, node_type, offset_to_object, rv_undo_data, + rv_redo_data, NULL); + } + } + else if (!btree_online_index_is_normal_state (new_state)) + { + /* We don't have MVCC_INSID. */ + btree_add_mvccid (record, offset_to_object, offset_to_insid_mvccid, new_state, BTREE_OID_HAS_MVCC_INSID, + rv_undo_data, rv_redo_data); + } + else + { + // todo - is this possible? basically state is not changed... + assert (false); + } + +#if !defined (NDEBUG) + btree_check_valid_record (thread_p, btid_int, record, node_type, NULL); +#endif +} + +// +// btree_is_class_oid_packed () - is class OID packed with object? +// +// return : true if class oid is packed, false otherwise +// btid_int (in) : b-tree info +// record (in) : record descriptor +// node_type (in) : leaf/overflow node type +// is_first (in) : is object first in record? +// +static bool +btree_is_class_oid_packed (BTID_INT * btid_int, RECDES * record, BTREE_NODE_TYPE node_type, bool is_first) +{ + // class oid is packed if: + // 1. index is unique and + // 2.1. is overflow node or + // 2.2. is not first in leaf record or + // 2.3. 
is first in leaf record and record is flagged with BTREE_LEAF_RECORD_CLASS_OID + + if (!btid_int->unique_pk) + { + // not unique, no class is saved + return false; + } + + // is unique + + if (node_type == BTREE_OVERFLOW_NODE) + { + // all overflow objects save class + return true; + } + + // is leaf + + if (!is_first) + { + // non-first in leaf record saves class + return true; + } + + // first saves class only if flagged + return btree_leaf_is_flaged (record, BTREE_LEAF_RECORD_CLASS_OID); +} + +static inline bool +btree_is_fixed_size (BTID_INT * btid_int, RECDES * record, BTREE_NODE_TYPE node_type, bool is_first) +{ + return ((node_type == BTREE_OVERFLOW_NODE) || (!is_first && BTREE_IS_UNIQUE (btid_int->unique_pk)) + || (is_first && btree_leaf_is_flaged (record, BTREE_LEAF_RECORD_OVERFLOW_OIDS))); +} + +static bool +btree_is_insert_data_purpose (BTREE_OP_PURPOSE purpose) +{ + switch (purpose) + { + case BTREE_OP_INSERT_NEW_OBJECT: + case BTREE_OP_INSERT_MVCC_DELID: + case BTREE_OP_INSERT_MARK_DELETED: + case BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE: + case BTREE_OP_ONLINE_INDEX_IB_INSERT: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF: + return true; + default: + return false; + } +} + +static bool +btree_is_insert_object_purpose (BTREE_OP_PURPOSE purpose) +{ + switch (purpose) + { + case BTREE_OP_INSERT_NEW_OBJECT: + case BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE: + case BTREE_OP_ONLINE_INDEX_IB_INSERT: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF: + return true; + default: + return false; + } +} + +static bool +btree_is_insert_delid_purpose (BTREE_OP_PURPOSE purpose) +{ + switch (purpose) + { + case BTREE_OP_INSERT_MVCC_DELID: + case BTREE_OP_INSERT_MARK_DELETED: + return true; + default: + return false; + } +} + +static bool +btree_is_delete_data_purpose (BTREE_OP_PURPOSE purpose) +{ + switch 
(purpose) + { + case BTREE_OP_DELETE_OBJECT_PHYSICAL: + case BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED: + case BTREE_OP_DELETE_UNDO_INSERT: + case BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD: + case BTREE_OP_DELETE_UNDO_INSERT_DELID: + case BTREE_OP_DELETE_VACUUM_OBJECT: + case BTREE_OP_DELETE_VACUUM_INSID: + case BTREE_OP_ONLINE_INDEX_IB_DELETE: + case BTREE_OP_ONLINE_INDEX_TRAN_DELETE: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT: + return true; + default: + return false; + } +} + +static bool +btree_is_delete_object_purpose (BTREE_OP_PURPOSE purpose) +{ + switch (purpose) + { + case BTREE_OP_DELETE_OBJECT_PHYSICAL: + case BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED: + case BTREE_OP_DELETE_UNDO_INSERT: + case BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD: + case BTREE_OP_DELETE_VACUUM_OBJECT: + case BTREE_OP_ONLINE_INDEX_IB_DELETE: + case BTREE_OP_ONLINE_INDEX_TRAN_DELETE: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT: + return true; + default: + return false; + } +} + +// +// btree_rv_log_delete_object () - log b-tree delete operation according to purpose +// +// thread_p (in) : thread entry +// delete_helper (in) : delete helper +// addr (in) : address for logging +// undo_length (in) : physical undo log size +// redo_length (in) : redo log size (is always physical) +// undo_data (in) : physical undo log +// redo_data (in) : redo log (is always physical) +// +static void +btree_rv_log_delete_object (THREAD_ENTRY * thread_p, const BTREE_DELETE_HELPER & delete_helper, LOG_DATA_ADDR & addr, + int undo_length, int redo_length, const char *undo_data, const char *redo_data) +{ + assert (btree_is_delete_object_purpose (delete_helper.purpose)); + + if (delete_helper.is_system_op_started) + { + // we need to log undoredo physical + log_append_undoredo_data (thread_p, RVBT_RECORD_MODIFY_UNDOREDO, &addr, undo_length, redo_length, undo_data, + redo_data); + } + else + { + switch (delete_helper.purpose) + { + case BTREE_OP_DELETE_OBJECT_PHYSICAL: + // log undo logical, log redo 
physical + log_append_undoredo_data (thread_p, RVBT_DELETE_OBJECT_PHYSICAL, &addr, delete_helper.rv_keyval_data_length, + redo_length, delete_helper.rv_keyval_data, redo_data); + break; + case BTREE_OP_ONLINE_INDEX_TRAN_DELETE: + log_append_undoredo_data (thread_p, RVBT_ONLINE_INDEX_UNDO_TRAN_DELETE, &addr, + delete_helper.rv_keyval_data_length, redo_length, delete_helper.rv_keyval_data, + redo_data); + break; + case BTREE_OP_DELETE_OBJECT_PHYSICAL_POSTPONED: + log_append_run_postpone (thread_p, RVBT_DELETE_OBJECT_PHYSICAL, &addr, pgbuf_get_vpid_ptr (addr.pgptr), + redo_length, redo_data, &delete_helper.reference_lsa); + break; + case BTREE_OP_DELETE_UNDO_INSERT: + case BTREE_OP_DELETE_UNDO_INSERT_UNQ_MULTIUPD: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT: + log_append_compensate_with_undo_nxlsa (thread_p, RVBT_RECORD_MODIFY_COMPENSATE, + pgbuf_get_vpid_ptr (addr.pgptr), addr.offset, addr.pgptr, redo_length, + redo_data, LOG_FIND_CURRENT_TDES (thread_p), + &delete_helper.reference_lsa); + break; + case BTREE_OP_DELETE_VACUUM_OBJECT: + case BTREE_OP_ONLINE_INDEX_IB_DELETE: + log_append_redo_data (thread_p, RVBT_DELETE_OBJECT_PHYSICAL, &addr, redo_length, redo_data); + break; + default: + assert (false); + break; + } + } +} + +// +// btree_rv_log_insert_object () - log b-tree insert operation according to purpose +// +// thread_p (in) : thread entry +// insert_helper (in) : insert helper +// addr (in) : address for logging +// undo_length (in) : physical undo log size +// redo_length (in) : redo log size (is always physical) +// undo_data (in) : physical undo log +// redo_data (in) : redo log (is always physical) +// +static void +btree_rv_log_insert_object (THREAD_ENTRY * thread_p, const BTREE_INSERT_HELPER & insert_helper, LOG_DATA_ADDR & addr, + int undo_length, int redo_length, const char *undo_data, const char *redo_data) +{ + assert (btree_is_insert_object_purpose (insert_helper.purpose)); + + if (insert_helper.is_system_op_started) + { + // undo/redo physical 
+ log_append_undoredo_data (thread_p, RVBT_RECORD_MODIFY_UNDOREDO, &addr, undo_length, redo_length, undo_data, + redo_data); + } + else + { + switch (insert_helper.purpose) + { + case BTREE_OP_INSERT_NEW_OBJECT: + // undo logical, redo physical + log_append_undoredo_data (thread_p, insert_helper.rcvindex, &addr, insert_helper.rv_keyval_data_length, + redo_length, insert_helper.rv_keyval_data, redo_data); + break; + + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF: + /* Safeguard */ + assert (btree_online_index_is_delete_flag_state (insert_helper.obj_info.mvcc_info.insert_mvccid)); + + /* Insert with DELETE_FLAG. */ + log_append_undoredo_data (thread_p, RVBT_ONLINE_INDEX_UNDO_TRAN_DELETE, &addr, + insert_helper.rv_keyval_data_length, redo_length, insert_helper.rv_keyval_data, + redo_data); + + break; + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT: + /* Normal insert. */ + log_append_undoredo_data (thread_p, RVBT_ONLINE_INDEX_UNDO_TRAN_INSERT, &addr, + insert_helper.rv_keyval_data_length, redo_length, insert_helper.rv_keyval_data, + redo_data); + + break; + case BTREE_OP_ONLINE_INDEX_IB_INSERT: + // redo logging + log_append_redo_data (thread_p, RVBT_RECORD_MODIFY_NO_UNDO, &addr, redo_length, redo_data); + break; + case BTREE_OP_INSERT_UNDO_PHYSICAL_DELETE: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE: + log_append_compensate_with_undo_nxlsa (thread_p, RVBT_RECORD_MODIFY_COMPENSATE, + pgbuf_get_vpid_ptr (addr.pgptr), addr.offset, addr.pgptr, + redo_length, redo_data, LOG_FIND_CURRENT_TDES (thread_p), + &insert_helper.compensate_undo_nxlsa); + break; + default: + assert (false); + break; + } + } +} + +/* + * btree_find_oid_with_page_and_record () - Find OID in leaf/overflow pages and output its position and the record. + * + * return : Error code. + * thread_p (in) : Thread entry. + * btid_int (in) : B-tree info. + * oid (in) : Object OID. + * leaf_page (in) : Fixed leaf page (where object's key is found). + * purpose (in) : Purpose/context for the function call. 
+ * match_mvccinfo (in) : Non-null value to be matched or null if it doesn't matter. + * record (in) : Key leaf record. + * leaf_rec_info (in) : Key leaf record info. + * after_key_offset (in) : Offset in leaf record where packed key is ended. + * found_page (out) : Outputs leaf or overflow page where object is found. + * prev_page (out) : Previous page of the overflow page where object object is found. If object is in leaf it + * will output NULL. If object is in first overflow, it will output leaf page. + * If argument is NULL, previous overflow page is unfixed. + * offset_to_object (out) : Offset to object in the record of leaf/overflow. + * new_record (out) : The new record in case of overflow pages. + * + */ +static int +btree_find_oid_with_page_and_record (THREAD_ENTRY * thread_p, BTID_INT * btid_int, OID * oid, PAGE_PTR leaf_page, + BTREE_OP_PURPOSE purpose, BTREE_MVCC_INFO * match_mvccinfo, RECDES * record, + LEAF_REC * leaf_info, int offset_after_key, PAGE_PTR * found_page, + PAGE_PTR * prev_page, int *offset_to_object, BTREE_MVCC_INFO * object_mvcc_info, + RECDES * new_record) +{ + int error_code = NO_ERROR; + + error_code = btree_find_oid_and_its_page (thread_p, btid_int, oid, leaf_page, purpose, NULL, record, leaf_info, + offset_after_key, found_page, prev_page, offset_to_object, + object_mvcc_info); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + + if (*offset_to_object == NOT_FOUND) + { + /* Object not found, end this. */ + + *new_record = *record; + return error_code; + } + + /* We found the object. */ + + if (*found_page == leaf_page) + { + /* No overflow, set new_record to the record. */ + *new_record = *record; + return error_code; + } + + /* Overflow page. */ + + /* Get the new record. 
*/ + if (spage_get_record (thread_p, *found_page, 1, new_record, COPY) != S_SUCCESS) + { + assert_release (false); + return ER_FAILED; + } + + return error_code; +} + +/* + * btree_rv_keyval_undo_online_index_tran_delete () - + * return: int + * recv(in): Recovery structure + * + * Note: undo the deletion of a pair to the B+tree, + * by inserting the pair to the tree during an online index operation. + */ +int +btree_rv_keyval_undo_online_index_tran_delete (THREAD_ENTRY * thread_p, LOG_RCV * recv) +{ + BTID_INT btid; + BTID sys_btid; + DB_VALUE key; + OID cls_oid; + OID oid; + char *datap; + int datasize; + BTREE_MVCC_INFO mvcc_info; + int error_code = NO_ERROR; + + /* btid needs a place to unpack the sys_btid into. We'll use stack space. */ + btid.sys_btid = &sys_btid; + + /* extract the stored btid, key, oid data */ + datap = (char *) recv->data; + datasize = recv->length; + error_code = btree_rv_read_keyval_info_nocopy (thread_p, datap, datasize, &btid, &cls_oid, &oid, &mvcc_info, &key); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + assert (!OID_ISNULL (&oid)); + + /* Insert object and all its info. */ + error_code = btree_online_index_dispatcher (thread_p, btid.sys_btid, &key, &cls_oid, &oid, btid.unique_pk, + BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE, &recv->reference_lsa); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + return error_code; + } + + return NO_ERROR; +} + +/* + * btree_rv_keyval_undo_online_index_tran_insert () - Undo insert operation for btree during online index. + * + * return : Error code. + * thread_p (in) : Thread entry. + * recv (in) : Recovery data. + */ +int +btree_rv_keyval_undo_online_index_tran_insert (THREAD_ENTRY * thread_p, LOG_RCV * recv) +{ + BTID_INT btid; + BTID sys_btid; + OID cls_oid; + OID oid; + char *datap; + int datasize; + BTREE_MVCC_INFO dummy_mvcc_info; + int err = NO_ERROR; + DB_VALUE key; + + /* btid needs a place to unpack the sys_btid into. We'll use stack space. 
*/ + btid.sys_btid = &sys_btid; + + /* extract the stored btid, key, oid data */ + datap = (char *) recv->data; + datasize = recv->length; + err = btree_rv_read_keyval_info_nocopy (thread_p, datap, datasize, &btid, &cls_oid, &oid, &dummy_mvcc_info, &key); + if (err != NO_ERROR) + { + ASSERT_ERROR (); + return err; + } + + assert (!OID_ISNULL (&oid)); + + /* Undo insert: just delete object and all its information. */ + err = btree_online_index_dispatcher (thread_p, btid.sys_btid, &key, &cls_oid, &oid, btid.unique_pk, + BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT, &recv->reference_lsa); + if (err != NO_ERROR) + { + ASSERT_ERROR (); + return err; + } + + return NO_ERROR; +} + +void +btree_insert_helper_to_delete_helper (BTREE_INSERT_HELPER * insert_helper, BTREE_DELETE_HELPER * delete_helper) +{ + /* oid, classoid and mvcc info */ + delete_helper->object_info.oid = insert_helper->obj_info.oid; + delete_helper->object_info.class_oid = insert_helper->obj_info.class_oid; + delete_helper->object_info.mvcc_info = insert_helper->obj_info.mvcc_info; + + /* save the LSA needed for recovery */ + LSA_COPY (&delete_helper->reference_lsa, &insert_helper->compensate_undo_nxlsa); + + /* Leaf addr. */ + delete_helper->leaf_addr.offset = insert_helper->leaf_addr.offset; + delete_helper->leaf_addr.pgptr = insert_helper->leaf_addr.pgptr; + delete_helper->leaf_addr.vfid = insert_helper->leaf_addr.vfid; + + /* Undo logging. */ + delete_helper->rv_keyval_data = insert_helper->rv_keyval_data; + delete_helper->rv_keyval_data_length = insert_helper->rv_keyval_data_length; + + /* Redo logging. */ + delete_helper->rv_redo_data = insert_helper->rv_redo_data; + delete_helper->rv_redo_data_ptr = delete_helper->rv_redo_data; + + /* Error logging. 
*/ + delete_helper->log_operations = insert_helper->log_operations; + delete_helper->printed_key = insert_helper->printed_key; + delete_helper->printed_key_sha1 = insert_helper->printed_key_sha1; +} + +void +btree_delete_helper_to_insert_helper (BTREE_DELETE_HELPER * delete_helper, BTREE_INSERT_HELPER * insert_helper) +{ + /* oid, classoid and mvcc info */ + insert_helper->obj_info.oid = delete_helper->object_info.oid; + insert_helper->obj_info.class_oid = delete_helper->object_info.class_oid; + insert_helper->obj_info.mvcc_info = delete_helper->object_info.mvcc_info; + + /* save the LSA needed for recovery */ + LSA_COPY (&insert_helper->compensate_undo_nxlsa, &delete_helper->reference_lsa); + + /* Leaf addr. */ + insert_helper->leaf_addr.offset = delete_helper->leaf_addr.offset; + insert_helper->leaf_addr.pgptr = delete_helper->leaf_addr.pgptr; + insert_helper->leaf_addr.vfid = delete_helper->leaf_addr.vfid; + + /* Undo logging. */ + insert_helper->rv_keyval_data = delete_helper->rv_keyval_data; + insert_helper->rv_keyval_data_length = delete_helper->rv_keyval_data_length; + + /* Redo logging. */ + insert_helper->rv_redo_data = delete_helper->rv_redo_data; + insert_helper->rv_redo_data_ptr = insert_helper->rv_redo_data; + + /* Error logging. 
*/ + insert_helper->log_operations = delete_helper->log_operations; + insert_helper->printed_key = delete_helper->printed_key; + insert_helper->printed_key_sha1 = delete_helper->printed_key_sha1; +} + +static inline bool +btree_is_online_index_loading (BTREE_OP_PURPOSE purpose) +{ + switch (purpose) + { + case BTREE_OP_ONLINE_INDEX_IB_INSERT: + case BTREE_OP_ONLINE_INDEX_IB_DELETE: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT: + case BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF: + case BTREE_OP_ONLINE_INDEX_TRAN_DELETE: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE: + case BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT: + return true; + default: + return false; + } + + return false; +} + +int +btree_online_index_check_unique_constraint (THREAD_ENTRY * thread_p, BTID * btid, const char *index_name, + OID * class_oid) +{ + int ret = NO_ERROR; + int g_num_oids = 0, g_num_nulls = 0, g_num_keys = 0; + LOG_TRAN_BTID_UNIQUE_STATS *unique_stats = logtb_tran_find_btid_stats (thread_p, btid, true); + + if (unique_stats == NULL) + { + return ER_FAILED; + } + + ret = logtb_get_global_unique_stats (thread_p, btid, &g_num_oids, &g_num_nulls, &g_num_keys); + if (ret != NO_ERROR) + { + ASSERT_ERROR (); + return ret; + } + + if ((g_num_oids + unique_stats->tran_stats.num_oids) + != (g_num_keys + unique_stats->tran_stats.num_keys) + (g_num_nulls + unique_stats->tran_stats.num_nulls)) + { + /* Unique constraint violation. 
*/ + BTREE_SET_UNIQUE_VIOLATION_ERROR (thread_p, NULL, NULL, class_oid, btid, index_name); + return ER_BTREE_UNIQUE_FAILED; + } + + return NO_ERROR; +} + +int +btree_get_class_oid_of_unique_btid (THREAD_ENTRY * thread_p, BTID * btid, OID * class_oid) +{ + PAGE_PTR root_page; + BTREE_ROOT_HEADER *root_header = NULL; + + OID_SET_NULL (class_oid); + + root_page = btree_fix_root_with_info (thread_p, btid, PGBUF_LATCH_READ, NULL, &root_header, NULL); + if (root_page == NULL) + { + return ER_FAILED; + } + + if (BTREE_IS_UNIQUE (root_header->unique_pk)) + { + /* Copy the class oid */ + COPY_OID (class_oid, &root_header->topclass_oid); + } + + pgbuf_unfix_and_init (thread_p, root_page); + + return NO_ERROR; +} + +bool +btree_is_btid_online_index (THREAD_ENTRY * thread_p, OID * class_oid, BTID * btid) +{ + OR_CLASSREP *rep = NULL; + int idx_incache = -1; + bool result = false; + int i; + + rep = heap_classrepr_get (thread_p, class_oid, NULL, NULL_REPRID, &idx_incache); + if (rep == NULL) + { + assert (false); + return false; + } + + /* Iterate through indexes of current class_oid and check if the one matching the btid is an online one. */ + for (i = 0; i < rep->n_indexes; i++) + { + if (BTID_IS_EQUAL (btid, &rep->indexes[i].btid)) + { + if (rep->indexes[i].index_status == OR_ONLINE_INDEX_BUILDING_IN_PROGRESS) + { + result = true; + } + break; + } + } + + heap_classrepr_free_and_init (rep, &idx_incache); + + return result; +} diff --git a/src/storage/btree.h b/src/storage/btree.h index bfd0dfc2727..a4488789049 100644 --- a/src/storage/btree.h +++ b/src/storage/btree.h @@ -485,7 +485,16 @@ enum btree_op_purpose BTREE_OP_DELETE_VACUUM_INSID, /* Remove only insert MVCCID for an object in b-tree. It is called by vacuum when the * object becomes visible to all running transactions. */ - BTREE_OP_NOTIFY_VACUUM /* Notify vacuum of an object in need of cleanup. */ + BTREE_OP_NOTIFY_VACUUM, /* Notify vacuum of an object in need of cleanup. 
*/ + + /* Below purposes are used during online index loading. */ + BTREE_OP_ONLINE_INDEX_IB_INSERT, /* Insert done by the Index Builder. */ + BTREE_OP_ONLINE_INDEX_IB_DELETE, /* Delete done by the Index Builder. */ + BTREE_OP_ONLINE_INDEX_TRAN_INSERT, /* Insert done by a transaction. */ + BTREE_OP_ONLINE_INDEX_TRAN_INSERT_DF, /* Insert done by a transaction with DELETE_FLAG set. */ + BTREE_OP_ONLINE_INDEX_UNDO_TRAN_INSERT, /* Undo an insert */ + BTREE_OP_ONLINE_INDEX_TRAN_DELETE, /* Delete done by a transaction. */ + BTREE_OP_ONLINE_INDEX_UNDO_TRAN_DELETE /* Undo a delete. */ }; typedef enum btree_op_purpose BTREE_OP_PURPOSE; @@ -724,4 +733,14 @@ extern PERF_PAGE_TYPE btree_get_perf_btree_page_type (THREAD_ENTRY * thread_p, P extern void btree_dump_key (THREAD_ENTRY * thread_p, FILE * fp, DB_VALUE * key); +extern int btree_online_index_dispatcher (THREAD_ENTRY * thread_p, BTID * btid, DB_VALUE * key, OID * cls_oid, + OID * oid, int unique, BTREE_OP_PURPOSE purpose, LOG_LSA * undo_nxlsa); + +extern int btree_rv_keyval_undo_online_index_tran_insert (THREAD_ENTRY * thread_p, LOG_RCV * recv); +extern int btree_rv_keyval_undo_online_index_tran_delete (THREAD_ENTRY * thread_p, LOG_RCV * recv); + +extern int btree_online_index_check_unique_constraint (THREAD_ENTRY * thread_p, BTID * btid, const char *index_name, + OID * class_oid); +extern int btree_get_class_oid_of_unique_btid (THREAD_ENTRY * thread_p, BTID * btid, OID * class_oid); +extern bool btree_is_btid_online_index (THREAD_ENTRY * thread_p, OID * class_oid, BTID * btid); #endif /* _BTREE_H_ */ diff --git a/src/storage/btree_load.c b/src/storage/btree_load.c index 7f480c16dd6..686298652a3 100644 --- a/src/storage/btree_load.c +++ b/src/storage/btree_load.c @@ -163,7 +163,7 @@ static int btree_first_oid (THREAD_ENTRY * thread_p, DB_VALUE * this_key, OID * MVCC_REC_HEADER * p_mvcc_rec_header, LOAD_ARGS * load_args); static int btree_construct_leafs (THREAD_ENTRY * thread_p, const RECDES * in_recdes, void *arg); 
static int btree_get_value_from_leaf_slot (THREAD_ENTRY * thread_p, BTID_INT * btid_int, PAGE_PTR leaf_ptr, - int slot_id, DB_VALUE * key); + int slot_id, DB_VALUE * key, bool * clear_key); #if defined(CUBRID_DEBUG) static int btree_dump_sort_output (const RECDES * recdes, LOAD_ARGS * load_args); #endif /* defined(CUBRID_DEBUG) */ @@ -180,13 +180,19 @@ static void list_print (const BTREE_NODE * this_list); static int btree_pack_root_header (RECDES * Rec, BTREE_ROOT_HEADER * header, TP_DOMAIN * key_type); static void btree_rv_save_root_head (int null_delta, int oid_delta, int key_delta, RECDES * recdes); static int btree_advance_to_next_slot_and_fix_page (THREAD_ENTRY * thread_p, BTID_INT * btid, VPID * vpid, - PAGE_PTR * pg_ptr, INT16 * slot_id, DB_VALUE * key, bool is_desc, - int *key_cnt, BTREE_NODE_HEADER ** header, MVCC_SNAPSHOT * mvcc); + PAGE_PTR * pg_ptr, INT16 * slot_id, DB_VALUE * key, + bool * clear_key, bool is_desc, int *key_cnt, + BTREE_NODE_HEADER ** header, MVCC_SNAPSHOT * mvcc); static int btree_load_check_fk (THREAD_ENTRY * thread_p, const LOAD_ARGS * load_args_local, const SORT_ARGS * sort_args_local); static int btree_is_slot_visible (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR pg_ptr, MVCC_SNAPSHOT * mvcc_snapshot, int slot_id, bool * is_slot_visible); +static int online_index_builder (THREAD_ENTRY * thread_p, BTID_INT * btid_int, HFID * hfids, OID * class_oids, + int n_classes, int *attrids, int n_attrs, FUNCTION_INDEX_INFO func_idx_info, + PRED_EXPR_WITH_CONTEXT * filter_pred, int *attrs_prefix_length, + HEAP_CACHE_ATTRINFO * attr_info, HEAP_SCANCACHE * scancache, int unique_pk); + /* * btree_get_node_header () - * @@ -752,7 +758,7 @@ xbtree_load_index (THREAD_ENTRY * thread_p, BTID * btid, const char *bt_name, TP func_index_info.col_id = func_col_id; func_index_info.attr_index_start = func_attr_index_start; func_index_info.expr = NULL; - if (stx_map_stream_to_func_pred (thread_p, (FUNC_PRED **) (&func_index_info.expr), 
func_pred_stream, + if (stx_map_stream_to_func_pred (thread_p, &func_index_info.expr, func_pred_stream, func_pred_stream_size, &func_unpack_info)) { goto error; @@ -806,7 +812,7 @@ xbtree_load_index (THREAD_ENTRY * thread_p, BTID * btid, const char *bt_name, TP { if (heap_attrinfo_start (thread_p, &sort_args->class_ids[cur_class], sort_args->n_attrs, &sort_args->attr_ids[attr_offset], - ((FUNC_PRED *) sort_args->func_index_info->expr)->cache_attrinfo) != NO_ERROR) + sort_args->func_index_info->expr->cache_attrinfo) != NO_ERROR) { goto error; } @@ -891,7 +897,7 @@ xbtree_load_index (THREAD_ENTRY * thread_p, BTID * btid, const char *bt_name, TP } if (sort_args->func_index_info) { - heap_attrinfo_end (thread_p, ((FUNC_PRED *) sort_args->func_index_info->expr)->cache_attrinfo); + heap_attrinfo_end (thread_p, sort_args->func_index_info->expr->cache_attrinfo); } } sort_args->attrinfo_inited = 0; @@ -1002,7 +1008,7 @@ xbtree_load_index (THREAD_ENTRY * thread_p, BTID * btid, const char *bt_name, TP } if (sort_args->func_index_info && sort_args->func_index_info->expr) { - (void) qexec_clear_func_pred (thread_p, (FUNC_PRED *) sort_args->func_index_info->expr); + (void) qexec_clear_func_pred (thread_p, sort_args->func_index_info->expr); } if (func_unpack_info) { @@ -1061,7 +1067,7 @@ xbtree_load_index (THREAD_ENTRY * thread_p, BTID * btid, const char *bt_name, TP } if (sort_args->func_index_info && sort_args->func_index_info->expr) { - heap_attrinfo_end (thread_p, ((FUNC_PRED *) sort_args->func_index_info->expr)->cache_attrinfo); + heap_attrinfo_end (thread_p, sort_args->func_index_info->expr->cache_attrinfo); } } VFID_SET_NULL (&btid->vfid); @@ -1112,7 +1118,7 @@ xbtree_load_index (THREAD_ENTRY * thread_p, BTID * btid, const char *bt_name, TP } if (sort_args->func_index_info && sort_args->func_index_info->expr) { - (void) qexec_clear_func_pred (thread_p, (FUNC_PRED *) sort_args->func_index_info->expr); + (void) qexec_clear_func_pred (thread_p, 
sort_args->func_index_info->expr); } if (func_unpack_info) { @@ -1411,8 +1417,8 @@ btree_build_nleafs (THREAD_ENTRY * thread_p, LOAD_ARGS * load_args, int n_nulls, rec.area_size = DB_PAGESIZE; rec.data = PTR_ALIGN (rec_buf, BTREE_MAX_ALIGN); - db_make_null (&last_key); - db_make_null (&first_key); + btree_init_temp_key_value (&clear_last_key, &last_key); + btree_init_temp_key_value (&clear_first_key, &first_key); db_make_null (&prefix_key); temp_data = (char *) os_malloc (DB_PAGESIZE); @@ -2951,7 +2957,7 @@ btree_sort_get_next (THREAD_ENTRY * thread_p, RECDES * temp_recdes, void *arg) } if (sort_args->func_index_info && sort_args->func_index_info->expr) { - heap_attrinfo_end (thread_p, ((FUNC_PRED *) sort_args->func_index_info->expr)->cache_attrinfo); + heap_attrinfo_end (thread_p, sort_args->func_index_info->expr->cache_attrinfo); } } sort_args->attrinfo_inited = 0; @@ -3065,8 +3071,7 @@ btree_sort_get_next (THREAD_ENTRY * thread_p, RECDES * temp_recdes, void *arg) if (sort_args->func_index_info && sort_args->func_index_info->expr) { if (heap_attrinfo_read_dbvalues (thread_p, &sort_args->cur_oid, &sort_args->in_recdes, NULL, - ((FUNC_PRED *) sort_args->func_index_info->expr)->cache_attrinfo) != - NO_ERROR) + sort_args->func_index_info->expr->cache_attrinfo) != NO_ERROR) { return SORT_ERROR_OCCURRED; } @@ -3708,6 +3713,7 @@ int btree_load_check_fk (THREAD_ENTRY * thread_p, const LOAD_ARGS * load_args, const SORT_ARGS * sort_args) { DB_VALUE fk_key, pk_key; + bool clear_fk_key, clear_pk_key; int fk_node_key_cnt = -1, pk_node_key_cnt = -1; BTREE_NODE_HEADER *fk_node_header = NULL, *pk_node_header = NULL; VPID vpid; @@ -3732,8 +3738,8 @@ btree_load_check_fk (THREAD_ENTRY * thread_p, const LOAD_ARGS * load_args, const BTREE_SCAN_PART partitions[MAX_PARTITIONS]; bool has_nulls = false; - db_make_null (&fk_key); - db_make_null (&pk_key); + btree_init_temp_key_value (&clear_fk_key, &fk_key); + btree_init_temp_key_value (&clear_pk_key, &pk_key); 
mvcc_snapshot_dirty.snapshot_fnc = mvcc_satisfies_dirty; @@ -3819,8 +3825,8 @@ btree_load_check_fk (THREAD_ENTRY * thread_p, const LOAD_ARGS * load_args, const while (true) { ret = btree_advance_to_next_slot_and_fix_page (thread_p, sort_args->btid, &vpid, &curr_fk_pageptr, &fk_slot_id, - &fk_key, is_fk_scan_desc, &fk_node_key_cnt, &fk_node_header, - &mvcc_snapshot_dirty); + &fk_key, &clear_fk_key, is_fk_scan_desc, &fk_node_key_cnt, + &fk_node_header, &mvcc_snapshot_dirty); if (ret != NO_ERROR) { ASSERT_ERROR (); @@ -3988,8 +3994,9 @@ btree_load_check_fk (THREAD_ENTRY * thread_p, const LOAD_ARGS * load_args, const while (!found) { ret = btree_advance_to_next_slot_and_fix_page (thread_p, &pk_bt_scan.btid_int, &pk_bt_scan.C_vpid, - &pk_bt_scan.C_page, &pk_bt_scan.slot_id, &pk_key, false, - &pk_node_key_cnt, &pk_node_header, &mvcc_snapshot_dirty); + &pk_bt_scan.C_page, &pk_bt_scan.slot_id, &pk_key, + &clear_pk_key, false, &pk_node_key_cnt, &pk_node_header, + &mvcc_snapshot_dirty); if (ret != NO_ERROR) { goto end; @@ -4047,10 +4054,10 @@ btree_load_check_fk (THREAD_ENTRY * thread_p, const LOAD_ARGS * load_args, const if (found == true) { - pr_clear_value (&fk_key); + btree_clear_key_value (&clear_fk_key, &fk_key); } - pr_clear_value (&pk_key); + btree_clear_key_value (&clear_pk_key, &pk_key); } end: @@ -4081,15 +4088,8 @@ btree_load_check_fk (THREAD_ENTRY * thread_p, const LOAD_ARGS * load_args, const pgbuf_unfix_and_init (thread_p, pk_bt_scan.C_page); } - if (!DB_IS_NULL (&fk_key)) - { - pr_clear_value (&fk_key); - } - - if (!DB_IS_NULL (&pk_key)) - { - pr_clear_value (&pk_key); - } + btree_clear_key_value (&clear_fk_key, &fk_key); + btree_clear_key_value (&clear_pk_key, &pk_key); if (clear_pcontext == true) { @@ -4112,14 +4112,14 @@ btree_load_check_fk (THREAD_ENTRY * thread_p, const LOAD_ARGS * load_args, const * leaf_ptr(in): The leaf where the value needs to be extracted from. * slot_id(in): The slot from where the value must be pulled. 
* key(out): The value requested. + * clear_key(out): needs to clear key if set * */ static int btree_get_value_from_leaf_slot (THREAD_ENTRY * thread_p, BTID_INT * btid_int, PAGE_PTR leaf_ptr, int slot_id, - DB_VALUE * key) + DB_VALUE * key, bool * clear_key) { LEAF_REC leaf; - bool clear_first_key = false; int first_key_offset = 0; RECDES record; int ret = NO_ERROR; @@ -4131,7 +4131,7 @@ btree_get_value_from_leaf_slot (THREAD_ENTRY * thread_p, BTID_INT * btid_int, PA return ret; } - ret = btree_read_record (thread_p, btid_int, leaf_ptr, &record, key, &leaf, BTREE_LEAF_NODE, &clear_first_key, + ret = btree_read_record (thread_p, btid_int, leaf_ptr, &record, key, &leaf, BTREE_LEAF_NODE, clear_key, &first_key_offset, PEEK_KEY_VALUE, NULL); if (ret != NO_ERROR) { @@ -4151,6 +4151,7 @@ btree_get_value_from_leaf_slot (THREAD_ENTRY * thread_p, BTID_INT * btid_int, PA * pg_ptr(in/out): Page pointer for the current page. * slot_id(in/out): Slot id of the current/next value. * key(out): Requested key. + * clear_key(out): needs to clear key if set. * key_cnt(in/out): Number of keys in current page. * header(in/out): The header of the current page. * mvcc(in): Needed for visibility check. 
@@ -4162,7 +4163,7 @@ btree_get_value_from_leaf_slot (THREAD_ENTRY * thread_p, BTID_INT * btid_int, PA */ static int btree_advance_to_next_slot_and_fix_page (THREAD_ENTRY * thread_p, BTID_INT * btid, VPID * vpid, PAGE_PTR * pg_ptr, - INT16 * slot_id, DB_VALUE * key, bool is_desc, int *key_cnt, + INT16 * slot_id, DB_VALUE * key, bool * clear_key, bool is_desc, int *key_cnt, BTREE_NODE_HEADER ** header, MVCC_SNAPSHOT * mvcc) { int ret = NO_ERROR; @@ -4271,7 +4272,7 @@ btree_advance_to_next_slot_and_fix_page (THREAD_ENTRY * thread_p, BTID_INT * bti if (page != NULL) { - ret = btree_get_value_from_leaf_slot (thread_p, btid, page, *slot_id, key); + ret = btree_get_value_from_leaf_slot (thread_p, btid, page, *slot_id, key, clear_key); } *header = local_header; @@ -4344,3 +4345,447 @@ btree_is_slot_visible (THREAD_ENTRY * thread_p, BTID_INT * btid, PAGE_PTR pg_ptr return ret; } + +BTID * +xbtree_load_online_index (THREAD_ENTRY * thread_p, BTID * btid, const char *bt_name, TP_DOMAIN * key_type, + OID * class_oids, int n_classes, int n_attrs, int *attr_ids, int *attrs_prefix_length, + HFID * hfids, int unique_pk, int not_null_flag, OID * fk_refcls_oid, BTID * fk_refcls_pk_btid, + const char *fk_name, char *pred_stream, int pred_stream_size, char *func_pred_stream, + int func_pred_stream_size, int func_col_id, int func_attr_index_start) +{ + int cur_class, attr_offset; + BTID_INT btid_int; + PRED_EXPR_WITH_CONTEXT *filter_pred = NULL; + FUNCTION_INDEX_INFO func_index_info; + DB_TYPE single_node_type = DB_TYPE_NULL; + void *func_unpack_info = NULL; + bool is_sysop_started = false; + MVCC_SNAPSHOT *builder_snapshot = NULL; + HEAP_SCANCACHE scan_cache; + HEAP_CACHE_ATTRINFO attr_info; + int ret = NO_ERROR; + LOCK old_lock = SCH_M_LOCK; + LOCK new_lock = IX_LOCK; + bool scan_cache_inited = false; + bool attr_info_inited = false; + LOG_TDES *tdes; + int old_wait_msec; + int lock_ret; + + func_index_info.expr = NULL; + + /* Check for robustness */ + if (!btid || !hfids || 
!class_oids || !attr_ids || !key_type) + { + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_BTREE_LOAD_FAILED, 0); + return NULL; + } + + btid_int.sys_btid = btid; + btid_int.unique_pk = unique_pk; + +#if !defined(NDEBUG) + if (unique_pk) + { + assert (BTREE_IS_UNIQUE (btid_int.unique_pk)); + assert (BTREE_IS_PRIMARY_KEY (btid_int.unique_pk) || !BTREE_IS_PRIMARY_KEY (btid_int.unique_pk)); + } +#endif + + btid_int.key_type = key_type; + VFID_SET_NULL (&btid_int.ovfid); + btid_int.rev_level = BTREE_CURRENT_REV_LEVEL; + COPY_OID (&btid_int.topclass_oid, &class_oids[0]); + /* + * for btree_range_search, part_key_desc is re-set at btree_initialize_bts + */ + btid_int.part_key_desc = 0; + + /* init index key copy_buf info */ + btid_int.copy_buf = NULL; + btid_int.copy_buf_len = 0; + btid_int.nonleaf_key_type = btree_generate_prefix_domain (&btid_int); + + if (pred_stream && pred_stream_size > 0) + { + if (stx_map_stream_to_filter_pred (thread_p, &filter_pred, pred_stream, pred_stream_size) != NO_ERROR) + { + goto error; + } + } + + if (func_pred_stream && func_pred_stream_size > 0) + { + func_index_info.expr_stream = func_pred_stream; + func_index_info.expr_stream_size = func_pred_stream_size; + func_index_info.col_id = func_col_id; + func_index_info.attr_index_start = func_attr_index_start; + func_index_info.expr = NULL; + if (stx_map_stream_to_func_pred (thread_p, &func_index_info.expr, func_pred_stream, + func_pred_stream_size, &func_unpack_info)) + { + goto error; + } + } + + /* After building index acquire lock on table, the transaction has deadlock priority */ + tdes = LOG_FIND_CURRENT_TDES (thread_p); + if (tdes) + { + tdes->has_deadlock_priority = true; + } + + cur_class = 0; + /* + * Start a heap scan cache for reading objects using the first nun-null heap + * We are guaranteed that such a heap exists, otherwise btree_load_index + * would not have been called. 
+ */ + while (cur_class < n_classes && HFID_IS_NULL (&hfids[cur_class])) + { + cur_class++; + } + + attr_offset = cur_class * n_attrs; + + /* Start scancache */ + if (heap_scancache_start (thread_p, &scan_cache, &hfids[cur_class], &class_oids[cur_class], true, false, + NULL) != NO_ERROR) + { + goto error; + } + scan_cache_inited = true; + + if (heap_attrinfo_start (thread_p, &class_oids[cur_class], n_attrs, &attr_ids[attr_offset], &attr_info) != NO_ERROR) + { + goto error; + } + attr_info_inited = true; + + if (filter_pred != NULL) + { + if (heap_attrinfo_start (thread_p, &class_oids[cur_class], filter_pred->num_attrs_pred, + filter_pred->attrids_pred, filter_pred->cache_pred) != NO_ERROR) + { + goto error; + } + } + + if (func_index_info.expr != NULL) + { + if (heap_attrinfo_start (thread_p, &class_oids[cur_class], n_attrs, &attr_ids[attr_offset], + func_index_info.expr->cache_attrinfo) != NO_ERROR) + { + goto error; + } + } + + if (prm_get_bool_value (PRM_ID_LOG_BTREE_OPS)) + { + _er_log_debug (ARG_FILE_LINE, "DEBUG_BTREE: load start on class(%d, %d, %d), btid(%d, (%d, %d)).", + OID_AS_ARGS (&class_oids[cur_class]), BTID_AS_ARGS (btid_int.sys_btid)); + } + + /* Acquire snapshot!! */ + builder_snapshot = logtb_get_mvcc_snapshot (thread_p); + if (builder_snapshot == NULL) + { + goto error; + } + + /* Assign the snapshot to the scan_cache. */ + scan_cache.mvcc_snapshot = builder_snapshot; + + /* Demote the lock. */ + ret = lock_demote_class_lock (thread_p, class_oids, new_lock, &old_lock); + if (ret != NO_ERROR) + { + goto error; + } + + /* Start the online index builder. */ + ret = online_index_builder (thread_p, &btid_int, hfids, class_oids, n_classes, attr_ids, n_attrs, + func_index_info, filter_pred, attrs_prefix_length, &attr_info, &scan_cache, unique_pk); + + // We shold recover the lock regardless of return code from online_index_builder. + // Otherwise, we might be doomed to failure to abort the transaction. 
+ // We are going to do best to avoid lock promotion errors such as timeout and deadlocked. + + // never give up + old_wait_msec = xlogtb_reset_wait_msecs (thread_p, LK_INFINITE_WAIT); + + /* Promote the lock to SCH_M_LOCK */ + lock_ret = lock_object (thread_p, class_oids, oid_Root_class_oid, SCH_M_LOCK, LK_UNCOND_LOCK); + if (lock_ret != LK_GRANTED) + { + // FIXME: What can we do?? + assert (lock_ret == LK_GRANTED); + } + + // reset back + (void) xlogtb_reset_wait_msecs (thread_p, old_wait_msec); + + if (ret != NO_ERROR || lock_ret != LK_GRANTED) + { + goto error; + } + + if (BTREE_IS_UNIQUE (unique_pk)) + { + /* Check if we have a unique constraint violation for unique indexes. */ + ret = btree_online_index_check_unique_constraint (thread_p, btid, bt_name, class_oids); + if (ret != NO_ERROR) + { + ASSERT_ERROR (); + btid = NULL; + goto error; + } + } + + /* Clear memory structures. */ + if (attr_info_inited) + { + heap_attrinfo_end (thread_p, &attr_info); + + if (filter_pred) + { + heap_attrinfo_end (thread_p, filter_pred->cache_pred); + } + if (func_index_info.expr) + { + heap_attrinfo_end (thread_p, func_index_info.expr->cache_attrinfo); + } + + attr_info_inited = false; + } + + if (scan_cache_inited) + { + heap_scancache_end (thread_p, &scan_cache); + scan_cache_inited = false; + } + + /* Verify the tree. 
*/ + btree_verify_tree (thread_p, &class_oids[0], &btid_int, bt_name); + + if (filter_pred != NULL) + { + /* to clear db values from dbvalue regu variable */ + qexec_clear_pred_context (thread_p, filter_pred, true); + + if (filter_pred->unpack_info != NULL) + { + stx_free_additional_buff (thread_p, filter_pred->unpack_info); + stx_free_xasl_unpack_info (filter_pred->unpack_info); + db_private_free_and_init (thread_p, filter_pred->unpack_info); + } + } + + if (func_index_info.expr != NULL) + { + (void) qexec_clear_func_pred (thread_p, func_index_info.expr); + func_index_info.expr = NULL; + } + + if (func_unpack_info != NULL) + { + stx_free_additional_buff (thread_p, func_unpack_info); + stx_free_xasl_unpack_info (func_unpack_info); + db_private_free_and_init (thread_p, func_unpack_info); + } + + LOG_CS_ENTER (thread_p); + logpb_flush_pages_direct (thread_p); + LOG_CS_EXIT (thread_p); + + /* TODO: Is this all right? */ + /* Invalidate snapshot. */ + if (builder_snapshot != NULL) + { + logtb_invalidate_snapshot_data (thread_p); + } + + return btid; + +error: + if (attr_info_inited) + { + heap_attrinfo_end (thread_p, &attr_info); + + if (filter_pred) + { + heap_attrinfo_end (thread_p, filter_pred->cache_pred); + } + if (func_index_info.expr) + { + heap_attrinfo_end (thread_p, func_index_info.expr->cache_attrinfo); + } + + attr_info_inited = false; + } + + if (scan_cache_inited) + { + heap_scancache_end (thread_p, &scan_cache); + scan_cache_inited = false; + } + + if (filter_pred != NULL) + { + /* to clear db values from dbvalue regu variable */ + qexec_clear_pred_context (thread_p, filter_pred, true); + + if (filter_pred->unpack_info != NULL) + { + stx_free_additional_buff (thread_p, filter_pred->unpack_info); + stx_free_xasl_unpack_info (filter_pred->unpack_info); + db_private_free_and_init (thread_p, filter_pred->unpack_info); + } + } + + if (func_index_info.expr != NULL) + { + (void) qexec_clear_func_pred (thread_p, func_index_info.expr); + } + + if 
(func_unpack_info != NULL) + { + stx_free_additional_buff (thread_p, func_unpack_info); + stx_free_xasl_unpack_info (func_unpack_info); + db_private_free_and_init (thread_p, func_unpack_info); + } + + /* Invalidate snapshot. */ + if (builder_snapshot != NULL) + { + logtb_invalidate_snapshot_data (thread_p); + } + + return NULL; +} + +static int +online_index_builder (THREAD_ENTRY * thread_p, BTID_INT * btid_int, HFID * hfids, OID * class_oids, int n_classes, + int *attrids, int n_attrs, FUNCTION_INDEX_INFO func_idx_info, + PRED_EXPR_WITH_CONTEXT * filter_pred, int *attrs_prefix_length, HEAP_CACHE_ATTRINFO * attr_info, + HEAP_SCANCACHE * scancache, int unique_pk) +{ + int ret = NO_ERROR, eval_res; + OID cur_oid; + RECDES cur_record; + int cur_class; + SCAN_CODE sc; + FUNCTION_INDEX_INFO *p_func_idx_info; + PR_EVAL_FNC filter_eval_fnc; + DB_TYPE single_node_type = DB_TYPE_NULL; + int attr_offset; + DB_VALUE dbvalue; + DB_VALUE *p_dbvalue; + int *p_prefix_length; + char midxkey_buf[DBVAL_BUFSIZE + MAX_ALIGNMENT], *aligned_midxkey_buf; + char rec_buf[IO_MAX_PAGE_SIZE + BTREE_MAX_ALIGN]; + + aligned_midxkey_buf = PTR_ALIGN (midxkey_buf, MAX_ALIGNMENT); + db_make_null (&dbvalue); + p_func_idx_info = func_idx_info.expr ? &func_idx_info : NULL; + filter_eval_fnc = (filter_pred != NULL) ? eval_fnc (thread_p, filter_pred->pred, &single_node_type) : NULL; + + /* Get the first entry from heap. */ + cur_class = 0; + OID_SET_NULL (&cur_oid); + cur_oid.volid = hfids[cur_class].vfid.volid; + + /* Do not let the page fixed after an extract. */ + scancache->cache_last_fix_page = false; + + /* Start extracting from heap. */ + for (;;) + { + /* Scan from heap and insert into the index. 
*/ + attr_offset = cur_class * n_attrs; + + cur_record.data = PTR_ALIGN (rec_buf, BTREE_MAX_ALIGN); + cur_record.area_size = IO_MAX_PAGE_SIZE; + + sc = heap_next (thread_p, &hfids[cur_class], &class_oids[cur_class], &cur_oid, &cur_record, scancache, true); + if (sc == S_ERROR) + { + ASSERT_ERROR_AND_SET (ret); + return ret; + } + else if (sc == S_END) + { + break; + } + + /* Make sure the scan was a success. */ + assert (sc == S_SUCCESS); + assert (!OID_ISNULL (&cur_oid)); + + if (filter_pred) + { + ret = heap_attrinfo_read_dbvalues (thread_p, &cur_oid, &cur_record, NULL, filter_pred->cache_pred); + if (ret != NO_ERROR) + { + return ret; + } + + eval_res = (*filter_eval_fnc) (thread_p, filter_pred->pred, NULL, &cur_oid); + if (eval_res == V_ERROR) + { + return ER_FAILED; + } + else if (eval_res != V_TRUE) + { + continue; + } + } + + if (p_func_idx_info && p_func_idx_info->expr) + { + ret = heap_attrinfo_read_dbvalues (thread_p, &cur_oid, &cur_record, NULL, + p_func_idx_info->expr->cache_attrinfo); + if (ret != NO_ERROR) + { + return ret; + } + } + + if (n_attrs == 1) + { + /* Single column index. */ + ret = heap_attrinfo_read_dbvalues (thread_p, &cur_oid, &cur_record, NULL, attr_info); + if (ret != NO_ERROR) + { + return ret; + } + } + + p_prefix_length = NULL; + if (attrs_prefix_length) + { + p_prefix_length = &(attrs_prefix_length[0]); + } + + /* Generate the key. */ + p_dbvalue = heap_attrinfo_generate_key (thread_p, n_attrs, &attrids[attr_offset], p_prefix_length, attr_info, + &cur_record, &dbvalue, aligned_midxkey_buf, p_func_idx_info); + if (p_dbvalue == NULL) + { + return ER_FAILED; + } + + /* Dispatch the insert operation */ + ret = btree_online_index_dispatcher (thread_p, btid_int->sys_btid, p_dbvalue, &class_oids[cur_class], &cur_oid, + unique_pk, BTREE_OP_ONLINE_INDEX_IB_INSERT, NULL); + /* Clear the index key. 
*/ + pr_clear_value (p_dbvalue); + + if (ret != NO_ERROR) + { + return ret; + } + } + + return ret; +} diff --git a/src/storage/btree_load.h b/src/storage/btree_load.h index 3274b1b79af..8f96ab665bd 100644 --- a/src/storage/btree_load.h +++ b/src/storage/btree_load.h @@ -257,6 +257,7 @@ extern void btree_rv_nodehdr_dump (FILE * fp, int length, void *data); extern void btree_rv_mvcc_save_increments (BTID * btid, int key_delta, int oid_delta, int null_delta, RECDES * recdes); extern bool btree_clear_key_value (bool * clear_flag, DB_VALUE * key_value); +extern void btree_init_temp_key_value (bool * clear_flag, DB_VALUE * key_value); extern int btree_create_overflow_key_file (THREAD_ENTRY * thread_p, BTID_INT * btid); extern int btree_init_overflow_header (THREAD_ENTRY * thread_p, PAGE_PTR page_ptr, BTREE_OVERFLOW_HEADER * ovf_header); extern int btree_init_node_header (THREAD_ENTRY * thread_p, const VFID * vfid, PAGE_PTR page_ptr, diff --git a/src/storage/catalog_class.c b/src/storage/catalog_class.c index ea7513790f0..90fc12dbfdd 100644 --- a/src/storage/catalog_class.c +++ b/src/storage/catalog_class.c @@ -38,6 +38,7 @@ #include "xserver_interface.h" #include "object_primitive.h" #include "query_dump.h" +#include "tz_support.h" #include "db_date.h" #include "dbtype.h" #include "thread_manager.hpp" @@ -1481,6 +1482,17 @@ catcls_get_or_value_from_attribute (THREAD_ENTRY * thread_p, OR_BUF * buf_p, OR_ attr_val_p->need_clear = true; default_value_len = len; } + else + { + /* update_default exists and default_expr is not a DEFAULT EXPRESSION or does not exist */ + valcnv_convert_value_to_string (attr_val_p); + db_string_truncate (attr_val_p, DB_MAX_IDENTIFIER_LENGTH); + default_str_val = db_get_string (attr_val_p); + if (default_str_val != NULL) + { + default_value_len = strlen (default_str_val); + } + } if (classobj_get_prop (att_props, "update_default", &default_expr) > 0) { diff --git a/src/storage/disk_manager.c b/src/storage/disk_manager.c index 
1a8c8b662b8..beb356a2db7 100644 --- a/src/storage/disk_manager.c +++ b/src/storage/disk_manager.c @@ -49,6 +49,7 @@ #include "log_manager.h" #include "critical_section.h" #include "boot_sr.h" +#include "tz_support.h" #include "db_date.h" #include "bit.h" #include "fault_injection.h" @@ -748,7 +749,8 @@ disk_format (THREAD_ENTRY * thread_p, const char *dbname, VOLID volid, DBDEF_VOL } if (ext_info->voltype == DB_PERMANENT_VOLTYPE) { - LSA_SET_INIT_TEMP (&init_with_temp_lsa); + LSA_SET_TEMP_LSA (&init_with_temp_lsa); + /* Flush all dirty pages and then invalidate them from page buffer pool. So that we can reset the recovery * information directly using the io module */ @@ -1978,7 +1980,9 @@ disk_volume_expand (THREAD_ENTRY * thread_p, VOLID volid, DB_VOLTYPE voltype, DK error_code = fileio_expand_to (thread_p, volid, volume_new_npages, voltype); if (error_code != NO_ERROR) { - ASSERT_ERROR (); + // important note - we just committed volume expansion; we cannot afford any failures here + // caller won't update cache!! + assert (false); return error_code; } @@ -6334,6 +6338,8 @@ disk_check_volume (THREAD_ENTRY * thread_p, INT16 volid, bool repair) pgbuf_unfix_and_init (thread_p, page_volheader); } + disk_log ("disk_check_volume", "check volume %d is %s", volid, valid == DISK_VALID ? "valid" : "not valid"); + csect_exit (thread_p, CSECT_DISK_CHECK); return valid; @@ -6420,6 +6426,8 @@ disk_check (THREAD_ENTRY * thread_p, bool repair) } } + disk_log ("disk_check", "first check step is %s", valid == DISK_VALID ? "valid" : "not valid"); + /* release critical section. we will get it for each volume we check, to avoid blocking all reservations and * extensions for a long time. 
*/ csect_exit (thread_p, CSECT_DISK_CHECK); @@ -6519,6 +6527,8 @@ disk_check (THREAD_ENTRY * thread_p, bool repair) } } + disk_log ("disk_check", "full check is %s", "valid"); + /* all valid or all repaired */ csect_exit (thread_p, CSECT_DISK_CHECK); diff --git a/src/storage/double_write_buffer.c b/src/storage/double_write_buffer.c index 62a65caebae..be193059cd8 100644 --- a/src/storage/double_write_buffer.c +++ b/src/storage/double_write_buffer.c @@ -43,11 +43,6 @@ #define DWB_MAX_SIZE (32 * 1024 * 1024) #define DWB_MIN_BLOCKS 1 #define DWB_MAX_BLOCKS 32 -#define DWB_CHECKSUM_ELEMENT_NO_BITS 64 -#define DWB_CHECKSUM_ELEMENT_LOG2_NO_BITS 6 -#define DWB_CHECKSUM_ELEMENT_ALL_BITS 0xffffffffffffffff -#define DWB_CHECKSUM_ELEMENT_NO_FROM_SLOT_POS(bit_pos) ((bit_pos) >> DWB_CHECKSUM_ELEMENT_LOG2_NO_BITS) -#define DWB_CHECKSUM_ELEMENT_BIT_FROM_SLOT_POS(bit_pos) ((bit_pos) & (DWB_CHECKSUM_ELEMENT_NO_BITS - 1)) /* The total number of blocks. */ #define DWB_NUM_TOTAL_BLOCKS (dwb_Global.num_blocks) @@ -61,9 +56,6 @@ /* LOG2 from total number of blocks. */ #define DWB_LOG2_BLOCK_NUM_PAGES (dwb_Global.log2_num_block_pages) -/* The number of checksum elements in each block */ -#define DWB_CHECKSUM_NUM_ELEMENTS_IN_BLOCK (dwb_Global.checksum_info->num_checksum_elements_in_block) - /* Position mask. */ #define DWB_POSITION_MASK 0x000000003fffffff @@ -166,56 +158,6 @@ #define DWB_GET_NEXT_BLOCK_NO(block_no) \ ((block_no) == (DWB_NUM_TOTAL_BLOCKS - 1) ? 0 : ((block_no) + 1)) -/* Get DWB checksum start position for a block. */ -#define DWB_BLOCK_GET_CHECKSUM_START_POSITION(block_no) \ - (DWB_CHECKSUM_NUM_ELEMENTS_IN_BLOCK * (block_no)) - -/* Get DWB requested checksum element. */ -#define DWB_GET_REQUESTED_CHECKSUMS_ELEMENT(position) \ - (ATOMIC_INC_64 (&dwb_Global.checksum_info->requested_checksums[position], 0ULL)) - -/* Add value to DWB requested checksum element. 
*/ -#define DWB_ADD_TO_REQUESTED_CHECKSUMS_ELEMENT(position, value) \ - (assert ((ATOMIC_INC_64 (&dwb_Global.checksum_info->requested_checksums[position], 0ULL) & (value)) == 0), \ - ATOMIC_INC_64 (&dwb_Global.checksum_info->requested_checksums[position], value)) - -/* Check whether a value was added to requested checksum element. */ -#define DWB_IS_ADDED_TO_REQUESTED_CHECKSUMS_ELEMENT(position, value) \ - ((DWB_GET_REQUESTED_CHECKSUMS_ELEMENT (element_position) & (value)) == (value)) - -/* Reset a DWB requested checksum element. */ -#define DWB_RESET_REQUESTED_CHECKSUMS_ELEMENT(position) \ - (ATOMIC_TAS_64 (&dwb_Global.checksum_info->requested_checksums[position], 0ULL)) - -/* Get DWB computed checksum element. */ -#define DWB_GET_COMPUTED_CHECKSUMS_ELEMENT(position) \ - (ATOMIC_INC_64 (&dwb_Global.checksum_info->computed_checksums[position], 0ULL)) - -/* Add value to DWB computed checksum element. */ -#define DWB_ADD_TO_COMPUTED_CHECKSUMS_ELEMENT(position, value) \ - (assert ((ATOMIC_INC_64 (&dwb_Global.checksum_info->computed_checksums[position], 0ULL) & (value)) == 0), \ - ATOMIC_INC_64 (&dwb_Global.checksum_info->computed_checksums[position], value)) - -/* Check whether a value was added to computed checksum element. */ -#define DWB_IS_ADDED_TO_COMPUTED_CHECKSUMS_ELEMENT(position, value) \ - ((DWB_GET_COMPUTED_CHECKSUMS_ELEMENT (element_position) & (value)) == (value)) - -/* Reset a DWB computed checksum element. */ -#define DWB_RESET_COMPUTED_CHECKSUMS_ELEMENT(position) \ - (ATOMIC_TAS_64 (&dwb_Global.checksum_info->computed_checksums[position], 0ULL)) - -/* Get DWB completed checksum element. */ -#define DWB_GET_COMPLETED_CHECKSUMS_ELEMENT(position) \ - (ATOMIC_INC_64 (&dwb_Global.checksum_info->completed_checksums_mask[position], 0ULL)) - -/* Get DWB position in checksums element. 
*/ -#define DWB_GET_POSITION_IN_CHECKSUMS_ELEMENT(element_pos) \ - (ATOMIC_INC_32 (&dwb_Global.checksum_info->positions[element_pos], 0)) - -/* Reset position in DWB checksums element. */ -#define DWB_RESET_POSITION_IN_CHECKSUMS_ELEMENT(element_pos) \ - (ATOMIC_TAS_32 (&dwb_Global.checksum_info->positions[element_pos], 0)) - /* Get block version. */ #define DWB_GET_BLOCK_VERSION(block) \ (ATOMIC_INC_64 (&block->version, 0ULL)) @@ -228,13 +170,6 @@ struct double_write_wait_queue_entry DWB_WAIT_QUEUE_ENTRY *next; /* The next queue entry field. */ }; -/* Slot checksum status. */ -typedef enum -{ - DWB_SLOT_CHECKSUM_NOT_COMPUTED, /* The checksum for data contained in slot was not computed. */ - DWB_SLOT_CHECKSUM_COMPUTED /* The checksum for data contained in slot was computed. */ -} DWB_SLOT_CHECKSUM_STATUS; - /* DWB queue. */ typedef struct double_write_wait_queue DWB_WAIT_QUEUE; struct double_write_wait_queue @@ -248,19 +183,6 @@ struct double_write_wait_queue }; #define DWB_WAIT_QUEUE_INITIALIZER {NULL, NULL, NULL, 0, 0} -/* DWB checksum information. Used to allow parallel computation of checksums. */ -typedef struct dwb_checksum_info DWB_CHECKSUM_INFO; -struct dwb_checksum_info -{ - volatile UINT64 *requested_checksums; /* Bits array - 1 for each slot requiring checksum computation. */ - volatile UINT64 *computed_checksums; /* Bits array - 1 for each slot having checksum computed. */ - volatile UINT64 *completed_checksums_mask; /* Bits array - mask for block having all checksums computed. */ - volatile int *positions; /* Positions in checksum arrays, used to search bits faster. */ - unsigned int length; /* The length of checksum bits arrays. */ - unsigned int num_checksum_elements_in_block; /* The number of checksum elements in each block */ -}; - - /* Flush volume status. */ typedef enum { @@ -333,8 +255,6 @@ struct double_write_buffer volatile unsigned int blocks_flush_counter; /* The blocks flush counter. 
*/ volatile unsigned int next_block_to_flush; /* Next block to flush */ - DWB_CHECKSUM_INFO *checksum_info; /* The checksum info. */ - pthread_mutex_t mutex; /* The mutex to protect the wait queue. */ DWB_WAIT_QUEUE wait_queue; /* The wait queue, used when the DWB structure changed. */ @@ -360,7 +280,6 @@ static DOUBLE_WRITE_BUFFER dwb_Global = { 0, /* log2_num_block_pages */ 0, /* blocks_flush_counter */ 0, /* next_block_to_flush */ - NULL, /* checksum_info */ PTHREAD_MUTEX_INITIALIZER, /* mutex */ DWB_WAIT_QUEUE_INITIALIZER, /* wait_queue */ 0, /* position_with_flags */ @@ -415,10 +334,10 @@ static int dwb_compare_vol_fd (const void *v1, const void *v2); STATIC_INLINE FLUSH_VOLUME_INFO *dwb_add_volume_to_block_flush_area (THREAD_ENTRY * thread_p, DWB_BLOCK * block, int vol_fd) __attribute__ ((ALWAYS_INLINE)); STATIC_INLINE int dwb_write_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, DWB_SLOT * p_dwb_slots, - unsigned int ordered_slots_length, bool remove_from_hash) - __attribute__ ((ALWAYS_INLINE)); -STATIC_INLINE int dwb_flush_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, UINT64 * current_position_with_flags) + unsigned int ordered_slots_length, bool helper_can_flush, bool remove_from_hash) __attribute__ ((ALWAYS_INLINE)); +STATIC_INLINE int dwb_flush_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, bool helper_can_flush, + UINT64 * current_position_with_flags) __attribute__ ((ALWAYS_INLINE)); STATIC_INLINE void dwb_init_slot (DWB_SLOT * slot) __attribute__ ((ALWAYS_INLINE)); STATIC_INLINE int dwb_acquire_next_slot (THREAD_ENTRY * thread_p, bool can_wait, DWB_SLOT ** p_dwb_slot) __attribute__ ((ALWAYS_INLINE)); @@ -445,31 +364,10 @@ STATIC_INLINE void dwb_initialize_block (DWB_BLOCK * block, unsigned int block_n STATIC_INLINE int dwb_create_blocks (THREAD_ENTRY * thread_p, unsigned int num_blocks, unsigned int num_block_pages, DWB_BLOCK ** p_blocks) __attribute__ ((ALWAYS_INLINE)); STATIC_INLINE void dwb_finalize_block (DWB_BLOCK * block) 
__attribute__ ((ALWAYS_INLINE)); -STATIC_INLINE void dwb_initialize_checksum_info (DWB_CHECKSUM_INFO * checksum_info, UINT64 * requested_checksums, - UINT64 * computed_checksums, int *positions, - UINT64 * completed_checksums_mask, unsigned int checksum_length, - unsigned int num_checksum_elements_in_block) - __attribute__ ((ALWAYS_INLINE)); -STATIC_INLINE int dwb_create_checksum_info (THREAD_ENTRY * thread_p, unsigned int num_blocks, - unsigned int num_block_pages, DWB_CHECKSUM_INFO ** p_checksum_info) - __attribute__ ((ALWAYS_INLINE)); -STATIC_INLINE void dwb_add_checksum_computation_request (THREAD_ENTRY * thread_p, unsigned int block_no, - unsigned int position_in_block) - __attribute__ ((ALWAYS_INLINE)); -STATIC_INLINE void dwb_mark_checksum_computed (THREAD_ENTRY * thread_p, unsigned int block_no, - unsigned int position_in_block) __attribute__ ((ALWAYS_INLINE)); -STATIC_INLINE void dwb_finalize_checksum_info (DWB_CHECKSUM_INFO * checksum_info) __attribute__ ((ALWAYS_INLINE)); -STATIC_INLINE bool dwb_needs_speedup_checksum_computation (THREAD_ENTRY * thread_p) __attribute__ ((ALWAYS_INLINE)); -STATIC_INLINE int dwb_slot_compute_checksum (THREAD_ENTRY * thread_p, DWB_SLOT * slot, bool mark_checksum_computed, - bool * checksum_computed) __attribute__ ((ALWAYS_INLINE)); STATIC_INLINE int dwb_create_internal (THREAD_ENTRY * thread_p, const char *dwb_volume_name, UINT64 * current_position_with_flags) __attribute__ ((ALWAYS_INLINE)); STATIC_INLINE void dwb_get_next_block_for_flush (THREAD_ENTRY * thread_p, unsigned int *block_no) __attribute__ ((ALWAYS_INLINE)); -STATIC_INLINE bool dwb_block_has_all_checksums_computed (unsigned int block_no); -STATIC_INLINE int dwb_compute_block_checksums (THREAD_ENTRY * thread_p, DWB_BLOCK * block, - bool * block_slots_checksum_computed, bool * block_needs_flush) - __attribute__ ((ALWAYS_INLINE)); /* Slots hash functions. 
*/ static void *dwb_slots_hash_entry_alloc (void); @@ -487,20 +385,16 @@ STATIC_INLINE int dwb_slots_hash_delete (THREAD_ENTRY * thread_p, DWB_SLOT * slo #if defined (SERVER_MODE) static cubthread::daemon *dwb_flush_block_daemon = NULL; static cubthread::daemon *dwb_flush_block_helper_daemon = NULL; -static cubthread::daemon *dwb_checkum_computation_daemon = NULL; #endif // *INDENT-ON* static bool dwb_is_flush_block_daemon_available (void); static bool dwb_is_flush_block_helper_daemon_available (void); -static bool dwb_is_checksum_computation_daemon_available (void); static bool dwb_flush_block_daemon_is_running (void); static bool dwb_flush_block_helper_daemon_is_running (void); -static bool dwb_checksum_computation_daemon_is_running (void); static int dwb_flush_block_helper (THREAD_ENTRY * thread_p); -static int dwb_compute_checksums (THREAD_ENTRY * thread_p); static int dwb_flush_next_block (THREAD_ENTRY * thread_p); #if !defined (NDEBUG) @@ -921,8 +815,8 @@ dwb_starts_structure_modification (THREAD_ENTRY * thread_p, UINT64 * current_pos while (!ATOMIC_CAS_64 (&dwb_Global.position_with_flags, local_current_position_with_flags, new_position_with_flags)); #if defined(SERVER_MODE) - while (dwb_flush_block_daemon_is_running () || dwb_flush_block_helper_daemon_is_running () - || dwb_checksum_computation_daemon_is_running ()) + while ((ATOMIC_INC_32 (&dwb_Global.blocks_flush_counter, 0) > 0) + || dwb_flush_block_daemon_is_running () || dwb_flush_block_helper_daemon_is_running ()) { /* Can't modify structure while flush thread can access DWB. */ thread_sleep (20); @@ -963,8 +857,9 @@ dwb_starts_structure_modification (THREAD_ENTRY * thread_p, UINT64 * current_pos { if (DWB_IS_BLOCK_WRITE_STARTED (local_current_position_with_flags, block_no)) { - /* Flush all pages from current block */ - error_code = dwb_flush_block (thread_p, &dwb_Global.blocks[block_no], &local_current_position_with_flags); + /* Flush all pages from current block. I must flush all remaining data. 
*/ + error_code = + dwb_flush_block (thread_p, &dwb_Global.blocks[block_no], false, &local_current_position_with_flags); if (error_code != NO_ERROR) { /* Something wrong happened. */ @@ -1035,348 +930,6 @@ dwb_initialize_slot (DWB_SLOT * slot, FILEIO_PAGE * io_page, unsigned int positi slot->position_in_block = position_in_block; slot->block_no = block_no; - slot->checksum_status = DWB_SLOT_CHECKSUM_NOT_COMPUTED; -} - -/* - * dwb_initialize_checksum_info () - Initialize checksum info. - * - * return : Nothing. - * checksum_info (in/out) : The checksum info. - * requested_checksums (in) : Bits array containing checksum requests for slot data. - * computed_checksums (in): Bits array containing computed checksums for slot data. - * positions (in): Positions in checksum arrays. - * completed_checksums_mask(in): Mask for block having all checksums computed. - * checksum_length(in): The length of checksum bits arrays. - * num_checksum_elements_in_block(in): The number of checksum elements in each block. - */ -STATIC_INLINE void -dwb_initialize_checksum_info (DWB_CHECKSUM_INFO * checksum_info, UINT64 * requested_checksums, - UINT64 * computed_checksums, int *positions, UINT64 * completed_checksums_mask, - unsigned int checksum_length, unsigned int num_checksum_elements_in_block) -{ - assert (checksum_info != NULL); - - checksum_info->requested_checksums = requested_checksums; - checksum_info->computed_checksums = computed_checksums; - checksum_info->positions = positions; - checksum_info->completed_checksums_mask = completed_checksums_mask; - checksum_info->length = checksum_length; - checksum_info->num_checksum_elements_in_block = num_checksum_elements_in_block; -} - -/* - * dwb_create_checksum_info () - Create checksum info. - * - * return : Error code. - * thread_p (in) : The thread entry. - * num_blocks(in): The number of blocks. - * num_block_pages(in): The number of block pages. - * p_checksum_info(out): The created checksum info. 
- */ -STATIC_INLINE int -dwb_create_checksum_info (THREAD_ENTRY * thread_p, unsigned int num_blocks, unsigned int num_block_pages, - DWB_CHECKSUM_INFO ** p_checksum_info) -{ - UINT64 *requested_checksums = NULL, *computed_checksums = NULL, *completed_checksums_mask = NULL; - int *positions = NULL; - unsigned int checksum_length, num_checksum_elements_in_block; - unsigned int i, num_pages2; - DWB_CHECKSUM_INFO *checksum_info = NULL; - int error_code; - - checksum_info = (DWB_CHECKSUM_INFO *) malloc (sizeof (DWB_CHECKSUM_INFO)); - if (checksum_info == NULL) - { - er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, sizeof (DWB_CHECKSUM_INFO)); - error_code = ER_OUT_OF_VIRTUAL_MEMORY; - goto exit_on_error; - } - - num_checksum_elements_in_block = (DB_ALIGN (num_block_pages, DWB_CHECKSUM_ELEMENT_NO_BITS) - / DWB_CHECKSUM_ELEMENT_NO_BITS); - checksum_length = num_checksum_elements_in_block * num_blocks; - - /* requested_checksums */ - requested_checksums = (UINT64 *) malloc (checksum_length * sizeof (UINT64)); - if (requested_checksums == NULL) - { - er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, checksum_length * sizeof (UINT64)); - error_code = ER_OUT_OF_VIRTUAL_MEMORY; - goto exit_on_error; - } - memset (requested_checksums, 0, checksum_length * sizeof (UINT64)); - - /* computed_checksums */ - computed_checksums = (UINT64 *) malloc (checksum_length * sizeof (UINT64)); - if (computed_checksums == NULL) - { - er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, checksum_length * sizeof (UINT64)); - error_code = ER_OUT_OF_VIRTUAL_MEMORY; - goto exit_on_error; - } - memset (computed_checksums, 0, checksum_length * sizeof (UINT64)); - - /* positions */ - positions = (int *) malloc (checksum_length * sizeof (int)); - if (positions == NULL) - { - er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, checksum_length * sizeof (int)); - error_code = ER_OUT_OF_VIRTUAL_MEMORY; - goto exit_on_error; - } 
- memset (positions, 0, checksum_length * sizeof (int)); - - /* completed_checksums_mask */ - completed_checksums_mask = (UINT64 *) malloc (num_checksum_elements_in_block * sizeof (UINT64)); - if (completed_checksums_mask == NULL) - { - er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, - num_checksum_elements_in_block * sizeof (UINT64)); - error_code = ER_OUT_OF_VIRTUAL_MEMORY; - goto exit_on_error; - } - - for (num_pages2 = num_block_pages, i = 0; num_pages2 >= DWB_CHECKSUM_ELEMENT_NO_BITS; - num_pages2 -= DWB_CHECKSUM_ELEMENT_NO_BITS, i++) - { - completed_checksums_mask[i] = DWB_CHECKSUM_ELEMENT_ALL_BITS; - } - if (num_pages2 > 0) - { - assert (num_pages2 < DWB_CHECKSUM_ELEMENT_NO_BITS && i < num_checksum_elements_in_block); - completed_checksums_mask[i] = (1 << num_pages2) - 1; - } - - /* init checksum_info */ - dwb_initialize_checksum_info (checksum_info, requested_checksums, computed_checksums, - positions, completed_checksums_mask, checksum_length, num_checksum_elements_in_block); - - *p_checksum_info = checksum_info; - - return NO_ERROR; - -exit_on_error: - if (completed_checksums_mask != NULL) - { - free_and_init (completed_checksums_mask); - } - - if (positions != NULL) - { - free_and_init (positions); - } - - if (requested_checksums != NULL) - { - free_and_init (requested_checksums); - } - - if (computed_checksums != NULL) - { - free_and_init (computed_checksums); - } - - if (checksum_info != NULL) - { - free_and_init (checksum_info); - } - - return error_code; -} - -/* - * dwb_add_checksum_computation_request () - Add a checksum computation request. - * - * return : Nothing. - * thread_p(in): The thread entry. - * block_no(in): The block number. - * position_in_block(in): The position in block, where checksum computation is requested. 
- */ -STATIC_INLINE void -dwb_add_checksum_computation_request (THREAD_ENTRY * thread_p, unsigned int block_no, unsigned int position_in_block) -{ - unsigned int element_position; - UINT64 value; - - assert (block_no < DWB_NUM_TOTAL_BLOCKS && position_in_block < DWB_BLOCK_NUM_PAGES); - - element_position = (DWB_BLOCK_GET_CHECKSUM_START_POSITION (block_no) - + DWB_CHECKSUM_ELEMENT_NO_FROM_SLOT_POS (position_in_block)); - - value = 1ULL << DWB_CHECKSUM_ELEMENT_BIT_FROM_SLOT_POS (position_in_block); - - assert (!DWB_IS_ADDED_TO_COMPUTED_CHECKSUMS_ELEMENT (element_position, value)); - - DWB_ADD_TO_REQUESTED_CHECKSUMS_ELEMENT (element_position, value); -} - -/* - * dwb_mark_checksum_computed () - Mark checksum computed into bits array. - * - * return : Nothing. - * thread_p(in): The thread entry. - * block_no(in): The block number. - * position_in_block(in): The position in block. - */ -STATIC_INLINE void -dwb_mark_checksum_computed (THREAD_ENTRY * thread_p, unsigned int block_no, unsigned int position_in_block) -{ - unsigned int element_position; - UINT64 value; - - assert (block_no < DWB_NUM_TOTAL_BLOCKS && position_in_block < DWB_BLOCK_NUM_PAGES); - - element_position = (DWB_BLOCK_GET_CHECKSUM_START_POSITION (block_no) - + DWB_CHECKSUM_ELEMENT_NO_FROM_SLOT_POS (position_in_block)); - - value = 1ULL << DWB_CHECKSUM_ELEMENT_BIT_FROM_SLOT_POS (position_in_block); - - assert (DWB_IS_ADDED_TO_REQUESTED_CHECKSUMS_ELEMENT (element_position, value)); - - DWB_ADD_TO_COMPUTED_CHECKSUMS_ELEMENT (element_position, value); -} - -/* - * dwb_finalize_checksum_info () - Finalize checksum info. - * - * return : Nothing. - * checksum_info (in/out) : The checksum info. 
- */ -STATIC_INLINE void -dwb_finalize_checksum_info (DWB_CHECKSUM_INFO * checksum_info) -{ - assert (checksum_info != NULL); - - if (checksum_info->requested_checksums != NULL) - { - free_and_init (checksum_info->requested_checksums); - } - - if (checksum_info->computed_checksums != NULL) - { - free_and_init (checksum_info->computed_checksums); - } - - if (checksum_info->positions != NULL) - { - free_and_init (checksum_info->positions); - } - - if (checksum_info->completed_checksums_mask != NULL) - { - free_and_init (checksum_info->completed_checksums_mask); - } -} - -/* - * dwb_needs_speedup_checksum_computation () - Check whether checksum computation is too slow. - * - * return : Error code. - * thread_p (in) : thread entry - * - * Note: This function checks whether checksum thread remains behind. Currently, we consider that it remains - * behind if at least three pages are waiting for checksum computation. This computation is relative, since - * while computing, the checksum thread may advance. 
- */ -STATIC_INLINE bool -dwb_needs_speedup_checksum_computation (THREAD_ENTRY * thread_p) -{ -#define DWB_CHECKSUM_REQUESTS_THRESHOLD 16 - int position_in_checksum_element, position; - UINT64 requested_checksums_elem, bit_mask; - unsigned int element_position, num_elements, counter; - - num_elements = DWB_CHECKSUM_NUM_ELEMENTS_IN_BLOCK * DWB_NUM_TOTAL_BLOCKS; - - counter = 0; - for (element_position = 0; element_position < num_elements; element_position++) - { - requested_checksums_elem = DWB_GET_REQUESTED_CHECKSUMS_ELEMENT (element_position); - position_in_checksum_element = DWB_GET_POSITION_IN_CHECKSUMS_ELEMENT (element_position); - if (position_in_checksum_element >= DWB_CHECKSUM_ELEMENT_NO_BITS) - { - continue; - } - - bit_mask = 1ULL << position_in_checksum_element; - for (position = position_in_checksum_element; position < DWB_CHECKSUM_ELEMENT_NO_BITS; position++) - { - if ((requested_checksums_elem & bit_mask) == 0) - { - /* Stop searching bits */ - break; - } - - counter++; - bit_mask = bit_mask << 1; - } - - /* Check counter after each element. */ - if (counter >= DWB_CHECKSUM_REQUESTS_THRESHOLD) - { - return true; - } - } - - return false; - -#undef DWB_CHECKSUM_REQUESTS_THRESHOLD -} - -/* - * dwb_slot_compute_checksum () - Compute checksum for page contained in a slot. - * - * return : Error code - * thread_p (in) : Thread entry. - * slot(in/out): DWB slot - * mark_checksum_computed(in): True, if computed checksum must be marked in bits array. - * checksum_computed(out): True, if checksum is computed now. 
- */ -STATIC_INLINE int -dwb_slot_compute_checksum (THREAD_ENTRY * thread_p, DWB_SLOT * slot, bool mark_checksum_computed, - bool * checksum_computed) -{ - int error_code = NO_ERROR; - PERF_UTIME_TRACKER time_track; - - assert (slot != NULL); - - *checksum_computed = false; - if (!ATOMIC_CAS_32 (&slot->checksum_status, DWB_SLOT_CHECKSUM_NOT_COMPUTED, DWB_SLOT_CHECKSUM_COMPUTED)) - { - /* Already computed */ - return NO_ERROR; - } - - PERF_UTIME_TRACKER_START (thread_p, &time_track); - - if (!VPID_ISNULL (&slot->vpid)) - { - error_code = fileio_set_page_checksum (thread_p, slot->io_page); - if (error_code != NO_ERROR) - { - assert (false); - /* Restore it. */ - ATOMIC_TAS_32 (&slot->checksum_status, DWB_SLOT_CHECKSUM_NOT_COMPUTED); - return error_code; - } - } - else - { - /* The slot page will not be written on data volume. */ - assert (slot->io_page->prv.checksum == 0); - } - - PERF_UTIME_TRACKER_TIME (thread_p, &time_track, PSTAT_DWB_PAGE_CHECKSUM_TIME_COUNTERS); - - if (mark_checksum_computed) - { - dwb_mark_checksum_computed (thread_p, slot->block_no, slot->position_in_block); - } - - *checksum_computed = true; - return NO_ERROR; } /* @@ -1559,7 +1112,7 @@ dwb_create_blocks (THREAD_ENTRY * thread_p, unsigned int num_blocks, unsigned in { io_page = (FILEIO_PAGE *) (blocks_write_buffer[i] + j * IO_PAGESIZE); - fileio_initialize_res (thread_p, &io_page->prv); + fileio_initialize_res (thread_p, io_page, IO_PAGESIZE); dwb_initialize_slot (&slots[i][j], io_page, j, i); } @@ -1644,7 +1197,6 @@ dwb_create_internal (THREAD_ENTRY * thread_p, const char *dwb_volume_name, UINT6 unsigned int i, num_pages, num_block_pages; int vdes = NULL_VOLDES; DWB_BLOCK *blocks = NULL; - DWB_CHECKSUM_INFO *checksum_info = NULL; DWB_SLOTS_HASH *slots_hash = NULL; UINT64 new_position_with_flags; @@ -1686,12 +1238,6 @@ dwb_create_internal (THREAD_ENTRY * thread_p, const char *dwb_volume_name, UINT6 goto exit_on_error; } - error_code = dwb_create_checksum_info (thread_p, num_blocks, 
num_block_pages, &checksum_info); - if (error_code != NO_ERROR) - { - goto exit_on_error; - } - error_code = dwb_create_slots_hash (thread_p, &slots_hash); if (error_code != NO_ERROR) { @@ -1705,7 +1251,6 @@ dwb_create_internal (THREAD_ENTRY * thread_p, const char *dwb_volume_name, UINT6 dwb_Global.log2_num_block_pages = (unsigned int) (log ((float) num_block_pages) / log ((float) 2)); dwb_Global.blocks_flush_counter = 0; dwb_Global.next_block_to_flush = 0; - dwb_Global.checksum_info = checksum_info; pthread_mutex_init (&dwb_Global.mutex, NULL); dwb_init_wait_queue (&dwb_Global.wait_queue); dwb_Global.slots_hash = slots_hash; @@ -1740,12 +1285,6 @@ dwb_create_internal (THREAD_ENTRY * thread_p, const char *dwb_volume_name, UINT6 free_and_init (blocks); } - if (checksum_info != NULL) - { - dwb_finalize_checksum_info (checksum_info); - free_and_init (checksum_info); - } - if (slots_hash != NULL) { dwb_finalize_slots_hash (slots_hash); @@ -1926,7 +1465,7 @@ dwb_slots_hash_insert (THREAD_ENTRY * thread_p, VPID * vpid, DWB_SLOT * slot, in /* Invalidate the old slot, if is in the same block. We want to avoid duplicates in block at flush. 
*/ assert (slots_hash_entry->slot->position_in_block < slot->position_in_block); VPID_SET_NULL (&slots_hash_entry->slot->vpid); - fileio_initialize_res (thread_p, &(slots_hash_entry->slot->io_page->prv)); + fileio_initialize_res (thread_p, slots_hash_entry->slot->io_page, IO_PAGESIZE); _er_log_debug (ARG_FILE_LINE, "Found same page with same LSA in same block - %d - at positions (%d, %d) \n", @@ -2000,12 +1539,6 @@ dwb_destroy_internal (THREAD_ENTRY * thread_p, UINT64 * current_position_with_fl free_and_init (dwb_Global.blocks); } - if (dwb_Global.checksum_info != NULL) - { - dwb_finalize_checksum_info (dwb_Global.checksum_info); - free_and_init (dwb_Global.checksum_info); - } - if (dwb_Global.slots_hash != NULL) { dwb_finalize_slots_hash (dwb_Global.slots_hash); @@ -2523,12 +2056,13 @@ dwb_add_volume_to_block_flush_area (THREAD_ENTRY * thread_p, DWB_BLOCK * block, * p_dwb_ordered_slots(in): The slots that gives the pages flush order. * ordered_slots_length(in): The ordered slots array length. * remove_from_hash(in): True, if needs to remove entries from hash. + * helper_can_flush(in): True, if helper can flush. * * Note: This function fills to_flush_vdes array with the volumes that must be flushed. 
*/ STATIC_INLINE int dwb_write_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, DWB_SLOT * p_dwb_ordered_slots, - unsigned int ordered_slots_length, bool remove_from_hash) + unsigned int ordered_slots_length, bool helper_can_flush, bool remove_from_hash) { VOLID last_written_volid; unsigned int i; @@ -2621,7 +2155,7 @@ dwb_write_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, DWB_SLOT * p_dwb_or ATOMIC_INC_32 (¤t_flush_volume_info->num_pages, 1); count_writes++; - if ((count_writes >= num_pages_to_sync || can_flush_volume == true) + if (helper_can_flush && (count_writes >= num_pages_to_sync || can_flush_volume == true) && dwb_is_flush_block_helper_daemon_available ()) { if (ATOMIC_CAS_ADDR (&dwb_Global.helper_flush_block, (DWB_BLOCK *) NULL, block)) @@ -2652,7 +2186,7 @@ dwb_write_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, DWB_SLOT * p_dwb_or #endif #if defined (SERVER_MODE) - if ((dwb_Global.helper_flush_block == NULL) && (block->count_flush_volumes_info > 0)) + if (helper_can_flush && (dwb_Global.helper_flush_block == NULL) && (block->count_flush_volumes_info > 0)) { /* If helper_flush_block is NULL, it means that the flush helper thread does not run and was not woken yet. */ if (dwb_is_flush_block_helper_daemon_available () @@ -2701,18 +2235,19 @@ dwb_write_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, DWB_SLOT * p_dwb_or * return : Error code. * thread_p (in): Thread entry. * block(in): The block that needs flush. + * helper_can_flush(in): True, if helper thread can flush. * current_position_with_flags(out): Current position with flags. * * Note: The block pages can't be modified by others during flush. 
*/ STATIC_INLINE int -dwb_flush_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, UINT64 * current_position_with_flags) +dwb_flush_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, bool helper_can_flush, + UINT64 * current_position_with_flags) { UINT64 local_current_position_with_flags, new_position_with_flags; int error_code = NO_ERROR; DWB_SLOT *p_dwb_ordered_slots = NULL; unsigned int i, ordered_slots_length; - unsigned int block_checksum_element_position, block_checksum_start_position, element_position; PERF_UTIME_TRACKER time_track; int num_pages; unsigned int current_block_to_flush, next_block_to_flush; @@ -2766,7 +2301,7 @@ dwb_flush_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, UINT64 * current_po assert (s1->position_in_block < DWB_BLOCK_NUM_PAGES); VPID_SET_NULL (&(block->slots[s1->position_in_block].vpid)); - fileio_initialize_res (thread_p, &(s1->io_page->prv)); + fileio_initialize_res (thread_p, s1->io_page, IO_PAGESIZE); } /* Check for WAL protocol. */ @@ -2863,7 +2398,7 @@ dwb_flush_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, UINT64 * current_po dwb_log ("dwb_flush_block: DWB synchronized\n"); /* Now, write and flush the original location. */ - error_code = dwb_write_block (thread_p, block, p_dwb_ordered_slots, ordered_slots_length, true); + error_code = dwb_write_block (thread_p, block, p_dwb_ordered_slots, ordered_slots_length, helper_can_flush, true); if (error_code != NO_ERROR) { assert (false); @@ -2885,11 +2420,18 @@ dwb_flush_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, UINT64 * current_po } #if defined (SERVER_MODE) - if ((num_pages > max_pages_to_sync) && dwb_is_flush_block_helper_daemon_available ()) + if (helper_can_flush == true) { - /* Let the helper thread to flush volumes having many pages. */ - assert (dwb_Global.helper_flush_block != NULL); - continue; + if ((num_pages > max_pages_to_sync) && dwb_is_flush_block_helper_daemon_available ()) + { + /* Let the helper thread to flush volumes having many pages. 
*/ + assert (dwb_Global.helper_flush_block != NULL); + continue; + } + } + else + { + assert (dwb_Global.helper_flush_block == NULL); } #endif @@ -2916,24 +2458,9 @@ dwb_flush_block (THREAD_ENTRY * thread_p, DWB_BLOCK * block, UINT64 * current_po perfmon_db_flushed_block_volumes (thread_p, block->count_flush_volumes_info); } - if (prm_get_bool_value (PRM_ID_ENABLE_DWB_CHECKSUM_THREAD) == true) - { - block_checksum_start_position = DWB_BLOCK_GET_CHECKSUM_START_POSITION (block->block_no); - - for (block_checksum_element_position = 0; block_checksum_element_position < DWB_CHECKSUM_NUM_ELEMENTS_IN_BLOCK; - block_checksum_element_position++) - { - element_position = block_checksum_start_position + block_checksum_element_position; - - DWB_RESET_REQUESTED_CHECKSUMS_ELEMENT (element_position); - DWB_RESET_COMPUTED_CHECKSUMS_ELEMENT (element_position); - DWB_RESET_POSITION_IN_CHECKSUMS_ELEMENT (element_position); - } - } - -#if defined (SERVER_MODE) - assert (block->count_wb_pages == DWB_BLOCK_NUM_PAGES); -#endif + /* The block is full or there is only one thread that access DWB. */ + assert (block->count_wb_pages == DWB_BLOCK_NUM_PAGES + || DWB_IS_MODIFYING_STRUCTURE (ATOMIC_INC_64 (&dwb_Global.position_with_flags, 0LL))); ATOMIC_TAS_32 (&block->count_wb_pages, 0); ATOMIC_INC_64 (&block->version, 1ULL); @@ -3119,8 +2646,6 @@ dwb_acquire_next_slot (THREAD_ENTRY * thread_p, bool can_wait, DWB_SLOT ** p_dwb assert ((*p_dwb_slot)->position_in_block == position_in_current_block); - ATOMIC_TAS_32 (&(*p_dwb_slot)->checksum_status, DWB_SLOT_CHECKSUM_NOT_COMPUTED); - return NO_ERROR; } @@ -3148,9 +2673,10 @@ dwb_set_slot_data (THREAD_ENTRY * thread_p, DWB_SLOT * dwb_slot, FILEIO_PAGE * i else { /* Initialize page for consistency. 
*/ - fileio_initialize_res (thread_p, &(dwb_slot->io_page->prv)); + fileio_initialize_res (thread_p, dwb_slot->io_page, IO_PAGESIZE); } + assert (fileio_is_page_sane (io_page_p, IO_PAGESIZE)); LSA_COPY (&dwb_slot->lsa, &io_page_p->prv.lsa); VPID_SET (&dwb_slot->vpid, io_page_p->prv.volid, io_page_p->prv.pageid); } @@ -3181,222 +2707,18 @@ dwb_init_slot (DWB_SLOT * slot) STATIC_INLINE void dwb_get_next_block_for_flush (THREAD_ENTRY * thread_p, unsigned int *block_no) { - unsigned int block_checksum_start_position, block_checksum_element_position, element_position; - bool found; - assert (block_no != NULL); *block_no = DWB_NUM_TOTAL_BLOCKS; - /* First, check whether the next block can be flushed. Then check whether its whole slots checksum were computed. */ + /* check whether the next block can be flushed. */ if (dwb_Global.blocks[dwb_Global.next_block_to_flush].count_wb_pages != DWB_BLOCK_NUM_PAGES) { /* Next block is not full yet. */ return; } - found = true; - if (prm_get_bool_value (PRM_ID_ENABLE_DWB_CHECKSUM_THREAD) == true) - { - block_checksum_start_position = DWB_BLOCK_GET_CHECKSUM_START_POSITION (dwb_Global.next_block_to_flush); - - for (block_checksum_element_position = 0; block_checksum_element_position < DWB_CHECKSUM_NUM_ELEMENTS_IN_BLOCK; - block_checksum_element_position++) - { - element_position = block_checksum_start_position + block_checksum_element_position; - - if (DWB_GET_COMPUTED_CHECKSUMS_ELEMENT (element_position) - != DWB_GET_COMPLETED_CHECKSUMS_ELEMENT (block_checksum_element_position)) - { - /* Needs to compute all checksums before flush. */ - found = false; - break; - } - } - } - - if (found) - { - *block_no = dwb_Global.next_block_to_flush; - } -} - -/* - * dwb_block_has_all_checksums_computed(): Checks whether the block has all checksum computed. - * - * returns: True, if all block checksums computed. - * block_no(out): The block number. 
- */ -STATIC_INLINE bool -dwb_block_has_all_checksums_computed (unsigned int block_no) -{ - unsigned int block_checksum_start_position, block_checksum_element_position, element_position; - bool found; - - /* First, search for blocks that must be flushed, to avoid delays caused by checksum computation in other block. */ - found = true; - block_checksum_start_position = DWB_BLOCK_GET_CHECKSUM_START_POSITION (block_no); - - for (block_checksum_element_position = 0; block_checksum_element_position < DWB_CHECKSUM_NUM_ELEMENTS_IN_BLOCK; - block_checksum_element_position++) - { - element_position = block_checksum_start_position + block_checksum_element_position; - - if (DWB_GET_COMPUTED_CHECKSUMS_ELEMENT (element_position) - != DWB_GET_COMPLETED_CHECKSUMS_ELEMENT (block_checksum_element_position)) - { - found = false; - break; - } - } - - return found; -} - -/* - * dwb_compute_block_checksums(): Computes checksums for requested slots in specified block. - * - * returns: Error code. - * thread_p (in): The thread entry. - * block(int): The DWB block. - * block_slots_checksum_computed (out); True, if checksums of some slots in block are computed. - * block_needs_flush (out): True, if block needs flush - checksum of all slots computed. 
- */ -STATIC_INLINE int -dwb_compute_block_checksums (THREAD_ENTRY * thread_p, DWB_BLOCK * block, bool * block_slots_checksum_computed, - bool * block_needs_flush) -{ - UINT64 computed_slots_checksum_elem, requested_checksums_elem, bit_mask, computed_checksum_bits; - int error_code = NO_ERROR, position_in_checksum_element, position; - unsigned int block_checksum_start_position, block_checksum_element_position; - unsigned int element_position, slot_position, slot_base; - bool checksum_computed, slots_checksum_computed, all_slots_checksum_computed; - UINT64 block_version; - - assert (block != NULL); - - slots_checksum_computed = false; - all_slots_checksum_computed = true; - block_checksum_start_position = DWB_BLOCK_GET_CHECKSUM_START_POSITION (block->block_no); - block_version = DWB_GET_BLOCK_VERSION (block); - - for (block_checksum_element_position = 0; block_checksum_element_position < DWB_CHECKSUM_NUM_ELEMENTS_IN_BLOCK; - block_checksum_element_position++) - { - element_position = block_checksum_start_position + block_checksum_element_position; - - while (true) - { - requested_checksums_elem = DWB_GET_REQUESTED_CHECKSUMS_ELEMENT (element_position); - computed_slots_checksum_elem = DWB_GET_COMPUTED_CHECKSUMS_ELEMENT (element_position); - position_in_checksum_element = DWB_GET_POSITION_IN_CHECKSUMS_ELEMENT (element_position); - - /* Before checksum computation, check whether the block was flushed. */ - if (DWB_GET_BLOCK_VERSION (block) != block_version) - { - dwb_log ("Can't computed checksums for block %d. Block version updated from %lld to %lld.\n", - block->block_no, block_version, DWB_GET_BLOCK_VERSION (block)); - break; - } - - if (requested_checksums_elem == 0ULL || requested_checksums_elem == computed_slots_checksum_elem) - { - /* There are no other slots available for checksum computation. */ - break; - } - - /* The checksum bits modified meanwhile, we needs to compute new checksum. 
*/ - if (position_in_checksum_element >= DWB_CHECKSUM_ELEMENT_NO_BITS) - { - break; - } - - slot_base = block_checksum_element_position * DWB_CHECKSUM_ELEMENT_NO_BITS; - bit_mask = 1ULL << position_in_checksum_element; - computed_checksum_bits = 0; - - for (position = position_in_checksum_element; position < DWB_CHECKSUM_ELEMENT_NO_BITS; position++) - { - if ((requested_checksums_elem & bit_mask) == 0) - { - /* Stop searching bits */ - break; - } - - slot_position = slot_base + position; - assert (slot_position < DWB_BLOCK_NUM_PAGES); - - error_code = dwb_slot_compute_checksum (thread_p, &block->slots[slot_position], false, - &checksum_computed); - if (error_code != NO_ERROR) - { - dwb_log_error ("Can't compute checksum for slot %d in block %d\n", - block->slots[slot_position].position_in_block, block->block_no); - return error_code; - } - - /* Add the bit, if checksum was computed by me. */ - if (checksum_computed) - { - computed_checksum_bits |= bit_mask; - slots_checksum_computed = true; - } - - bit_mask = bit_mask << 1; - } - - /* Update computed bits */ - if (computed_checksum_bits == 0) - { - break; - } - else - { - /* Check that no other transaction computed the current slot checksum. */ - assert (DWB_IS_ADDED_TO_REQUESTED_CHECKSUMS_ELEMENT (element_position, computed_checksum_bits)); - assert (!DWB_IS_ADDED_TO_COMPUTED_CHECKSUMS_ELEMENT (element_position, computed_checksum_bits)); - - /* Update start bit position, if possible */ - do - { - position_in_checksum_element = DWB_GET_POSITION_IN_CHECKSUMS_ELEMENT (element_position); - if (position_in_checksum_element >= position) - { - /* Other transaction advanced before me, nothing to do. 
*/ - break; - } - } - while (!ATOMIC_CAS_32 (&dwb_Global.checksum_info->positions[element_position], - position_in_checksum_element, position)); - - assert (DWB_GET_BLOCK_VERSION (block) == block_version); - - DWB_ADD_TO_COMPUTED_CHECKSUMS_ELEMENT (element_position, computed_checksum_bits); - - dwb_log ("Successfully computed checksums for slots %d in block %d having version = %lld\n", - computed_checksum_bits, block->block_no, block_version); - } - } - - if (DWB_GET_COMPUTED_CHECKSUMS_ELEMENT (element_position) - != DWB_GET_COMPLETED_CHECKSUMS_ELEMENT (block_checksum_element_position)) - { - /* The checksum was not computed for all block slots. */ - all_slots_checksum_computed = false; - } - } - - if (block_slots_checksum_computed) - { - *block_slots_checksum_computed = slots_checksum_computed; - } - - if (block_needs_flush) - { - *block_needs_flush = all_slots_checksum_computed; - } - - return NO_ERROR; + *block_no = dwb_Global.next_block_to_flush; } /* @@ -3444,8 +2766,7 @@ dwb_set_data_on_next_slot (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page_p, boo * vpid(in): Page identifier. * p_dwb_slot(in/out): DWB slot where the page content must be added. * - * Note: This thread may decide to compute checksum, in case that checksum thread remains behind. - * Also, this thread may flush the block, if flush thread is not available or we are in stand alone. + * Note: thread may flush the block, if flush thread is not available or we are in stand alone. 
*/ int dwb_add_page (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page_p, VPID * vpid, DWB_SLOT ** p_dwb_slot) @@ -3454,7 +2775,6 @@ dwb_add_page (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page_p, VPID * vpid, DWB int error_code = NO_ERROR, inserted; DWB_BLOCK *block = NULL; DWB_SLOT *dwb_slot = NULL; - bool checksum_computed; bool needs_flush; assert (p_dwb_slot != NULL && (io_page_p != NULL || (*p_dwb_slot)->io_page != NULL) && vpid != NULL); @@ -3493,14 +2813,12 @@ dwb_add_page (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page_p, VPID * vpid, DWB { /* Invalidate the slot to avoid flushing the same data twice. */ VPID_SET_NULL (&dwb_slot->vpid); - fileio_initialize_res (thread_p, &(dwb_slot->io_page->prv)); + fileio_initialize_res (thread_p, dwb_slot->io_page, IO_PAGESIZE); } } dwb_log ("dwb_add_page: added page = (%d,%d) on block (%d) position (%d)\n", vpid->volid, vpid->pageid, dwb_slot->block_no, dwb_slot->position_in_block); - /* Reset checksum. */ - dwb_slot->io_page->prv.checksum = 0; block = &dwb_Global.blocks[dwb_slot->block_no]; count_wb_pages = ATOMIC_INC_32 (&block->count_wb_pages, 1); @@ -3515,34 +2833,6 @@ dwb_add_page (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page_p, VPID * vpid, DWB needs_flush = true; } - if (prm_get_bool_value (PRM_ID_ENABLE_DWB_CHECKSUM_THREAD) == true) - { - dwb_add_checksum_computation_request (thread_p, block->block_no, dwb_slot->position_in_block); - -#if defined (SERVER_MODE) - if (dwb_is_checksum_computation_daemon_available ()) - { - /* Wake up checksum thread to compute checksum. */ - dwb_checkum_computation_daemon->wakeup (); - } - - if (dwb_needs_speedup_checksum_computation (thread_p)) -#endif /* SERVER_MODE */ - { - /* Speed up checksum computation, if is too slow. 
*/ - error_code = dwb_slot_compute_checksum (thread_p, dwb_slot, true, &checksum_computed); - if (error_code != NO_ERROR) - { - dwb_log_error ("Can't compute checksum for slot %d in block %d\n", - dwb_slot->position_in_block, dwb_slot->block_no); - return error_code; - } - - dwb_log ("Successfully computed checksums for slots %d in block %d\n", - dwb_slot->position_in_block, dwb_slot->block_no); - } - } - if (needs_flush == false) { return NO_ERROR; @@ -3557,7 +2847,7 @@ dwb_add_page (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page_p, VPID * vpid, DWB #if defined (SERVER_MODE) /* * Wake ups flush block thread to flush the current block. The current block will be flushed after flushing the - * previous block and after all slots checksum of current block were computed by checksum threads. + * previous block. */ if (dwb_is_flush_block_daemon_available ()) { @@ -3566,27 +2856,10 @@ dwb_add_page (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page_p, VPID * vpid, DWB return NO_ERROR; } - - /* This thread must flush the block, but has to compute checksums first. */ - if (!dwb_block_has_all_checksums_computed (block->block_no)) - { - /* Computes checksum. Checksum thread may also computes checksums but for different block pages. */ - error_code = dwb_compute_block_checksums (thread_p, block, NULL, &needs_flush); - if (error_code != NO_ERROR) - { - assert (false); - return error_code; - } - - /* All checksums computed. 
*/ - assert (needs_flush == true); - } -#else - assert (dwb_block_has_all_checksums_computed (block->block_no)); #endif /* SERVER_MODE */ /* Flush all pages from current block */ - error_code = dwb_flush_block (thread_p, block, NULL); + error_code = dwb_flush_block (thread_p, block, false, NULL); if (error_code != NO_ERROR) { dwb_log_error ("Can't flush block = %d having version %lld\n", block->block_no, block->version); @@ -3734,7 +3007,6 @@ dwb_debug_check_dwb (THREAD_ENTRY * thread_p, DWB_SLOT * p_dwb_ordered_slots, un error_code = fileio_page_check_corruption (thread_p, p_dwb_ordered_slots[i - 1].io_page, &is_page_corrupted); if (error_code != NO_ERROR) { - /* Error in checksum computation. */ return error_code; } @@ -3746,7 +3018,6 @@ dwb_debug_check_dwb (THREAD_ENTRY * thread_p, DWB_SLOT * p_dwb_ordered_slots, un error_code = fileio_page_check_corruption (thread_p, p_dwb_ordered_slots[i].io_page, &is_page_corrupted); if (error_code != NO_ERROR) { - /* Error in checksum computation. */ return error_code; } @@ -3849,7 +3120,6 @@ dwb_check_data_page_is_sane (THREAD_ENTRY * thread_p, DWB_BLOCK * rcv_block, DWB error_code = fileio_page_check_corruption (thread_p, iopage, &is_page_corrupted); if (error_code != NO_ERROR) { - /* Error in checksum computation. */ return error_code; } @@ -3857,7 +3127,7 @@ dwb_check_data_page_is_sane (THREAD_ENTRY * thread_p, DWB_BLOCK * rcv_block, DWB { /* The page in data volume is not corrupted. Do not overwrite its content - reset slot VPID. */ VPID_SET_NULL (&p_dwb_ordered_slots[i].vpid); - fileio_initialize_res (thread_p, &(p_dwb_ordered_slots[i].io_page->prv)); + fileio_initialize_res (thread_p, p_dwb_ordered_slots[i].io_page, IO_PAGESIZE); continue; } @@ -3865,7 +3135,6 @@ dwb_check_data_page_is_sane (THREAD_ENTRY * thread_p, DWB_BLOCK * rcv_block, DWB error_code = fileio_page_check_corruption (thread_p, p_dwb_ordered_slots[i].io_page, &is_page_corrupted); if (error_code != NO_ERROR) { - /* Error in checksum computation. 
*/ return error_code; } @@ -4038,7 +3307,8 @@ dwb_load_and_recover_pages (THREAD_ENTRY * thread_p, const char *dwb_path_p, con if (0 < num_recoverable_pages) { /* Replace the corrupted pages in data volume with the DWB content. */ - error_code = dwb_write_block (thread_p, rcv_block, p_dwb_ordered_slots, ordered_slots_length, false); + error_code = + dwb_write_block (thread_p, rcv_block, p_dwb_ordered_slots, ordered_slots_length, false, false); if (error_code != NO_ERROR) { goto end; @@ -4190,7 +3460,7 @@ dwb_flush_next_block (THREAD_ENTRY * thread_p) } } - error_code = dwb_flush_block (thread_p, flush_block, NULL); + error_code = dwb_flush_block (thread_p, flush_block, true, NULL); if (error_code != NO_ERROR) { /* Something wrong happened. */ @@ -4221,7 +3491,7 @@ dwb_flush_force (THREAD_ENTRY * thread_p, bool * all_sync) { UINT64 initial_position_with_flags, current_position_with_flags, prev_position_with_flags; UINT64 initial_block_version, current_block_version; - unsigned int initial_block_no, current_block_no = DWB_NUM_TOTAL_BLOCKS; + int initial_block_no, current_block_no = DWB_NUM_TOTAL_BLOCKS; char page_buf[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT]; FILEIO_PAGE *iopage = NULL; VPID null_vpid = { NULL_VOLID, NULL_PAGEID }; @@ -4230,6 +3500,7 @@ dwb_flush_force (THREAD_ENTRY * thread_p, bool * all_sync) unsigned int count_added_pages = 0, max_pages_to_add = 0, initial_num_pages = 0; DWB_BLOCK *initial_block; PERF_UTIME_TRACKER time_track; + int block_no; assert (all_sync != NULL); @@ -4239,7 +3510,18 @@ dwb_flush_force (THREAD_ENTRY * thread_p, bool * all_sync) start: initial_position_with_flags = ATOMIC_INC_64 (&dwb_Global.position_with_flags, 0ULL); - dwb_log ("dwb_flush_force: Started with intital position = %lld\n", initial_position_with_flags); + dwb_log ("dwb_flush_force: Started with initital position = %lld\n", initial_position_with_flags); + +#if !defined (NDEBUG) + if (dwb_Global.blocks != NULL) + { + for (block_no = 0; block_no < (int) 
DWB_NUM_TOTAL_BLOCKS; block_no++) + { + dwb_log_error ("dwb_flush_force start: Block %d, Num pages = %d, version = %lld\n", + block_no, dwb_Global.blocks[block_no].count_wb_pages, dwb_Global.blocks[block_no].version); + } + } +#endif if (DWB_NOT_CREATED_OR_MODIFYING (initial_position_with_flags)) { @@ -4269,11 +3551,10 @@ dwb_flush_force (THREAD_ENTRY * thread_p, bool * all_sync) } } - initial_block_no = DWB_GET_BLOCK_NO_FROM_POSITION (initial_position_with_flags); - if (DWB_GET_BLOCK_STATUS (initial_position_with_flags) == 0) { /* Check helper flush block. */ + initial_block_no = DWB_GET_BLOCK_NO_FROM_POSITION (initial_position_with_flags); initial_block = dwb_Global.helper_flush_block; if (initial_block == NULL) { @@ -4284,14 +3565,22 @@ dwb_flush_force (THREAD_ENTRY * thread_p, bool * all_sync) goto wait_for_helper_flush_block; } - while (!DWB_IS_BLOCK_WRITE_STARTED (initial_position_with_flags, initial_block_no)) + /* Search for latest not flushed block - not flushed yet, having highest version. */ + initial_block_no = -1; + initial_block_version = 0; + for (block_no = 0; block_no < (int) DWB_NUM_TOTAL_BLOCKS; block_no++) { - /* Nothing to flush in this block, go to the previous block. */ - initial_block_no = DWB_GET_PREV_BLOCK_NO (initial_block_no); + if (DWB_IS_BLOCK_WRITE_STARTED (initial_position_with_flags, block_no) + && (dwb_Global.blocks[block_no].version >= initial_block_version)) + { + initial_block_no = block_no; + initial_block_version = dwb_Global.blocks[initial_block_no].version; + } } - /* Save the block version and number of pages, to detect whether the block was written on disk. */ - initial_block_version = dwb_Global.blocks[initial_block_no].version; + /* At least one block was not flushed. 
*/ + assert (initial_block_no != -1); + initial_num_pages = dwb_Global.blocks[initial_block_no].count_wb_pages; if (initial_position_with_flags != ATOMIC_INC_64 (&dwb_Global.position_with_flags, 0ULL)) { @@ -4304,7 +3593,10 @@ dwb_flush_force (THREAD_ENTRY * thread_p, bool * all_sync) iopage = (FILEIO_PAGE *) PTR_ALIGN (page_buf, MAX_ALIGNMENT); memset (iopage, 0, IO_MAX_PAGE_SIZE); - fileio_initialize_res (thread_p, &(iopage->prv)); + fileio_initialize_res (thread_p, iopage, IO_PAGESIZE); + + dwb_log ("dwb_flush_force: Waits for flushing the block %d having version %lld and %d pages\n", + initial_block_no, initial_block_version, initial_num_pages); /* Check whether the initial block was flushed */ check_flushed_blocks: @@ -4425,6 +3717,17 @@ dwb_flush_force (THREAD_ENTRY * thread_p, bool * all_sync) dwb_log ("dwb_flush_force: Ended with position = %lld\n", ATOMIC_INC_64 (&dwb_Global.position_with_flags, 0ULL)); PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &time_track, PSTAT_DWB_FLUSH_FORCE_TIME_COUNTERS); +#if !defined (NDEBUG) + if (dwb_Global.blocks != NULL) + { + for (block_no = 0; block_no < (int) DWB_NUM_TOTAL_BLOCKS; block_no++) + { + dwb_log_error ("dwb_flush_force end: Block %d, Num pages = %d, version = %lld\n", + block_no, dwb_Global.blocks[block_no].count_wb_pages, dwb_Global.blocks[block_no].version); + } + } +#endif + return NO_ERROR; } @@ -4628,88 +3931,6 @@ dwb_flush_block_helper (THREAD_ENTRY * thread_p) return NO_ERROR; } -/* - * dwb_compute_checksums (): Computes checksum for requested slots. - * - * returns: Error code. - * thread_p (in): The thread entry. 
- */ -static int -dwb_compute_checksums (THREAD_ENTRY * thread_p) -{ - UINT64 position_with_flags; - unsigned int num_block, start_block, end_block; - bool block_slots_checksums_computed, block_needs_flush, checksum_computed; - int error_code = NO_ERROR; - -start: - position_with_flags = ATOMIC_INC_64 (&dwb_Global.position_with_flags, 0ULL); - - if (!DWB_IS_CREATED (position_with_flags) || DWB_IS_MODIFYING_STRUCTURE (position_with_flags)) - { - return NO_ERROR; - } - - start_block = dwb_Global.next_block_to_flush; - if (ATOMIC_INC_32 (&dwb_Global.blocks_flush_counter, 0) > 0) - { - if (start_block != dwb_Global.next_block_to_flush) - { - /* Try again, next_block_to_flush has changed. */ - goto start; - } - - start_block = DWB_GET_NEXT_BLOCK_NO (start_block); - } - - if (!dwb_block_has_all_checksums_computed (start_block)) - { - /* Compute only for the block that must be flushed first, to avoid delays. */ - end_block = start_block + 1; - } - else - { - end_block = dwb_Global.num_blocks; - } - - /* Compute checksums and/or flush the block. */ - checksum_computed = false; - for (num_block = start_block; num_block < end_block; num_block++) - { - error_code = dwb_compute_block_checksums (thread_p, &dwb_Global.blocks[num_block], - &block_slots_checksums_computed, &block_needs_flush); - if (error_code != NO_ERROR) - { - assert (false); - return error_code; - } - -#if defined(SERVER_MODE) - if (block_needs_flush) - { - if (dwb_is_flush_block_daemon_available ()) - { - /* Wakeup the thread to flush the block. */ - dwb_flush_block_daemon->wakeup (); - } - } -#endif /* SERVER_MODE */ - - if (block_slots_checksums_computed) - { - checksum_computed = true; - } - } - - if (checksum_computed) - { - /* Check again whether we can compute other checksums, requested meanwhile by concurrent transaction. */ - goto start; - } - - return NO_ERROR; -} - /* * dwb_read_page () - Reads page from DWB. 
* @@ -4834,30 +4055,6 @@ class dwb_flush_block_helper_daemon_task: public cubthread::entry_task } }; -// class dwb_checksum_computation_daemon_task -// -// description: -// dwb checksum computation daemon task -// -class dwb_checksum_computation_daemon_task: public cubthread::entry_task -{ - public: - void execute (cubthread::entry &thread_ref) override - { - if (!BO_IS_SERVER_RESTARTED ()) - { - // wait for boot to finish - return; - } - - /* flush pages as long as necessary */ - if (prm_get_bool_value (PRM_ID_ENABLE_DWB_CHECKSUM_THREAD) == true) - { - dwb_compute_checksums (&thread_ref); - } - } -}; - /* * dwb_flush_block_daemon_init () - initialize DWB flush block daemon thread */ @@ -4882,18 +4079,6 @@ dwb_flush_block_helper_daemon_init () dwb_flush_block_helper_daemon = cubthread::get_manager ()->create_daemon (looper, daemon_task); } -/* - * dwb_checksum_computation_daemon_init () - initialize DWB checksum computation daemon thread - */ -void -dwb_checksum_computation_daemon_init () -{ - cubthread::looper looper = cubthread::looper (std::chrono::milliseconds (20)); - dwb_checksum_computation_daemon_task *daemon_task = new dwb_checksum_computation_daemon_task (); - - dwb_checkum_computation_daemon = cubthread::get_manager ()->create_daemon (looper, daemon_task); -} - /* * dwb_daemons_init () - initialize DWB daemon threads */ @@ -4902,7 +4087,6 @@ dwb_daemons_init () { dwb_flush_block_daemon_init (); dwb_flush_block_helper_daemon_init (); - dwb_checksum_computation_daemon_init (); } /* @@ -4913,7 +4097,6 @@ dwb_daemons_destroy () { cubthread::get_manager ()->destroy_daemon (dwb_flush_block_daemon); cubthread::get_manager ()->destroy_daemon (dwb_flush_block_helper_daemon); - cubthread::get_manager ()->destroy_daemon (dwb_checkum_computation_daemon); } #endif /* SERVER_MODE */ // *INDENT-ON* @@ -4946,21 +4129,6 @@ dwb_is_flush_block_helper_daemon_available (void) #endif } -/* - * dwb_is_checksum_computation_daemon_available () - Check whether checksum 
computation daemon is available - * - * return: true, if checksum computation thread is available, false otherwise - */ -static bool -dwb_is_checksum_computation_daemon_available (void) -{ -#if defined (SERVER_MODE) - return dwb_checkum_computation_daemon != NULL; -#else - return false; -#endif -} - /* * dwb_flush_block_daemon_is_running () - Check whether flush block daemon is running * @@ -4990,18 +4158,3 @@ dwb_flush_block_helper_daemon_is_running (void) return false; #endif /* SERVER_MODE */ } - -/* - * dwb_checksum_computation_daemon_is_running () - Check whether checksum computation daemon is running - * - * return: true, if checksum computation thread is running - */ -static bool -dwb_checksum_computation_daemon_is_running (void) -{ -#if defined (SERVER_MODE) - return ((dwb_checkum_computation_daemon != NULL) && (dwb_checkum_computation_daemon->is_running ())); -#else - return false; -#endif /* SERVER_MODE */ -} diff --git a/src/storage/double_write_buffer.h b/src/storage/double_write_buffer.h index a95b1257181..fb5b0a90dbe 100644 --- a/src/storage/double_write_buffer.h +++ b/src/storage/double_write_buffer.h @@ -37,7 +37,6 @@ struct double_write_slot LOG_LSA lsa; /* The page LSA */ unsigned int position_in_block; /* The position in block. */ unsigned int block_no; /* The number of the block where the slot reside. */ - volatile int checksum_status; /* The checksum status. 
*/ }; /* double write buffer interface */ diff --git a/src/storage/extendible_hash.c b/src/storage/extendible_hash.c index c1740be44ef..cc43935be12 100644 --- a/src/storage/extendible_hash.c +++ b/src/storage/extendible_hash.c @@ -50,6 +50,7 @@ #include "file_manager.h" #include "overflow_file.h" #include "memory_hash.h" /* For hash functions */ +#include "tz_support.h" #include "db_date.h" #include "thread_compat.hpp" diff --git a/src/storage/file_io.c b/src/storage/file_io.c index ef1ab307925..41e6850dfe5 100644 --- a/src/storage/file_io.c +++ b/src/storage/file_io.c @@ -574,8 +574,6 @@ static int fileio_synchronize_bg_archive_volume (THREAD_ENTRY * thread_p); static void fileio_page_bitmap_set (FILEIO_RESTORE_PAGE_BITMAP * page_bitmap, int page_id); static bool fileio_page_bitmap_is_set (FILEIO_RESTORE_PAGE_BITMAP * page_bitmap, int page_id); static void fileio_page_bitmap_dump (FILE * out_fp, const FILEIO_RESTORE_PAGE_BITMAP * page_bitmap); -static int fileio_compute_page_checksum (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page, int *checksum_crc32); -static int fileio_page_has_valid_checksum (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page, bool * has_valid_checksum); static int fileio_increase_flushed_page_count (int npages) @@ -1885,7 +1883,7 @@ fileio_initialize_pages (THREAD_ENTRY * thread_p, int vol_fd, FILEIO_PAGE * io_p /* check for interrupts from user (i.e. 
Ctrl-C) */ if ((page_id % FILEIO_CHECK_FOR_INTERRUPT_INTERVAL) == 0) { - if (pgbuf_is_log_check_for_interrupts (thread_p) == true) + if (logtb_get_check_interrupt (thread_p) && pgbuf_is_log_check_for_interrupts (thread_p)) { return NULL; } @@ -2387,7 +2385,7 @@ fileio_format (THREAD_ENTRY * thread_p, const char *db_full_name_p, const char * } memset ((char *) malloc_io_page_p, 0, page_size); - (void) fileio_initialize_res (thread_p, &(malloc_io_page_p->prv)); + (void) fileio_initialize_res (thread_p, malloc_io_page_p, page_size); vol_fd = fileio_create (thread_p, db_full_name_p, vol_label_p, vol_id, is_do_lock, is_do_sync); FI_TEST (thread_p, FI_TEST_FILE_IO_FORMAT, 0); @@ -2596,7 +2594,7 @@ fileio_expand_to (THREAD_ENTRY * thread_p, VOLID vol_id, DKNPAGES size_npages, D } memset (io_page_p, 0, IO_PAGESIZE); - (void) fileio_initialize_res (thread_p, &(io_page_p->prv)); + (void) fileio_initialize_res (thread_p, io_page_p, IO_PAGESIZE); start_pageid = (PAGEID) (current_size / IO_PAGESIZE); last_pageid = ((PAGEID) (new_size / IO_PAGESIZE) - 1); @@ -2636,7 +2634,7 @@ fileio_expand_to (THREAD_ENTRY * thread_p, VOLID vol_id, DKNPAGES size_npages, D db_private_free (thread_p, io_page_p); - return NO_ERROR; + return error_code; } #endif /* not CS_MODE */ @@ -2844,7 +2842,7 @@ fileio_copy_volume (THREAD_ENTRY * thread_p, int from_vol_desc, DKNPAGES npages, } else { - LSA_SET_NULL (&malloc_io_page_p->prv.lsa); + fileio_reset_page_lsa (malloc_io_page_p, IO_PAGESIZE); if (fileio_write_or_add_to_dwb (thread_p, to_vol_desc, malloc_io_page_p, page_id, IO_PAGESIZE) == NULL) { goto error; @@ -2900,7 +2898,8 @@ fileio_reset_volume (THREAD_ENTRY * thread_p, int vol_fd, const char *vlabel, DK { if (fileio_read (thread_p, vol_fd, malloc_io_page_p, page_id, IO_PAGESIZE) != NULL) { - LSA_COPY (&malloc_io_page_p->prv.lsa, reset_lsa_p); + fileio_set_page_lsa (malloc_io_page_p, reset_lsa_p, IO_PAGESIZE); + if (fileio_write_or_add_to_dwb (thread_p, vol_fd, malloc_io_page_p, page_id, 
IO_PAGESIZE) == NULL) { success = ER_FAILED; @@ -3023,6 +3022,41 @@ fileio_mount (THREAD_ENTRY * thread_p, const char *db_full_name_p, const char *v last_size = stat_buf.st_size; } +#if _POSIX_C_SOURCE >= 200112L + if (vol_id >= LOG_DBFIRST_VOLID && prm_get_integer_value (PRM_ID_DATA_FILE_ADVISE) != 0) + { + int advise_flag = 0; + off_t amount = 0; /* entire volume */ + switch (prm_get_integer_value (PRM_ID_DATA_FILE_ADVISE)) + { + case 1: + advise_flag = POSIX_FADV_NORMAL; + break; + case 2: + advise_flag = POSIX_FADV_SEQUENTIAL; + break; + case 3: + advise_flag = POSIX_FADV_RANDOM; + break; + case 4: + advise_flag = POSIX_FADV_NOREUSE; + break; + case 5: + advise_flag = POSIX_FADV_WILLNEED; + break; + case 6: + advise_flag = POSIX_FADV_DONTNEED; + break; + } + + if (posix_fadvise (vol_fd, 0, amount, advise_flag) != 0) + { + er_set_with_oserror (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_IO_MOUNT_FAIL, 1, vol_label_p); + return NULL_VOLDES; + } + } +#endif /* _POSIX_C_SOURCE >= 200112L */ + /* LOCK THE DISK */ if (lock_wait != 0) { @@ -3585,7 +3619,7 @@ pwrite_with_injected_fault (THREAD_ENTRY * thread_p, int fd, const void *buf, si { static bool init = false; const int mod_factor = 25000; - const int unit_size = 4096; + const int block_size = 4096; int count_blocks; ssize_t r, written_nbytes; off_t unit_offset; @@ -3603,23 +3637,23 @@ pwrite_with_injected_fault (THREAD_ENTRY * thread_p, int fd, const void *buf, si if ((fi_partial_write1_on || fi_partial_write2_on) && ((rand () % mod_factor) == 0)) { // simulate partial write - count_blocks = count / unit_size; + count_blocks = count / block_size; written_nbytes = 0; for (int i = 0; i < count_blocks; i++) { if (fi_partial_write1_on) { - unit_offset = i * unit_size; + unit_offset = i * block_size; } else { // reverse order - unit_offset = ((count_blocks - 1) - i) * unit_size; + unit_offset = ((count_blocks - 1) - i) * block_size; } - r = pwrite (fd, ((char *) buf) + unit_offset, unit_size, offset + unit_offset); + r = 
pwrite (fd, ((char *) buf) + unit_offset, block_size, offset + unit_offset); written_nbytes += r; - if (r != unit_size) + if (r != block_size) { return written_nbytes; } @@ -3645,7 +3679,7 @@ pwrite_with_injected_fault (THREAD_ENTRY * thread_p, int fd, const void *buf, si fileio_page_hexa_dump ((const char *) buf, count); #if defined (SERVER_MODE) || defined (SA_MODE) - /* Verify checksum correctness before the crash, for proper recovery purpose. */ + /* Verify page correctness before the crash, for proper recovery purpose. */ if (fileio_is_permanent_volume_descriptor (thread_p, fd)) { /* Permanent data volume. */ @@ -4802,7 +4836,7 @@ fileio_write_user_area (THREAD_ENTRY * thread_p, int vol_fd, PAGEID page_id, off return NULL; } - (void) fileio_initialize_res (thread_p, &(io_page_p->prv)); + (void) fileio_initialize_res (thread_p, io_page_p, IO_PAGESIZE); memcpy (io_page_p->page, area_p, nbytes); write_p = (void *) io_page_p; @@ -9994,7 +10028,7 @@ fileio_fill_hole_during_restore (THREAD_ENTRY * thread_p, int *next_page_id_p, i return ER_FAILED; } memset ((char *) malloc_io_pgptr, 0, IO_PAGESIZE); - (void) fileio_initialize_res (thread_p, &(malloc_io_pgptr->prv)); + (void) fileio_initialize_res (thread_p, malloc_io_pgptr, IO_PAGESIZE); } while (*next_page_id_p < stop_page_id) @@ -11538,19 +11572,17 @@ fileio_os_sysconf (void) * return: */ void -fileio_initialize_res (THREAD_ENTRY * thread_p, FILEIO_PAGE_RESERVED * prv_p) +fileio_initialize_res (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page, PGLENGTH page_size) { - LSA_SET_NULL (&(prv_p->lsa)); - prv_p->pageid = -1; - prv_p->volid = -1; - - /* Clears checksum for debug purpose. 
*/ - prv_p->checksum = 0; + fileio_init_lsa_of_page (io_page, page_size); + io_page->prv.pageid = -1; + io_page->prv.volid = -1; - prv_p->ptype = '\0'; - prv_p->pflag_reserve_1 = '\0'; - prv_p->p_reserve_2 = 0; - prv_p->p_reserve_3 = 0; + io_page->prv.ptype = '\0'; + io_page->prv.pflag_reserve_1 = '\0'; + io_page->prv.p_reserve_1 = 0; + io_page->prv.p_reserve_2 = 0; + io_page->prv.p_reserve_3 = 0; } @@ -11786,84 +11818,6 @@ fileio_page_bitmap_dump (FILE * out_fp, const FILEIO_RESTORE_PAGE_BITMAP * page_ fprintf (out_fp, "\n"); } -/* - * fileio_compute_page_checksum - Computes data page checksum. - * return: error code - * thread_p (in) : thread entry - * io_page (in) : page pointer - * checksum_crc32 (out): computed checksum - * - * Note: Currently CRC32 is used as checksum. - */ -static int -fileio_compute_page_checksum (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page, int *checksum_crc32) -{ - int error_code = NO_ERROR, saved_checksum_crc32; - - assert (io_page != NULL && checksum_crc32 != NULL); - - /* Save the old page checksum. */ - saved_checksum_crc32 = io_page->prv.checksum; - - /* Resets checksum to not affect the new computation. */ - io_page->prv.checksum = 0; - - /* Computes the page checksum. */ - error_code = crypt_crc32 (thread_p, (char *) io_page, IO_PAGESIZE, checksum_crc32); - - /* Restores the saved checksum */ - io_page->prv.checksum = saved_checksum_crc32; - - return error_code; -} - -/* - * fileio_page_has_valid_checksum - Check whether the page checksum is valid. - * return: error code - * thread_p (in): thread entry - * io_page (in): the page - * has_valid_checksum (out): true, if has valid checksum. 
- */ -static int -fileio_page_has_valid_checksum (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page, bool * has_valid_checksum) -{ - int checksum_crc32, error_code = NO_ERROR; - - assert (io_page != NULL && has_valid_checksum != NULL); - - error_code = fileio_compute_page_checksum (thread_p, io_page, &checksum_crc32); - if (error_code == NO_ERROR) - { - *has_valid_checksum = (checksum_crc32 == io_page->prv.checksum); - } - - return error_code; -} - -/* - * fileio_set_page_checksum - Set page checksum. - * return: error code - * thread_p (in): thread entry - * io_page (in): page - * - * Note: Currently CRC32 is used as checksum. - */ -int -fileio_set_page_checksum (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page) -{ - int checksum_crc32, error_code = NO_ERROR; - - assert (io_page != NULL); - - error_code = fileio_compute_page_checksum (thread_p, io_page, &checksum_crc32); - if (error_code == NO_ERROR) - { - io_page->prv.checksum = checksum_crc32; - } - - return error_code; -} - /* * fileio_page_check_corruption - Check whether the page is corrupted. * return: error code @@ -11874,23 +11828,9 @@ fileio_set_page_checksum (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page) int fileio_page_check_corruption (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page, bool * is_page_corrupted) { - int error_code; - bool has_valid_checksum; - assert (io_page != NULL && is_page_corrupted != NULL); - if (io_page->prv.checksum == 0) - { - /* The checksum was disabled. 
*/ - *is_page_corrupted = false; - return NO_ERROR; - } + *is_page_corrupted = !fileio_is_page_sane (io_page, IO_PAGESIZE); - error_code = fileio_page_has_valid_checksum (thread_p, io_page, &has_valid_checksum); - if (error_code == NO_ERROR) - { - *is_page_corrupted = !has_valid_checksum; - } - - return error_code; + return NO_ERROR; } diff --git a/src/storage/file_io.h b/src/storage/file_io.h index a0a73675b73..53ac7163570 100644 --- a/src/storage/file_io.h +++ b/src/storage/file_io.h @@ -177,19 +177,86 @@ struct fileio_page_reserved INT16 volid; /* Volume identifier where the page reside */ unsigned char ptype; /* Page type */ unsigned char pflag_reserve_1; /* unused - Reserved field */ - INT32 checksum; /* Page checksum - currently CRC32 is used. */ + INT32 p_reserve_1; INT32 p_reserve_2; /* unused - Reserved field */ INT64 p_reserve_3; /* unused - Reserved field */ }; +typedef struct fileio_page_watermark FILEIO_PAGE_WATERMARK; +struct fileio_page_watermark +{ + LOG_LSA lsa; /* duplication of prv.lsa */ +}; + /* The FILEIO_PAGE */ typedef struct fileio_page FILEIO_PAGE; struct fileio_page { FILEIO_PAGE_RESERVED prv; /* System page area. Reserved */ char page[1]; /* The user page area */ + + // You cannot directly access prv2 like page_ptr.prv2, since it does not point to the real location */ + FILEIO_PAGE_WATERMARK prv2; /* system page area. It should be located at the end of page. 
*/ }; +STATIC_INLINE FILEIO_PAGE_WATERMARK * +fileio_get_page_watermark_pos (FILEIO_PAGE * io_page, PGLENGTH page_size) +{ + return (FILEIO_PAGE_WATERMARK *) (((char *) io_page) + (page_size - sizeof (FILEIO_PAGE_WATERMARK))); +} + +STATIC_INLINE void +fileio_init_lsa_of_page (FILEIO_PAGE * io_page, PGLENGTH page_size) +{ + LSA_SET_NULL (&io_page->prv.lsa); + + FILEIO_PAGE_WATERMARK *prv2 = fileio_get_page_watermark_pos (io_page, page_size); + LSA_SET_NULL (&prv2->lsa); +} + +STATIC_INLINE void +fileio_init_lsa_of_temp_page (FILEIO_PAGE * io_page, PGLENGTH page_size) +{ + LOG_LSA *lsa_ptr; + + lsa_ptr = &io_page->prv.lsa; + lsa_ptr->pageid = NULL_PAGEID - 1; + lsa_ptr->offset = NULL_OFFSET - 1; + + FILEIO_PAGE_WATERMARK *prv2 = fileio_get_page_watermark_pos (io_page, page_size); + + lsa_ptr = &prv2->lsa; + lsa_ptr->pageid = NULL_PAGEID - 1; + lsa_ptr->offset = NULL_OFFSET - 1; +} + +STATIC_INLINE void +fileio_reset_page_lsa (FILEIO_PAGE * io_page, PGLENGTH page_size) +{ + LSA_SET_NULL (&io_page->prv.lsa); + + FILEIO_PAGE_WATERMARK *prv2 = fileio_get_page_watermark_pos (io_page, page_size); + + LSA_SET_NULL (&prv2->lsa); +} + +STATIC_INLINE void +fileio_set_page_lsa (FILEIO_PAGE * io_page, const LOG_LSA * lsa, PGLENGTH page_size) +{ + LSA_COPY (&io_page->prv.lsa, lsa); + + FILEIO_PAGE_WATERMARK *prv2 = fileio_get_page_watermark_pos (io_page, page_size); + + LSA_COPY (&prv2->lsa, lsa); +} + +STATIC_INLINE int +fileio_is_page_sane (FILEIO_PAGE * io_page, PGLENGTH page_size) +{ + FILEIO_PAGE_WATERMARK *prv2 = fileio_get_page_watermark_pos (io_page, page_size); + + return (LSA_EQ (&io_page->prv.lsa, &prv2->lsa)); +} typedef struct fileio_backup_page FILEIO_BACKUP_PAGE; struct fileio_backup_page @@ -408,7 +475,7 @@ extern int fileio_expand_to (THREAD_ENTRY * threda_p, VOLID volid, DKNPAGES npag #endif /* not CS_MODE */ extern void *fileio_initialize_pages (THREAD_ENTRY * thread_p, int vdes, FILEIO_PAGE * io_pgptr, DKNPAGES start_pageid, DKNPAGES npages, size_t page_size, 
int kbytes_to_be_written_per_sec); -extern void fileio_initialize_res (THREAD_ENTRY * thread_p, FILEIO_PAGE_RESERVED * prv_p); +extern void fileio_initialize_res (THREAD_ENTRY * thread_p, FILEIO_PAGE * io_page, PGLENGTH page_size); #if defined (ENABLE_UNUSED_FUNCTION) extern DKNPAGES fileio_truncate (VOLID volid, DKNPAGES npages_to_resize); #endif diff --git a/src/storage/heap_file.c b/src/storage/heap_file.c index 010fb2d5526..8558849dd23 100644 --- a/src/storage/heap_file.c +++ b/src/storage/heap_file.c @@ -50,6 +50,7 @@ #include "query_executor.h" #include "fetch.h" #include "server_interface.h" +#include "elo.h" #include "db_elo.h" #include "string_opfunc.h" #include "xasl.h" @@ -717,7 +718,9 @@ static SCAN_CODE heap_attrinfo_transform_to_disk_internal (THREAD_ENTRY * thread int lob_create_flag); static int heap_stats_del_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid); static int heap_stats_del_bestspace_by_hfid (THREAD_ENTRY * thread_p, const HFID * hfid); +#if defined (ENABLE_UNUSED_FUNCTION) static HEAP_BESTSPACE heap_stats_get_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid); +#endif /* #if defined (ENABLE_UNUSED_FUNCTION) */ static HEAP_STATS_ENTRY *heap_stats_add_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * vpid, int freespace); static int heap_stats_entry_free (THREAD_ENTRY * thread_p, void *data, void *args); @@ -959,9 +962,12 @@ heap_stats_add_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * vpi { HEAP_STATS_ENTRY *ent; int rc; + PERF_UTIME_TRACKER time_best_space = PERF_UTIME_TRACKER_INITIALIZER; assert (prm_get_integer_value (PRM_ID_HF_MAX_BESTSPACE_ENTRIES) > 0); + PERF_UTIME_TRACKER_START (thread_p, &time_best_space); + rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex); ent = (HEAP_STATS_ENTRY *) mht_get (heap_Bestspace->vpid_ht, vpid); @@ -1041,6 +1047,8 @@ heap_stats_add_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * vpi pthread_mutex_unlock 
(&heap_Bestspace->bestspace_mutex); + PERF_UTIME_TRACKER_TIME (thread_p, &time_best_space, PSTAT_HF_BEST_SPACE_ADD); + return ent; } @@ -1056,6 +1064,9 @@ heap_stats_del_bestspace_by_hfid (THREAD_ENTRY * thread_p, const HFID * hfid) HEAP_STATS_ENTRY *ent; int del_cnt = 0; int rc; + PERF_UTIME_TRACKER time_best_space = PERF_UTIME_TRACKER_INITIALIZER; + + PERF_UTIME_TRACKER_START (thread_p, &time_best_space); rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex); @@ -1076,6 +1087,8 @@ heap_stats_del_bestspace_by_hfid (THREAD_ENTRY * thread_p, const HFID * hfid) assert (mht_count (heap_Bestspace->vpid_ht) == mht_count (heap_Bestspace->hfid_ht)); pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex); + PERF_UTIME_TRACKER_TIME (thread_p, &time_best_space, PSTAT_HF_BEST_SPACE_DEL); + return del_cnt; } @@ -1090,7 +1103,9 @@ heap_stats_del_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid) { HEAP_STATS_ENTRY *ent; int rc; + PERF_UTIME_TRACKER time_best_space = PERF_UTIME_TRACKER_INITIALIZER; + PERF_UTIME_TRACKER_START (thread_p, &time_best_space); rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex); ent = (HEAP_STATS_ENTRY *) mht_get (heap_Bestspace->vpid_ht, vpid); @@ -1111,9 +1126,12 @@ heap_stats_del_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid) pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex); + PERF_UTIME_TRACKER_TIME (thread_p, &time_best_space, PSTAT_HF_BEST_SPACE_DEL); + return NO_ERROR; } +#if defined (ENABLE_UNUSED_FUNCTION) /* * heap_stats_get_bestspace_by_vpid () - * return: NO_ERROR @@ -1147,6 +1165,7 @@ heap_stats_get_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid) return best; } +#endif /* ENABLE_UNUSED_FUNCTION */ /* * Scan page buffer and latch page manipulation @@ -3137,9 +3156,13 @@ heap_stats_find_page_in_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, H int i, best_array_index = -1; bool hash_is_available; bool best_hint_is_used; + PERF_UTIME_TRACKER time_best_space = PERF_UTIME_TRACKER_INITIALIZER; 
+ PERF_UTIME_TRACKER time_find_page_best_space = PERF_UTIME_TRACKER_INITIALIZER; assert (PGBUF_IS_CLEAN_WATCHER (pg_watcher)); + PERF_UTIME_TRACKER_START (thread_p, &time_find_page_best_space); + /* * If a page is busy, don't wait continue looking for other pages in our * statistics. This will improve some contentions on the heap at the @@ -3161,6 +3184,7 @@ heap_stats_find_page_in_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, H if (hash_is_available) { + PERF_UTIME_TRACKER_START (thread_p, &time_best_space); rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex); while (notfound_cnt < BEST_PAGE_SEARCH_MAX_COUNT @@ -3185,6 +3209,7 @@ heap_stats_find_page_in_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, H } pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex); + PERF_UTIME_TRACKER_TIME (thread_p, &time_best_space, PSTAT_HF_BEST_SPACE_FIND); } if (best.freespace == -1) @@ -3222,7 +3247,8 @@ heap_stats_find_page_in_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, H break; } #if defined (SERVER_MODE) - assert (er_errid () == ER_INTERRUPTED); + // ignores a warning and expects no other errors + assert (er_errid_if_has_error () == NO_ERROR); #endif /* SERVER_MODE */ er_clear (); } @@ -3340,6 +3366,7 @@ heap_stats_find_page_in_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, H * Reset back the timeout value of the transaction */ (void) xlogtb_reset_wait_msecs (thread_p, old_wait_msecs); + PERF_UTIME_TRACKER_TIME (thread_p, &time_find_page_best_space, PSTAT_HF_HEAP_FIND_PAGE_BEST_SPACE); return found; } @@ -3374,7 +3401,9 @@ heap_stats_find_best_page (THREAD_ENTRY * thread_p, const HFID * hfid, int neede float other_high_best_ratio; PGBUF_WATCHER hdr_page_watcher; int error_code = NO_ERROR; + PERF_UTIME_TRACKER time_find_best_page = PERF_UTIME_TRACKER_INITIALIZER; + PERF_UTIME_TRACKER_START (thread_p, &time_find_best_page); /* * Try to use the space cache for as much information as possible to avoid * fetching and updating the header 
page a lot. @@ -3403,7 +3432,7 @@ heap_stats_find_best_page (THREAD_ENTRY * thread_p, const HFID * hfid, int neede { /* something went wrong. Unable to fetch header page */ ASSERT_ERROR (); - return NULL; + goto error; } assert (hdr_page_watcher.pgptr != NULL); @@ -3413,7 +3442,7 @@ heap_stats_find_best_page (THREAD_ENTRY * thread_p, const HFID * hfid, int neede { assert (false); pgbuf_ordered_unfix (thread_p, &hdr_page_watcher); - return NULL; + goto error; } heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data; @@ -3447,7 +3476,7 @@ heap_stats_find_best_page (THREAD_ENTRY * thread_p, const HFID * hfid, int neede ASSERT_ERROR (); assert (pg_watcher->pgptr == NULL); pgbuf_ordered_unfix (thread_p, &hdr_page_watcher); - return NULL; + goto error; } if (pg_watcher->pgptr != NULL) { @@ -3504,7 +3533,7 @@ heap_stats_find_best_page (THREAD_ENTRY * thread_p, const HFID * hfid, int neede { pgbuf_ordered_unfix (thread_p, &hdr_page_watcher); ASSERT_ERROR (); - return NULL; + goto error; } } while (num_pages_found == 0 && try_sync <= 2); @@ -3527,7 +3556,7 @@ heap_stats_find_best_page (THREAD_ENTRY * thread_p, const HFID * hfid, int neede { ASSERT_ERROR (); pgbuf_ordered_unfix (thread_p, &hdr_page_watcher); - return NULL; + goto error; } assert (pg_watcher->pgptr != NULL || er_errid () == ER_INTERRUPTED || er_errid () == ER_FILE_NOT_ENOUGH_PAGES_IN_DATABASE); @@ -3537,7 +3566,14 @@ heap_stats_find_best_page (THREAD_ENTRY * thread_p, const HFID * hfid, int neede log_skip_logging (thread_p, &addr_hdr); pgbuf_ordered_set_dirty_and_free (thread_p, &hdr_page_watcher); + PERF_UTIME_TRACKER_TIME (thread_p, &time_find_best_page, PSTAT_HF_HEAP_FIND_BEST_PAGE); + return pg_watcher->pgptr; + +error: + PERF_UTIME_TRACKER_TIME (thread_p, &time_find_best_page, PSTAT_HF_HEAP_FIND_BEST_PAGE); + + return NULL; } /* @@ -3582,16 +3618,10 @@ heap_stats_sync_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_ bool search_all = false; PGBUF_WATCHER pg_watcher; PGBUF_WATCHER old_pg_watcher; 
-#if defined (CUBRID_DEBUG) - TSC_TICKS start_tick, end_tick; - TSCTIMEVAL tv_diff; + PERF_UTIME_TRACKER timer_sync_best_space = PERF_UTIME_TRACKER_INITIALIZER; - float elapsed; + PERF_UTIME_TRACKER_START (thread_p, &timer_sync_best_space); - tsc_getticks (&start_tick); -#endif /* CUBRID_DEBUG */ - - perfmon_inc_stat (thread_p, PSTAT_HEAP_NUM_STATS_SYNC_BESTSPACE); PGBUF_INIT_WATCHER (&pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid); PGBUF_INIT_WATCHER (&old_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid); @@ -3809,7 +3839,7 @@ heap_stats_sync_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_ * used to handle "select count(*) from table". */ if (scan_all == false && num_high_best == 0 && heap_hdr->estimates.num_second_best == 0) { - return 0; + goto end; } if (num_high_best < HEAP_NUM_BEST_SPACESTATS) @@ -3860,15 +3890,8 @@ heap_stats_sync_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_ } } -#if defined (CUBRID_DEBUG) - tsc_getticks (&end_tick); - tsc_elapsed_time_usec (&tv_diff, end_tick, start_tick); - elapsed = (float) tv_diff.tv_sec * 1000000; - elapsed += (float) tv_diff.tv_usec; - elapsed /= 1000000; - - er_log_debug (ARG_FILE_LINE, "heap_stats_sync_bestspace: elapsed time %.6f", elapsed); -#endif /* CUBRID_DEBUG */ +end: + PERF_UTIME_TRACKER_TIME (thread_p, &timer_sync_best_space, PSTAT_HEAP_STATS_SYNC_BESTSPACE); return num_high_best; } @@ -17216,7 +17239,7 @@ heap_eval_function_index (THREAD_ENTRY * thread_p, FUNCTION_INDEX_INFO * func_in index = &(attr_info->last_classrepr->indexes[btid_index]); if (func_pred_cache) { - func_pred = (FUNC_PRED *) func_pred_cache->func_pred; + func_pred = func_pred_cache->func_pred; cache_attr_info = func_pred->cache_attrinfo; nr_atts = index->n_atts; } @@ -17247,8 +17270,8 @@ heap_eval_function_index (THREAD_ENTRY * thread_p, FUNCTION_INDEX_INFO * func_in expr_stream_size = func_index_info->expr_stream_size; nr_atts = n_atts; atts = att_ids; - cache_attr_info = ((FUNC_PRED *) 
func_index_info->expr)->cache_attrinfo; - func_pred = (FUNC_PRED *) func_index_info->expr; + cache_attr_info = func_index_info->expr->cache_attrinfo; + func_pred = func_index_info->expr; } if (func_index_info == NULL) @@ -17256,8 +17279,7 @@ heap_eval_function_index (THREAD_ENTRY * thread_p, FUNCTION_INDEX_INFO * func_in /* insert case, read the values */ if (func_pred == NULL) { - if (stx_map_stream_to_func_pred (thread_p, (FUNC_PRED **) (&func_pred), expr_stream, expr_stream_size, - &unpack_info)) + if (stx_map_stream_to_func_pred (thread_p, &func_pred, expr_stream, expr_stream_size, &unpack_info)) { error = ER_FAILED; goto end; @@ -17280,9 +17302,8 @@ heap_eval_function_index (THREAD_ENTRY * thread_p, FUNCTION_INDEX_INFO * func_in attrinfo_clear = true; } - error = - fetch_peek_dbval (thread_p, func_pred->func_regu, NULL, &cache_attr_info->class_oid, &cache_attr_info->inst_oid, - NULL, &res); + error = fetch_peek_dbval (thread_p, func_pred->func_regu, NULL, &cache_attr_info->class_oid, + &cache_attr_info->inst_oid, NULL, &res); if (error == NO_ERROR) { pr_clone_value (res, result); @@ -17391,8 +17412,8 @@ heap_init_func_pred_unpack_info (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * } } - if (stx_map_stream_to_func_pred (thread_p, (FUNC_PRED **) (&(fi_preds[i].func_pred)), - fi_info->expr_stream, fi_info->expr_stream_size, &(fi_preds[i].unpack_info))) + if (stx_map_stream_to_func_pred (thread_p, &fi_preds[i].func_pred, fi_info->expr_stream, + fi_info->expr_stream_size, &fi_preds[i].unpack_info)) { error_status = ER_FAILED; goto error; @@ -17413,7 +17434,7 @@ heap_init_func_pred_unpack_info (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * } if (heap_attrinfo_start (thread_p, class_oid, idx->n_atts, att_ids, - ((FUNC_PRED *) fi_preds[i].func_pred)->cache_attrinfo) != NO_ERROR) + fi_preds[i].func_pred->cache_attrinfo) != NO_ERROR) { error_status = ER_FAILED; goto error; @@ -17478,10 +17499,10 @@ heap_free_func_pred_unpack_info (THREAD_ENTRY * thread_p, int 
n_indexes, FUNC_PR { if (attr_info_started == NULL || attr_info_started[i]) { - assert (((FUNC_PRED *) func_indx_preds[i].func_pred)->cache_attrinfo); - (void) heap_attrinfo_end (thread_p, ((FUNC_PRED *) func_indx_preds[i].func_pred)->cache_attrinfo); + assert (func_indx_preds[i].func_pred->cache_attrinfo); + (void) heap_attrinfo_end (thread_p, func_indx_preds[i].func_pred->cache_attrinfo); } - (void) qexec_clear_func_pred (thread_p, (FUNC_PRED *) func_indx_preds[i].func_pred); + (void) qexec_clear_func_pred (thread_p, func_indx_preds[i].func_pred); } if (func_indx_preds[i].unpack_info) diff --git a/src/storage/heap_file.h b/src/storage/heap_file.h index 228f2952a19..f13f98077d8 100644 --- a/src/storage/heap_file.h +++ b/src/storage/heap_file.h @@ -181,9 +181,8 @@ struct heap_hfid_table_entry FILE_TYPE ftype; /* value - FILE_HEAP or FILE_HEAP_REUSE_SLOTS */ }; - - - +// forward declaration +struct func_pred; typedef struct function_index_info FUNCTION_INDEX_INFO; struct function_index_info @@ -192,13 +191,13 @@ struct function_index_info int expr_stream_size; int col_id; int attr_index_start; - void *expr; + struct func_pred *expr; }; typedef struct func_pred_unpack_info FUNC_PRED_UNPACK_INFO; struct func_pred_unpack_info { - void *func_pred; + struct func_pred *func_pred; void *unpack_info; }; diff --git a/src/storage/page_buffer.c b/src/storage/page_buffer.c index 4881daecb2d..9d211384e38 100644 --- a/src/storage/page_buffer.c +++ b/src/storage/page_buffer.c @@ -3038,7 +3038,7 @@ pgbuf_flush_all_helper (THREAD_ENTRY * thread_p, VOLID volid, bool is_unfixed_on if (is_set_lsa_as_null) { /* set PageLSA as NULL value */ - LSA_SET_INIT_NONTEMP (&bufptr->iopage_buffer->iopage.prv.lsa); + fileio_init_lsa_of_page (&bufptr->iopage_buffer->iopage, IO_PAGESIZE); } /* flush */ @@ -4438,13 +4438,14 @@ pgbuf_set_lsa (THREAD_ENTRY * thread_p, PAGE_PTR pgptr, const LOG_LSA * lsa_ptr) */ if (pgbuf_is_temporary_volume (bufptr->vpid.volid) == true) { - LSA_SET_INIT_TEMP 
(&bufptr->iopage_buffer->iopage.prv.lsa); + fileio_init_lsa_of_temp_page (&bufptr->iopage_buffer->iopage, IO_PAGESIZE); if (logtb_is_current_active (thread_p)) { return NULL; } } - LSA_COPY (&bufptr->iopage_buffer->iopage.prv.lsa, lsa_ptr); + + fileio_set_page_lsa (&bufptr->iopage_buffer->iopage, lsa_ptr, IO_PAGESIZE); /* * If this is the first time the page is set dirty, record the new LSA @@ -4500,7 +4501,7 @@ pgbuf_reset_temp_lsa (PAGE_PTR pgptr) PGBUF_BCB *bufptr; CAST_PGPTR_TO_BFPTR (bufptr, pgptr); - LSA_SET_INIT_TEMP (&bufptr->iopage_buffer->iopage.prv.lsa); + fileio_init_lsa_of_temp_page (&bufptr->iopage_buffer->iopage, IO_PAGESIZE); } /* @@ -4728,7 +4729,7 @@ pgbuf_set_lsa_as_temporary (THREAD_ENTRY * thread_p, PAGE_PTR pgptr) CAST_PGPTR_TO_BFPTR (bufptr, pgptr); assert (!VPID_ISNULL (&bufptr->vpid)); - LSA_SET_INIT_TEMP (&bufptr->iopage_buffer->iopage.prv.lsa); + fileio_init_lsa_of_temp_page (&bufptr->iopage_buffer->iopage, IO_PAGESIZE); pgbuf_set_dirty_buffer_ptr (thread_p, bufptr); } @@ -4760,7 +4761,8 @@ pgbuf_set_lsa_as_permanent (THREAD_ENTRY * thread_p, PAGE_PTR pgptr) restart_lsa = log_get_restart_lsa (); } - LSA_COPY (&bufptr->iopage_buffer->iopage.prv.lsa, restart_lsa); + fileio_set_page_lsa (&bufptr->iopage_buffer->iopage, restart_lsa, IO_PAGESIZE); + pgbuf_set_dirty_buffer_ptr (thread_p, bufptr); } } @@ -4796,6 +4798,7 @@ pgbuf_set_bcb_page_vpid (PGBUF_BCB * bufptr, bool force_set_vpid) bufptr->iopage_buffer->iopage.prv.ptype = '\0'; bufptr->iopage_buffer->iopage.prv.pflag_reserve_1 = '\0'; + bufptr->iopage_buffer->iopage.prv.p_reserve_1 = 0; bufptr->iopage_buffer->iopage.prv.p_reserve_2 = 0; bufptr->iopage_buffer->iopage.prv.p_reserve_3 = 0; } @@ -4970,18 +4973,17 @@ pgbuf_initialize_bcb_table (void) /* link BCB and iopage buffer */ ioptr = PGBUF_FIND_IOPAGE_PTR (i); - LSA_SET_NULL (&ioptr->iopage.prv.lsa); + fileio_init_lsa_of_page (&ioptr->iopage, IO_PAGESIZE); /* Init Page identifier */ ioptr->iopage.prv.pageid = -1; ioptr->iopage.prv.volid 
= -1; -#if 1 /* do not delete me */ ioptr->iopage.prv.ptype = '\0'; ioptr->iopage.prv.pflag_reserve_1 = '\0'; + ioptr->iopage.prv.p_reserve_1 = 0; ioptr->iopage.prv.p_reserve_2 = 0; ioptr->iopage.prv.p_reserve_3 = 0; -#endif bufptr->iopage_buffer = ioptr; ioptr->bcb = bufptr; @@ -7714,7 +7716,7 @@ pgbuf_claim_bcb_for_fix (THREAD_ENTRY * thread_p, const VPID * vpid, PAGE_FETCH_ /* Check iff the first time to access */ if (!LSA_IS_INIT_TEMP (&bufptr->iopage_buffer->iopage.prv.lsa)) { - LSA_SET_INIT_TEMP (&bufptr->iopage_buffer->iopage.prv.lsa); + fileio_init_lsa_of_temp_page (&bufptr->iopage_buffer->iopage, IO_PAGESIZE); pgbuf_set_dirty_buffer_ptr (thread_p, bufptr); } } @@ -7750,11 +7752,11 @@ pgbuf_claim_bcb_for_fix (THREAD_ENTRY * thread_p, const VPID * vpid, PAGE_FETCH_ /* Don't need to read page from disk since it is a new page. */ if (pgbuf_is_temporary_volume (vpid->volid) == true) { - LSA_SET_INIT_TEMP (&bufptr->iopage_buffer->iopage.prv.lsa); + fileio_init_lsa_of_temp_page (&bufptr->iopage_buffer->iopage, IO_PAGESIZE); } else { - LSA_SET_INIT_NONTEMP (&bufptr->iopage_buffer->iopage.prv.lsa); + fileio_init_lsa_of_page (&bufptr->iopage_buffer->iopage, IO_PAGESIZE); } /* perm volume */ @@ -10347,6 +10349,7 @@ pgbuf_check_bcb_page_vpid (PGBUF_BCB * bufptr, bool maybe_deallocated) && bufptr->vpid.volid == bufptr->iopage_buffer->iopage.prv.volid)); assert (bufptr->iopage_buffer->iopage.prv.pflag_reserve_1 == '\0'); + assert (bufptr->iopage_buffer->iopage.prv.p_reserve_1 == 0); assert (bufptr->iopage_buffer->iopage.prv.p_reserve_2 == 0); assert (bufptr->iopage_buffer->iopage.prv.p_reserve_3 == 0); @@ -10374,7 +10377,7 @@ static void pgbuf_scramble (FILEIO_PAGE * iopage) { MEM_REGION_INIT (iopage, IO_PAGESIZE); - LSA_SET_NULL (&iopage->prv.lsa); + fileio_init_lsa_of_page (iopage, IO_PAGESIZE); /* Init Page identifier */ iopage->prv.pageid = -1; @@ -10382,6 +10385,7 @@ pgbuf_scramble (FILEIO_PAGE * iopage) iopage->prv.ptype = '\0'; iopage->prv.pflag_reserve_1 = '\0'; 
+ iopage->prv.p_reserve_1 = 0; iopage->prv.p_reserve_2 = 0; iopage->prv.p_reserve_3 = 0; } diff --git a/src/storage/statistics_cl.c b/src/storage/statistics_cl.c index f86e953d6b1..b0249259654 100644 --- a/src/storage/statistics_cl.c +++ b/src/storage/statistics_cl.c @@ -34,6 +34,7 @@ #include "work_space.h" #include "schema_manager.h" #include "network_interface_cl.h" +#include "tz_support.h" #include "db_date.h" static CLASS_STATS *stats_client_unpack_statistics (char *buffer); diff --git a/src/storage/storage_common.c b/src/storage/storage_common.c index 8213e6c2c1a..e654f07e516 100644 --- a/src/storage/storage_common.c +++ b/src/storage/storage_common.c @@ -35,12 +35,13 @@ #include "system_parameter.h" #include "environment_variable.h" #include "file_io.h" +#include "tz_support.h" #include "db_date.h" #include "dbtype.h" /* RESERVED_SIZE_IN_PAGE should be aligned */ -#define RESERVED_SIZE_IN_PAGE sizeof(FILEIO_PAGE_RESERVED) +#define RESERVED_SIZE_IN_PAGE (sizeof (FILEIO_PAGE_RESERVED) + sizeof (FILEIO_PAGE_WATERMARK)) static PGLENGTH db_Io_page_size = IO_DEFAULT_PAGE_SIZE; static PGLENGTH db_Log_page_size = IO_DEFAULT_PAGE_SIZE; diff --git a/src/storage/storage_common.h b/src/storage/storage_common.h index 64a64b689d2..447a65c8bca 100644 --- a/src/storage/storage_common.h +++ b/src/storage/storage_common.h @@ -36,9 +36,9 @@ #include #include "porting.h" -#include "dbdef.h" #include "dbtype_def.h" #include "sha1.h" +#include "cache_time.h" /* LIMITS AND NULL VALUES ON DISK RELATED DATATYPES */ @@ -108,6 +108,7 @@ struct log_lsa }; typedef struct log_lsa LOG_LSA; /* Log address identifier */ + STATIC_INLINE void LSA_COPY (LOG_LSA * plsa1, const LOG_LSA * plsa2) { @@ -115,11 +116,19 @@ LSA_COPY (LOG_LSA * plsa1, const LOG_LSA * plsa2) plsa1->offset = plsa2->offset; } -#define LSA_SET_NULL(lsa_ptr)\ - do { \ - (lsa_ptr)->pageid = NULL_PAGEID; \ - (lsa_ptr)->offset = NULL_OFFSET; \ - } while(0) +STATIC_INLINE void +LSA_SET_NULL (LOG_LSA * lsa_ptr) +{ + 
lsa_ptr->pageid = NULL_PAGEID; + lsa_ptr->offset = NULL_OFFSET; +} + +STATIC_INLINE void +LSA_SET_TEMP_LSA (LOG_LSA * lsa_ptr) +{ + lsa_ptr->pageid = NULL_PAGEID - 1; + lsa_ptr->offset = NULL_OFFSET - 1; +} #define LSA_INITIALIZER {NULL_PAGEID, NULL_OFFSET} @@ -1054,9 +1063,11 @@ typedef enum T_JSON_TYPE, T_JSON_EXTRACT, T_JSON_VALID, + T_JSON_QUOTE, + T_JSON_UNQUOTE, T_JSON_LENGTH, T_JSON_DEPTH, - T_JSON_SEARCH, + T_JSON_PRETTY, } OPERATOR_TYPE; /* arithmetic operator types */ typedef enum @@ -1074,6 +1085,8 @@ typedef enum PT_RANK, PT_DENSE_RANK, PT_NTILE, + PT_JSON_ARRAYAGG, + PT_JSON_OBJECTAGG, PT_TOP_AGG_FUNC, /* only aggregate functions should be below PT_TOP_AGG_FUNC */ @@ -1091,9 +1104,10 @@ typedef enum /* "normal" functions, arguments are values */ F_SET, F_MULTISET, F_SEQUENCE, F_VID, F_GENERIC, F_CLASS_OF, - F_INSERT_SUBSTRING, F_ELT, F_JSON_OBJECT, F_JSON_ARRAY, F_JSON_MERGE, + F_INSERT_SUBSTRING, F_ELT, F_JSON_OBJECT, F_JSON_ARRAY, F_JSON_MERGE, F_JSON_MERGE_PATCH, F_JSON_INSERT, F_JSON_REMOVE, F_JSON_ARRAY_APPEND, F_JSON_GET_ALL_PATHS, - F_JSON_REPLACE, F_JSON_SET, F_JSON_KEYS, + F_JSON_REPLACE, F_JSON_SET, F_JSON_KEYS, F_JSON_ARRAY_INSERT, F_JSON_SEARCH, + F_JSON_CONTAINS_PATH, /* only for FIRST_VALUE. LAST_VALUE, NTH_VALUE analytic functions */ PT_FIRST_VALUE, PT_LAST_VALUE, PT_NTH_VALUE, @@ -1109,16 +1123,6 @@ typedef enum /* QUERY */ /************************************************************************/ -/* - * CACHE TIME RELATED DEFINITIONS - */ -typedef struct cache_time CACHE_TIME; -struct cache_time -{ - int sec; - int usec; -}; - #define CACHE_TIME_AS_ARGS(ct) (ct)->sec, (ct)->usec #define CACHE_TIME_EQ(T1, T2) \ @@ -1371,4 +1375,47 @@ enum REC_4BIT_TYPE_MAX = REC_RESERVED_TYPE_15 }; +typedef struct dbdef_vol_ext_info DBDEF_VOL_EXT_INFO; +struct dbdef_vol_ext_info +{ + const char *path; /* Directory where the volume extension is created. If NULL, is given, it defaults to + * the system parameter. 
*/ + const char *name; /* Name of the volume extension If NULL, system generates one like "db".ext"volid" + * where "db" is the database name and "volid" is the volume identifier to be assigned + * to the volume extension. */ + const char *comments; /* Comments which are included in the volume extension header. */ + int max_npages; /* Maximum pages of this volume */ + int extend_npages; /* Number of pages to extend - used for generic volume only */ + INT32 nsect_total; /* DKNSECTS type, number of sectors for volume extension */ + INT32 nsect_max; /* DKNSECTS type, maximum number of sectors for volume extension */ + int max_writesize_in_sec; /* the amount of volume written per second */ + DB_VOLPURPOSE purpose; /* The purpose of the volume extension. One of the following: - + * DB_PERMANENT_DATA_PURPOSE, DB_TEMPORARY_DATA_PURPOSE */ + DB_VOLTYPE voltype; /* Permanent of temporary volume type */ + bool overwrite; +}; + +#define SERVER_SESSION_KEY_SIZE 8 + +typedef enum +{ + DB_PARTITION_HASH = 0, + DB_PARTITION_RANGE, + DB_PARTITION_LIST +} DB_PARTITION_TYPE; + +typedef enum +{ + DB_NOT_PARTITIONED_CLASS = 0, + DB_PARTITIONED_CLASS = 1, + DB_PARTITION_CLASS = 2 +} DB_CLASS_PARTITION_TYPE; + +// TODO: move me in a proper place +typedef enum +{ + KILLSTMT_TRAN = 0, + KILLSTMT_QUERY = 1, +} KILLSTMT_TYPE; + #endif /* _STORAGE_COMMON_H_ */ diff --git a/src/thread/thread_entry_task.cpp b/src/thread/thread_entry_task.cpp index b26ba4a4db1..7d06a8939f3 100644 --- a/src/thread/thread_entry_task.cpp +++ b/src/thread/thread_entry_task.cpp @@ -44,6 +44,7 @@ namespace cubthread context.type = TT_WORKER; #if defined (SERVER_MODE) context.m_status = entry::status::TS_RUN; + context.shutdown = false; #endif // SERVER_MODE context.get_error_context ().register_thread_local (); @@ -83,7 +84,9 @@ namespace cubthread context.tran_index = NULL_TRAN_INDEX; // clear transaction ID #if defined (SERVER_MODE) context.resume_status = THREAD_RESUME_NONE; + context.shutdown = false; #endif // 
SERVER_MODE + on_recycle (context); } diff --git a/src/thread/thread_worker_pool.hpp b/src/thread/thread_worker_pool.hpp index eda56a66538..7e560987b2f 100644 --- a/src/thread/thread_worker_pool.hpp +++ b/src/thread/thread_worker_pool.hpp @@ -1162,7 +1162,7 @@ namespace cubthread void worker_pool::core::worker::start_thread (void) { - assert (!m_has_thread); + assert (m_has_thread); // // the next code tries to help visualizing any system errors that can occur during create or detach in debug diff --git a/src/transaction/boot_cl.c b/src/transaction/boot_cl.c index 999aaac8e12..c837d8100aa 100644 --- a/src/transaction/boot_cl.c +++ b/src/transaction/boot_cl.c @@ -2988,7 +2988,8 @@ boot_add_data_type (MOP class_mop) NULL /* TABLE */ , "BIGINT", "DATETIME", "BLOB", "CLOB", "ENUM", - "TIMESTAMPTZ", "TIMESTAMPLTZ", "DATETIMETZ", "DATETIMELTZ" + "TIMESTAMPTZ", "TIMESTAMPLTZ", "DATETIMETZ", "DATETIMELTZ", + "JSON" }; for (i = 0; i < DB_TYPE_LAST; i++) diff --git a/src/transaction/boot_sr.c b/src/transaction/boot_sr.c index 875a8fa5b0b..ba66329d134 100644 --- a/src/transaction/boot_sr.c +++ b/src/transaction/boot_sr.c @@ -355,7 +355,7 @@ xboot_find_number_temp_volumes (THREAD_ENTRY * thread_p) /* * xboot_find_last_permanent () - find the volid of last permanent volume - * + * * return : volid of last permanent volume */ VOLID @@ -373,7 +373,7 @@ xboot_find_last_permanent (THREAD_ENTRY * thread_p) /* * xboot_peek_last_permanent () - peek the volid of last permanent volume - * + * * return : volid of last permanent volume * NOTE: this function does not wait for extensions to finish */ @@ -575,7 +575,7 @@ boot_remove_useless_path_separator (const char *path, char *new_path) assert (path != NULL); assert (new_path != NULL); - /* + /* * Before transform. 
* / h o m e 3 / / w o r k / c u b r i d / / / w o r k / * @@ -595,7 +595,7 @@ boot_remove_useless_path_separator (const char *path, char *new_path) { *new_path++ = PATH_SEPARATOR; #if defined(WINDOWS) - /* + /* * In Windows/NT, * If first duplicated PATH_SEPARATORs are appeared, they are survived. * For example, @@ -611,7 +611,7 @@ boot_remove_useless_path_separator (const char *path, char *new_path) /* Initialize separator counter again. */ slash_num = 0; - /* + /* * If current character is PATH_SEPARATOR, * skip after increasing separator counter. * If current character is normal character, copy to new_path. @@ -624,7 +624,7 @@ boot_remove_useless_path_separator (const char *path, char *new_path) } else { - /* + /* * If there is consumed slash, append PATH_SEPARATOR. * Initialize separator counter. */ @@ -687,7 +687,7 @@ boot_parse_add_volume_extensions (THREAD_ENTRY * thread_p, const char *filename_ return ER_LOG_USER_FILE_UNKNOWN; } - /* + /* * Get a line * Continue parsing even in case of error, so that we can indicate as * many errors as possible. @@ -711,7 +711,7 @@ boot_parse_add_volume_extensions (THREAD_ENTRY * thread_p, const char *filename_ line[ext_npages + 1] = '\0'; } - /* + /* * Parse the line */ @@ -723,7 +723,7 @@ boot_parse_add_volume_extensions (THREAD_ENTRY * thread_p, const char *filename_ while (true) { - /* + /* * Read token.. skip leading whitespace and comments */ while (char_isspace (line[0])) @@ -746,7 +746,7 @@ boot_parse_add_volume_extensions (THREAD_ENTRY * thread_p, const char *filename_ line[0] = '\0'; line++; - /* + /* * Skip any whitespace before the value. 
*/ @@ -757,7 +757,7 @@ boot_parse_add_volume_extensions (THREAD_ENTRY * thread_p, const char *filename_ token_value = line; - /* + /* * If string in " xxx " or ' xxxx ' find its delimiter */ @@ -845,7 +845,7 @@ boot_parse_add_volume_extensions (THREAD_ENTRY * thread_p, const char *filename_ } } - /* + /* * Add the volume */ if (error_code != NO_ERROR || (ext_name == NULL && ext_path == NULL && ext_comments == NULL && ext_npages == 0)) @@ -1076,7 +1076,7 @@ boot_find_rest_permanent_volumes (THREAD_ENTRY * thread_p, bool newvolpath, bool num_vols = 0; /* First the primary volume, then the rest of the volumes */ - /* + /* * Do not assume that all the volumes are mounted. This function may be called to mount the volumes. * Thus, request to current volume for the next volume instead of going directly through the volume identifier. */ @@ -1160,7 +1160,7 @@ boot_find_rest_temp_volumes (THREAD_ENTRY * thread_p, VOLID volid, int num_vols; bool go_to_access; - /* + /* * Get the name of the extension: ext_path|dbname|"ext"|volid */ @@ -1262,7 +1262,7 @@ boot_check_permanent_volumes (THREAD_ENTRY * thread_p) char next_vol_fullname[PATH_MAX]; /* Next volume name */ const char *vlabel; - /* + /* * Don't use volinfo .. or could not find volinfo */ @@ -1270,7 +1270,7 @@ boot_check_permanent_volumes (THREAD_ENTRY * thread_p) num_vols = 0; strcpy (next_vol_fullname, boot_Db_full_name); - /* + /* * Do not assume that all the volumes are mounted. This function may be * called to mount the volumes. Thus, request to current volume for the * next volume instead of going directly through the volume identifier. @@ -1288,7 +1288,7 @@ boot_check_permanent_volumes (THREAD_ENTRY * thread_p) if (util_compare_filepath (next_vol_fullname, vlabel) != 0) { - /* + /* * Names are different. The database was renamed outside the domain of * the database (e.g., in Unix), or this is not a database. 
* If volume information is not present, assume that this is not a @@ -1345,7 +1345,7 @@ boot_mount (THREAD_ENTRY * thread_p, VOLID volid, const char *vlabel, void *igno if (util_compare_filepath (check_vlabel, vlabel) != 0) { - /* + /* * Names are different. The database was renamed outside the domain of * the database (e.g., in Unix), or this is not a database. * If volume information is not present, assume that this is not a @@ -1555,7 +1555,7 @@ xboot_initialize_server (const BOOT_CLIENT_CREDENTIAL * client_credential, BOOT_ goto exit_on_error; } - /* + /* * Make sure that the db_path and log_path and lob_path are the canonicalized * absolute pathnames */ @@ -1564,7 +1564,7 @@ xboot_initialize_server (const BOOT_CLIENT_CREDENTIAL * client_credential, BOOT_ memset (log_pathbuf, 0, sizeof (log_pathbuf)); memset (lob_pathbuf, 0, sizeof (lob_pathbuf)); - /* + /* * for db path, * convert to absolute path, remove useless PATH_SEPARATOR */ @@ -1581,7 +1581,7 @@ xboot_initialize_server (const BOOT_CLIENT_CREDENTIAL * client_credential, BOOT_ boot_remove_useless_path_separator (db_path, db_pathbuf); db_path = db_pathbuf; - /* + /* * for log path, * convert to absolute path, remove useless PATH_SEPARATOR */ @@ -1598,7 +1598,7 @@ xboot_initialize_server (const BOOT_CLIENT_CREDENTIAL * client_credential, BOOT_ boot_remove_useless_path_separator (log_path, log_pathbuf); log_path = log_pathbuf; - /* + /* * for lob path, * convert to absolute path, remove useless PATH_SEPARATOR */ @@ -1644,13 +1644,13 @@ xboot_initialize_server (const BOOT_CLIENT_CREDENTIAL * client_credential, BOOT_ } lob_path = lob_pathbuf; - /* + /* * Compose the full name of the database */ snprintf (boot_Db_full_name, sizeof (boot_Db_full_name), "%s%c%s", db_path, PATH_SEPARATOR, client_credential->db_name); - /* + /* * Initialize error structure, critical section, slotted page, heap, and * recovery managers */ @@ -1673,7 +1673,7 @@ xboot_initialize_server (const BOOT_CLIENT_CREDENTIAL * 
client_credential, BOOT_ log_prefix = fileio_get_base_file_name (client_credential->db_name); - /* + /* * Find logging information to create the log volume. If the page size is * not the same as the one in production mode, adjust the number of pages * allocated. @@ -1704,7 +1704,7 @@ xboot_initialize_server (const BOOT_CLIENT_CREDENTIAL * client_credential, BOOT_ } /* *INDENT-ON* */ - /* + /* * get the database directory information in write mode. */ @@ -1743,7 +1743,7 @@ xboot_initialize_server (const BOOT_CLIENT_CREDENTIAL * client_credential, BOOT_ } else { - /* + /* * Delete the database.. to make sure that all backups, log archives, and * so on are removed... then continue... * @@ -1872,7 +1872,7 @@ xboot_initialize_server (const BOOT_CLIENT_CREDENTIAL * client_credential, BOOT_ } #if defined(WINDOWS) && !defined(DONT_USE_MANDATORY_LOCK_IN_WINDOWS) - /* Under Windows/NT, it appears that locking a file prevents a subsequent open for write by the same process. + /* Under Windows/NT, it appears that locking a file prevents a subsequent open for write by the same process. * The cfg_write_directory will never succeed as long as the file is "mounted" by fileio_mount(). To allow * the cubrid.db file to be updated, dismount before calling cfg_. Note that this leaves an extremely small * windows where another process could steal our lock. */ @@ -2098,7 +2098,7 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db goto error; } - /* + /* * Make sure that there is a database.txt and that the desired database * exists. We do not want to lock the database.txt at this point since we * are only reading it. However, if we do not find the desired database, @@ -2120,7 +2120,7 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db if (dir == NULL || ((db = cfg_find_db_list (dir, db_name)) == NULL)) { - /* + /* * Make sure that nobody was in the process of writing the * database.txt when we got a snapshot of it. 
*/ @@ -2191,7 +2191,7 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db boot_Lob_path[0] = '\0'; } - /* + /* * Initialize error structure, critical section, slotted page, heap, and * recovery managers */ @@ -2272,6 +2272,13 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db tsc_init (); #endif /* !SERVER_MODE */ + // Initialize java stored procedure server + error_code = jsp_start_server (db_name, db->pathname); + if (error_code != NO_ERROR) + { + goto error; + } + /* *INDENT-OFF* */ #if defined (SA_MODE) // thread_manager was not initialized @@ -2288,7 +2295,7 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db pr_Enable_string_compression = prm_get_bool_value (PRM_ID_ENABLE_STRING_COMPRESSION); - /* + /* * Compose the full name of the database and find location of logs */ @@ -2308,12 +2315,12 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db /* Initialize the transaction table */ logtb_define_trantable (thread_p, -1, -1); - /* + /* * How to restart the system ? */ if (from_backup != false) { - /* + /* * RESTART FROM BACKUP */ #if defined(SA_MODE) @@ -2340,7 +2347,7 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db goto error; } - /* + /* * Now continue the normal restart process. At this point the data volumes * are ok. 
However, some recovery may need to take place */ @@ -2441,7 +2448,7 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db goto error; } - /* + /* * Now restart the recovery manager and execute any recovery actions */ @@ -2467,7 +2474,7 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db goto error; } - /* + /* * Initialize the catalog manager, the query evaluator, and install meta * classes */ @@ -2511,7 +2518,7 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db goto error; } - /* + /* * Initialize system locale using values from db_root system table */ error_code = @@ -2562,7 +2569,7 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db } #endif /* SERVER_MODE */ - /* + /* * Allocate a temporary transaction index to finish further system related * changes such as removal of temporary volumes and modifications of * system parameter @@ -2580,7 +2587,7 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db goto error; } - /* + /* * Remove any database temporary volumes */ @@ -2654,12 +2661,6 @@ boot_restart_server (THREAD_ENTRY * thread_p, bool print_restart, const char *db } } - error_code = jsp_start_server (db_name, db->pathname); - if (error_code != NO_ERROR) - { - goto error; - } - #if defined (SA_MODE) /* Completely vacuum database. */ if (r_args == NULL || r_args->is_restore_from_backup == false) @@ -2852,7 +2853,7 @@ xboot_restart_from_backup (THREAD_ENTRY * thread_p, int print_restart, const cha prm_set_bool_value (PRM_ID_DBFILES_PROTECT, false); - /* + /* * We need to do some initialization that normally happens in * boot_restart_server(), but only if the SERVER_MODE CPP variable is * defined. 
Unfortunately, if we're here, then SERVER_MODE is not defined @@ -3236,7 +3237,7 @@ xboot_notify_unregister_client (THREAD_ENTRY * thread_p, int tran_index) conn = thread_p->conn_entry; - /* sboot_notify_unregister_client should hold conn->rmutex. + /* sboot_notify_unregister_client should hold conn->rmutex. * Please see the comment of sboot_notify_unregister_client. */ @@ -3294,7 +3295,7 @@ boot_check_db_at_num_shutdowns (bool force_nshutdowns) return; } - /* + /* * Check the consistency of the database when the client is unregister */ @@ -3352,7 +3353,7 @@ boot_check_db_at_num_shutdowns (bool force_nshutdowns) "Some inconsistencies were detected in your database.\n Please consult error_log file = %s" " for additional information\n", tmpname); fflush (stdout); - /* + /* * The following is added so we can attach to the debugger on * a fatal error. It is of great help to stop execution when * running a set of sql scripts. (That is, find the script that @@ -3711,14 +3712,14 @@ boot_server_all_finalize (THREAD_ENTRY * thread_p, ER_FINAL_CODE is_er_final, * * return : NO_ERROR if all OK, ER_ status otherwise * - * backup_path(in): Location where information volumes are backed up. If NULL is given, the following defaults + * backup_path(in): Location where information volumes are backed up. If NULL is given, the following defaults * are assumed to back up each information volume: - * - If file "fileof_vols_and_backup_paths" is given, the path to backup each volume is found in + * - If file "fileof_vols_and_backup_paths" is given, the path to backup each volume is found in * this file. * - All information volumes are backed up on the same location where the log files are located. 
* backup_level(in): backup levels allowed: 0 - Full (default), * 1 - Incremental1, 2 - Incremental - * deleted_unneeded_logarchives(in): Whether to remove log archives that are not needed any longer to recovery from + * deleted_unneeded_logarchives(in): Whether to remove log archives that are not needed any longer to recovery from * crashes when the backup just created is used. * backup_verbose_file(in): verbose mode file path * num_threads: number of threads @@ -3726,8 +3727,8 @@ boot_server_all_finalize (THREAD_ENTRY * thread_p, ER_FINAL_CODE is_er_final, * zip_level: compression level * sleep_msecs(in): * - * Note: A fuzzy backup of the database is taken. The backup is written into the given backup_path location. - * If the backup_path location is omitted (i.e, NULL is given), the log path location which was specified at + * Note: A fuzzy backup of the database is taken. The backup is written into the given backup_path location. + * If the backup_path location is omitted (i.e, NULL is given), the log path location which was specified at * database creation is used to store the backup. */ int @@ -3760,7 +3761,7 @@ xboot_backup (THREAD_ENTRY * thread_p, const char *backup_path, FILEIO_BACKUP_LE * - Each volume is copied to same place where the volume resides. * Note: This parameter should be NULL, if the above file is given. * fileof_vols_and_wherepaths(in): A file is given when the user decides to control the copy/rename of the volume by - * individual bases. That is, user decides to spread the volumes over several locations + * individual bases. That is, user decides to spread the volumes over several locations * and or to label the volumes with specific names. * Each volume entry consists of: volid from_fullvolname to_fullvolname * newdb_overwrite(in): Whether to overwrite the new database if it already exist. 
@@ -3795,7 +3796,7 @@ xboot_copy (REFPTR (THREAD_ENTRY, thread_p), const char *from_dbname, const char if (new_db_path == NULL || fileof_vols_and_copypaths != NULL) { - /* + /* * If a newdb path was given, it is ignored since only one option must * be specified */ @@ -3807,7 +3808,7 @@ xboot_copy (REFPTR (THREAD_ENTRY, thread_p), const char *from_dbname, const char } } - /* + /* * Make sure that the db_path and log_path are the canonicalized absolute * pathnames */ @@ -3918,7 +3919,7 @@ xboot_copy (REFPTR (THREAD_ENTRY, thread_p), const char *from_dbname, const char /* Make sure that the full path for the new database is not too long */ if ((int) (strlen (new_db_name) + strlen (new_db_path) + 2) > DB_MAX_PATH_LENGTH) { - /* + /* * db_path + db_name is too long */ er_set (ER_FATAL_ERROR_SEVERITY, ARG_FILE_LINE, ER_BO_FULL_DATABASE_NAME_IS_TOO_LONG, 3, new_db_path, new_db_name, @@ -3930,7 +3931,7 @@ xboot_copy (REFPTR (THREAD_ENTRY, thread_p), const char *from_dbname, const char /* Get the log prefix */ new_log_prefix = fileio_get_base_file_name (new_db_name); - /* + /* * get the database directory information in write mode */ if (cfg_maycreate_get_directory_filename (dbtxt_label) == NULL @@ -3974,7 +3975,7 @@ xboot_copy (REFPTR (THREAD_ENTRY, thread_p), const char *from_dbname, const char } else { - /* + /* * Delete the database.. to make sure that all backups, log archives, and * so on are removed... then continue... * Note: we do not call xboot_delete since it reverts a bunch of stuff. 
@@ -4021,13 +4022,13 @@ xboot_copy (REFPTR (THREAD_ENTRY, thread_p), const char *from_dbname, const char dir = NULL; } - /* + /* * Compose the full name of the new database */ COMPOSE_FULL_NAME (new_db_fullname, sizeof (new_db_fullname), new_db_path, new_db_name); - /* + /* * Copy the database */ @@ -4184,7 +4185,7 @@ xboot_soft_rename (THREAD_ENTRY * thread_p, const char *old_db_name, const char if (fileof_vols_and_renamepaths != NULL) { - /* + /* * If a newdb path was given, it is ignored since only one option must * be specified */ @@ -4198,7 +4199,7 @@ xboot_soft_rename (THREAD_ENTRY * thread_p, const char *old_db_name, const char if (new_db_path == NULL) { - /* + /* * Use the same location as the source database */ new_db_path = fileio_get_directory_path (allocdb_path, boot_Db_full_name); @@ -4216,7 +4217,7 @@ xboot_soft_rename (THREAD_ENTRY * thread_p, const char *old_db_name, const char if (new_log_path == NULL) { - /* + /* * Use the same log location as the source database */ new_log_path = fileio_get_directory_path (alloclog_path, log_Name_active); @@ -4232,7 +4233,7 @@ xboot_soft_rename (THREAD_ENTRY * thread_p, const char *old_db_name, const char } } - /* + /* * Make sure that the db_path and log_path are the canonicalized absolute * pathnames */ @@ -4276,7 +4277,7 @@ xboot_soft_rename (THREAD_ENTRY * thread_p, const char *old_db_name, const char /* Make sure that the full path for the new database is not too long */ if ((int) (strlen (new_db_name) + strlen (new_db_path) + 2) > DB_MAX_PATH_LENGTH) { - /* + /* * db_path + db_name is too long */ er_set (ER_FATAL_ERROR_SEVERITY, ARG_FILE_LINE, ER_BO_FULL_DATABASE_NAME_IS_TOO_LONG, 3, new_db_path, new_db_name, @@ -4288,7 +4289,7 @@ xboot_soft_rename (THREAD_ENTRY * thread_p, const char *old_db_name, const char /* Get the log prefix */ newlog_prefix = fileio_get_base_file_name (new_db_name); - /* + /* * get the database directory information in write mode */ if (cfg_maycreate_get_directory_filename 
(dbtxt_label) == NULL @@ -4334,13 +4335,13 @@ xboot_soft_rename (THREAD_ENTRY * thread_p, const char *old_db_name, const char goto end; } - /* + /* * Compose the full name of the new database */ COMPOSE_FULL_NAME (new_db_fullname, sizeof (new_db_fullname), new_db_path, new_db_name); - /* + /* * Rename the database */ @@ -4447,7 +4448,7 @@ xboot_delete (const char *db_name, bool force_delete, BOOT_SERVER_SHUTDOWN_MODE if (!BO_IS_SERVER_RESTARTED ()) { - /* + /* * Compose the full name of the database and find location of logs */ if (msgcat_init () != NO_ERROR) @@ -4485,7 +4486,7 @@ xboot_delete (const char *db_name, bool force_delete, BOOT_SERVER_SHUTDOWN_MODE /* Find the prefix for the database */ log_prefix = fileio_get_base_file_name (db_name); - /* + /* * get the database directory information in write mode. */ if (cfg_maycreate_get_directory_filename (dbtxt_label) == NULL) @@ -4513,7 +4514,7 @@ xboot_delete (const char *db_name, bool force_delete, BOOT_SERVER_SHUTDOWN_MODE if (error_code != NO_ERROR) { - /* + /* * If I cannot obtain a Lock on database.txt, it is better to quite at * this moment. We will not even perform a dirty delete. */ @@ -4538,14 +4539,14 @@ xboot_delete (const char *db_name, bool force_delete, BOOT_SERVER_SHUTDOWN_MODE goto error_dirty_delete; } - /* + /* * How can we perform the delete operation..without restarting the system * or restarted the system. 
*/ if (!BO_IS_SERVER_RESTARTED ()) { - /* + /* * Compose the full name of the database and find location of logs */ COMPOSE_FULL_NAME (boot_Db_full_name, sizeof (boot_Db_full_name), db->pathname, db_name); @@ -4754,7 +4755,7 @@ boot_create_all_volumes (THREAD_ENTRY * thread_p, const BOOT_CLIENT_CREDENTIAL * goto error; } - /* + /* * Initialize the database parameter table */ @@ -4885,7 +4886,7 @@ boot_create_all_volumes (THREAD_ENTRY * thread_p, const BOOT_CLIENT_CREDENTIAL * goto error; } - /* + /* * Create the rest of the other volumes if any */ @@ -4910,7 +4911,7 @@ boot_create_all_volumes (THREAD_ENTRY * thread_p, const BOOT_CLIENT_CREDENTIAL * goto error; } - /* + /* * Initialize the catalog manager, the query evaluator, and install meta * classes */ @@ -4990,7 +4991,7 @@ boot_remove_all_volumes (THREAD_ENTRY * thread_p, const char *db_fullname, const goto error_rem_allvols; } - /* + /* * How can we perform the delete operation..without restarting the system * or restarted the system. */ @@ -5010,7 +5011,7 @@ boot_remove_all_volumes (THREAD_ENTRY * thread_p, const char *db_fullname, const return ER_FAILED; } - /* + /* * Initialize error structure, critical section, slotted page, heap, and * recovery managers */ @@ -5024,7 +5025,7 @@ boot_remove_all_volumes (THREAD_ENTRY * thread_p, const char *db_fullname, const if (log_get_io_page_size (thread_p, db_fullname, log_path, log_prefix) == -1) { - /* + /* * There is something wrong with this database... 
We will only remove * as much as we can */ @@ -5151,7 +5152,7 @@ xboot_emergency_patch (const char *db_name, bool recreate_log, DKNPAGES log_npag } /* *INDENT-ON* */ - /* + /* * Compose the full name of the database and find location of logs */ if (cfg_read_directory (&dir, false) != NO_ERROR) @@ -5163,7 +5164,7 @@ xboot_emergency_patch (const char *db_name, bool recreate_log, DKNPAGES log_npag if (dir == NULL || ((db = cfg_find_db_list (dir, db_name)) == NULL)) { - /* + /* * Make sure that nobody was in the process of writing the * database.txt when we got a snapshot of it. */ @@ -5228,7 +5229,7 @@ xboot_emergency_patch (const char *db_name, bool recreate_log, DKNPAGES log_npag goto error_exit; } - /* + /* * Initialize error structure, critical section, slotted page, heap, and * recovery managers */ @@ -5239,7 +5240,7 @@ xboot_emergency_patch (const char *db_name, bool recreate_log, DKNPAGES log_npag { if (recreate_log != 0) { - /* + /* * User must indicate the database pagesize through its own environment */ (void) db_set_page_size (IO_DEFAULT_PAGE_SIZE, IO_DEFAULT_PAGE_SIZE); @@ -5363,7 +5364,7 @@ xboot_emergency_patch (const char *db_name, bool recreate_log, DKNPAGES log_npag log_restart_emergency (thread_p, boot_Db_full_name, log_path, log_prefix); } - /* + /* * Initialize the catalog manager, the query evaluator, and install meta * classes */ @@ -5464,7 +5465,7 @@ boot_find_new_db_path (char *db_pathbuf, const char *fileof_vols_and_wherepaths) if (fileof_vols_and_wherepaths != NULL) { - /* + /* * Obtain the new database path from where paths file */ where_paths_fp = fopen (fileof_vols_and_wherepaths, "r"); diff --git a/src/transaction/locator_cl.h b/src/transaction/locator_cl.h index 50c7d616e20..d8f0f1ac3f3 100644 --- a/src/transaction/locator_cl.h +++ b/src/transaction/locator_cl.h @@ -39,7 +39,6 @@ #include "work_space.h" #include "storage_common.h" #include "locator.h" -#include "dbdef.h" #include "replication.h" #define ONE_MFLUSH true diff --git 
a/src/transaction/locator_sr.c b/src/transaction/locator_sr.c index 0d94fc4fdaa..3af667e65b4 100644 --- a/src/transaction/locator_sr.c +++ b/src/transaction/locator_sr.c @@ -7643,7 +7643,7 @@ locator_add_or_remove_index_internal (THREAD_ENTRY * thread_p, RECDES * recdes, BTID btid; DB_VALUE *key_dbvalue, *key_ins_del = NULL; DB_VALUE dbvalue; - int dummy_unique; + int unique_pk; BTREE_UNIQUE_STATS *unique_stat_info; HEAP_IDX_ELEMENTS_INFO idx_info; char buf[DBVAL_BUFSIZE + MAX_ALIGNMENT], *aligned_buf; @@ -7774,6 +7774,16 @@ locator_add_or_remove_index_internal (THREAD_ENTRY * thread_p, RECDES * recdes, p_mvcc_rec_header = mvcc_rec_header; } + unique_pk = 0; + if (index->type == BTREE_UNIQUE || index->type == BTREE_REVERSE_UNIQUE) + { + unique_pk = BTREE_CONSTRAINT_UNIQUE; + } + else if (index->type == BTREE_PRIMARY_KEY) + { + unique_pk = BTREE_CONSTRAINT_UNIQUE | BTREE_CONSTRAINT_PRIMARY_KEY; + } + if (is_insert) { #if defined(ENABLE_SYSTEMTAP) @@ -7788,10 +7798,19 @@ locator_add_or_remove_index_internal (THREAD_ENTRY * thread_p, RECDES * recdes, } } - error_code = - btree_insert (thread_p, &btid, key_dbvalue, class_oid, inst_oid, op_type, unique_stat_info, - &dummy_unique, p_mvcc_rec_header); - + if (index->index_status == OR_ONLINE_INDEX_BUILDING_IN_PROGRESS) + { + /* Online index is currently loading. 
*/ + error_code = + btree_online_index_dispatcher (thread_p, &btid, key_dbvalue, class_oid, inst_oid, unique_pk, + BTREE_OP_ONLINE_INDEX_TRAN_INSERT, NULL); + } + else + { + error_code = + btree_insert (thread_p, &btid, key_dbvalue, class_oid, inst_oid, op_type, unique_stat_info, + &unique_pk, p_mvcc_rec_header); + } #if defined(ENABLE_SYSTEMTAP) CUBRID_IDX_INSERT_END (classname, index->btname, (error_code != NO_ERROR)); #endif /* ENABLE_SYSTEMTAP */ @@ -7804,15 +7823,25 @@ locator_add_or_remove_index_internal (THREAD_ENTRY * thread_p, RECDES * recdes, if (use_mvcc == true) { - /* in MVCC logical deletion means MVCC DEL_ID insertion */ - error_code = - btree_mvcc_delete (thread_p, &btid, key_dbvalue, class_oid, inst_oid, op_type, unique_stat_info, - &dummy_unique, p_mvcc_rec_header); + if (index->index_status == OR_ONLINE_INDEX_BUILDING_IN_PROGRESS) + { + /* Online index is currently loading. */ + error_code = + btree_online_index_dispatcher (thread_p, &btid, key_dbvalue, class_oid, inst_oid, + unique_pk, BTREE_OP_ONLINE_INDEX_TRAN_DELETE, NULL); + } + else + { + /* in MVCC logical deletion means MVCC DEL_ID insertion */ + error_code = + btree_mvcc_delete (thread_p, &btid, key_dbvalue, class_oid, inst_oid, op_type, unique_stat_info, + &unique_pk, p_mvcc_rec_header); + } } else { error_code = - btree_physical_delete (thread_p, &btid, key_dbvalue, inst_oid, class_oid, &dummy_unique, op_type, + btree_physical_delete (thread_p, &btid, key_dbvalue, inst_oid, class_oid, &unique_pk, op_type, unique_stat_info); if (error_code != NO_ERROR) { @@ -8143,7 +8172,7 @@ locator_update_index (THREAD_ENTRY * thread_p, RECDES * new_recdes, RECDES * old bool new_isnull, old_isnull; PR_TYPE *pr_type; OR_INDEX *index = NULL; - int i, j, k, num_btids, old_num_btids, unique; + int i, j, k, num_btids, old_num_btids, unique_pk; bool found_btid = true; BTREE_UNIQUE_STATS *unique_stat_info; HEAP_IDX_ELEMENTS_INFO new_idx_info; @@ -8178,6 +8207,7 @@ locator_update_index (THREAD_ENTRY * 
thread_p, RECDES * new_recdes, RECDES * old LOG_TDES *tdes; LOG_LSA preserved_repl_lsa; int tran_index; + BTID_INT btid_int; assert_release (class_oid != NULL); assert_release (!OID_ISNULL (class_oid)); @@ -8460,29 +8490,54 @@ locator_update_index (THREAD_ENTRY * thread_p, RECDES * new_recdes, RECDES * old mvcc_rec_header); p_mvcc_rec_header = mvcc_rec_header; } + + unique_pk = 0; + if (index->type == BTREE_UNIQUE || index->type == BTREE_REVERSE_UNIQUE) + { + unique_pk = BTREE_CONSTRAINT_UNIQUE; + } + else if (index->type == BTREE_PRIMARY_KEY) + { + unique_pk = BTREE_CONSTRAINT_UNIQUE | BTREE_CONSTRAINT_PRIMARY_KEY; + } + if (do_delete_only) { - if (use_mvcc) + if (index->index_status == OR_ONLINE_INDEX_BUILDING_IN_PROGRESS) { - /* in MVCC logical deletion means MVCC DEL_ID insertion */ error_code = - btree_mvcc_delete (thread_p, &old_btid, old_key, class_oid, oid, op_type, unique_stat_info, - &unique, p_mvcc_rec_header); + btree_online_index_dispatcher (thread_p, &index->btid, old_key, class_oid, oid, unique_pk, + BTREE_OP_ONLINE_INDEX_TRAN_DELETE, NULL); if (error_code != NO_ERROR) { - assert (er_errid () != NO_ERROR); + ASSERT_ERROR (); goto error; } } else - { - error_code = - btree_physical_delete (thread_p, &old_btid, old_key, oid, class_oid, &unique, op_type, - unique_stat_info); - if (error_code != NO_ERROR) + { /* Not online index. 
*/ + if (use_mvcc) { - ASSERT_ERROR (); - goto error; + /* in MVCC logical deletion means MVCC DEL_ID insertion */ + error_code = + btree_mvcc_delete (thread_p, &old_btid, old_key, class_oid, oid, op_type, unique_stat_info, + &unique_pk, p_mvcc_rec_header); + if (error_code != NO_ERROR) + { + assert (er_errid () != NO_ERROR); + goto error; + } + } + else + { + error_code = + btree_physical_delete (thread_p, &old_btid, old_key, oid, class_oid, &unique_pk, op_type, + unique_stat_info); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + goto error; + } } } } @@ -8499,9 +8554,20 @@ locator_update_index (THREAD_ENTRY * thread_p, RECDES * new_recdes, RECDES * old } } - error_code = - btree_insert (thread_p, &old_btid, new_key, class_oid, oid, op_type, unique_stat_info, - &unique, p_mvcc_rec_header); + if (index->index_status == OR_ONLINE_INDEX_BUILDING_IN_PROGRESS) + { + /* Online index loading on current index. */ + error_code = + btree_online_index_dispatcher (thread_p, &index->btid, new_key, class_oid, oid, + unique_pk, BTREE_OP_ONLINE_INDEX_TRAN_INSERT, NULL); + } + else + { + error_code = + btree_insert (thread_p, &old_btid, new_key, class_oid, oid, op_type, unique_stat_info, + &unique_pk, p_mvcc_rec_header); + } + if (error_code != NO_ERROR) { ASSERT_ERROR (); @@ -8510,13 +8576,36 @@ locator_update_index (THREAD_ENTRY * thread_p, RECDES * new_recdes, RECDES * old } else { - error_code = - btree_update (thread_p, &old_btid, old_key, new_key, class_oid, oid, op_type, - unique_stat_info, &unique, p_mvcc_rec_header); + if (index->index_status == OR_ONLINE_INDEX_BUILDING_IN_PROGRESS) + { + /* Online index loading on current index. */ + /* This translates into a delete of the old key and an insert of the new key. */ - if (error_code != NO_ERROR) + /* Delete old key. 
*/ + + error_code = + btree_online_index_dispatcher (thread_p, &index->btid, old_key, class_oid, oid, + unique_pk, BTREE_OP_ONLINE_INDEX_TRAN_DELETE, NULL); + if (error_code != NO_ERROR) + { + goto error; + } + + /* Insert new key. */ + error_code = + btree_online_index_dispatcher (thread_p, &index->btid, new_key, class_oid, oid, + unique_pk, BTREE_OP_ONLINE_INDEX_TRAN_INSERT, NULL); + } + else { - goto error; + error_code = + btree_update (thread_p, &old_btid, old_key, new_key, class_oid, oid, op_type, + unique_stat_info, &unique_pk, p_mvcc_rec_header); + + if (error_code != NO_ERROR) + { + goto error; + } } } } @@ -9156,6 +9245,8 @@ locator_repair_btree_by_delete (THREAD_ENTRY * thread_p, OID * class_oid, BTID * tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p); #endif /* SERVER_MODE */ + btree_init_temp_key_value (&clear_key, &key); + if (btree_find_key (thread_p, btid, inst_oid, &key, &clear_key) != DISK_VALID) { return DISK_INVALID; diff --git a/src/transaction/log_applier_sql_log.h b/src/transaction/log_applier_sql_log.h index bd8302ba4ba..96985ecb87c 100644 --- a/src/transaction/log_applier_sql_log.h +++ b/src/transaction/log_applier_sql_log.h @@ -26,7 +26,6 @@ #ident "$Id$" -#include "dbdef.h" #include "dbtype_def.h" #include "work_space.h" diff --git a/src/transaction/log_comm.h b/src/transaction/log_comm.h index e2d9d53986d..9a136a4dbe9 100644 --- a/src/transaction/log_comm.h +++ b/src/transaction/log_comm.h @@ -24,7 +24,6 @@ #include #include "storage_common.h" -#include "dbdef.h" #include "object_representation.h" #define LOG_USERNAME_MAX (DB_MAX_USER_LENGTH + 1) diff --git a/src/transaction/log_impl.h b/src/transaction/log_impl.h index 32988a9c844..2f4fad66b71 100644 --- a/src/transaction/log_impl.h +++ b/src/transaction/log_impl.h @@ -1767,7 +1767,7 @@ struct log_tdes LOG_RCV_TDES rcv; #if defined (SERVER_MODE) || (defined (SA_MODE) && defined (__cplusplus)) - cubreplication::log_generator replication_log_generator; + cubreplication::log_generator 
replication_log_generator; #endif }; @@ -2129,7 +2129,8 @@ extern int logpb_read_page_from_file (THREAD_ENTRY * thread_p, LOG_PAGEID pageid extern int logpb_read_page_from_active_log (THREAD_ENTRY * thread_p, LOG_PAGEID pageid, int num_pages, LOG_PAGE * log_pgptr); extern int logpb_write_page_to_disk (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr, LOG_PAGEID logical_pageid); -extern PGLENGTH logpb_find_header_parameters (THREAD_ENTRY * thread_p, const char *db_fullname, const char *logpath, +extern PGLENGTH logpb_find_header_parameters (THREAD_ENTRY * thread_p, const bool force_read_log_header, + const char *db_fullname, const char *logpath, const char *prefix_logname, PGLENGTH * io_page_size, PGLENGTH * log_page_size, INT64 * db_creation, float *db_compatibility, int *db_charset); @@ -2446,6 +2447,7 @@ extern void logtb_wakeup_thread_with_tran_index (int tran_index, thread_resume_s extern bool logtb_set_check_interrupt (THREAD_ENTRY * thread_p, bool flag); extern bool logtb_get_check_interrupt (THREAD_ENTRY * thread_p); +extern int logpb_set_page_checksum (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr); #endif /* defined (SERVER_MODE) || defined (SA_MODE) */ #endif /* _LOG_IMPL_H_ */ diff --git a/src/transaction/log_manager.c b/src/transaction/log_manager.c index 2fbee68716c..13a013b1569 100644 --- a/src/transaction/log_manager.c +++ b/src/transaction/log_manager.c @@ -63,6 +63,7 @@ #include "heap_file.h" #include "slotted_page.h" #include "object_primitive.h" +#include "tz_support.h" #include "db_date.h" #include "fault_injection.h" #if defined (SA_MODE) @@ -340,7 +341,7 @@ static void log_cleanup_modified_class_list (THREAD_ENTRY * thread_p, LOG_TDES * static void log_append_compensate_internal (THREAD_ENTRY * thread_p, LOG_RCVINDEX rcvindex, const VPID * vpid, PGLENGTH offset, PAGE_PTR pgptr, int length, const void *data, - LOG_TDES * tdes, LOG_LSA * undo_nxlsa); + LOG_TDES * tdes, const LOG_LSA * undo_nxlsa); STATIC_INLINE void log_sysop_end_random_exit 
(THREAD_ENTRY * thread_p) __attribute__ ((ALWAYS_INLINE)); STATIC_INLINE void log_sysop_end_begin (THREAD_ENTRY * thread_p, int *tran_index_out, LOG_TDES ** tdes_out) @@ -357,6 +358,8 @@ static void log_tran_do_postpone (THREAD_ENTRY * thread_p, LOG_TDES * tdes); static void log_sysop_do_postpone (THREAD_ENTRY * thread_p, LOG_TDES * tdes, LOG_REC_SYSOP_END * sysop_end, int data_size, const char *data); +static int logtb_tran_update_stats_online_index_rb (THREAD_ENTRY * thread_p, void *data, void *args); + #if defined(SERVER_MODE) // *INDENT-OFF* static cubthread::daemon *log_Clock_daemon = NULL; @@ -3017,7 +3020,7 @@ log_append_compensate (THREAD_ENTRY * thread_p, LOG_RCVINDEX rcvindex, const VPI void log_append_compensate_with_undo_nxlsa (THREAD_ENTRY * thread_p, LOG_RCVINDEX rcvindex, const VPID * vpid, PGLENGTH offset, PAGE_PTR pgptr, int length, const void *data, LOG_TDES * tdes, - LOG_LSA * undo_nxlsa) + const LOG_LSA * undo_nxlsa) { assert (undo_nxlsa != NULL); @@ -3052,7 +3055,8 @@ log_append_compensate_with_undo_nxlsa (THREAD_ENTRY * thread_p, LOG_RCVINDEX rcv */ static void log_append_compensate_internal (THREAD_ENTRY * thread_p, LOG_RCVINDEX rcvindex, const VPID * vpid, PGLENGTH offset, - PAGE_PTR pgptr, int length, const void *data, LOG_TDES * tdes, LOG_LSA * undo_nxlsa) + PAGE_PTR pgptr, int length, const void *data, LOG_TDES * tdes, + const LOG_LSA * undo_nxlsa) { LOG_REC_COMPENSATE *compensate; /* Compensate log record */ LOG_LSA prev_lsa; /* LSA of next record to undo */ @@ -5634,6 +5638,8 @@ log_abort_local (THREAD_ENTRY * thread_p, LOG_TDES * tdes, bool is_local_tran) log_rollback (thread_p, tdes, NULL); + log_update_global_btid_online_index_stats (thread_p); + log_cleanup_modified_class_list (thread_p, tdes, NULL, true, true); if (tdes->first_save_entry != NULL) @@ -9134,7 +9140,7 @@ log_recreate (THREAD_ENTRY * thread_p, const char *db_fullname, const char *logp * RESET RECOVERY INFORMATION ON ALL DATA VOLUMES */ - LSA_SET_INIT_NONTEMP 
(&init_nontemp_lsa); + LSA_SET_NULL (&init_nontemp_lsa); for (volid = LOG_DBFIRST_VOLID; volid != NULL_VOLID; volid = fileio_find_next_perm_volume (thread_p, volid)) { @@ -9236,7 +9242,7 @@ log_get_io_page_size (THREAD_ENTRY * thread_p, const char *db_fullname, const ch int dummy; LOG_CS_ENTER (thread_p); - if (logpb_find_header_parameters (thread_p, db_fullname, logpath, prefix_logname, &db_iopagesize, + if (logpb_find_header_parameters (thread_p, false, db_fullname, logpath, prefix_logname, &db_iopagesize, &log_page_size, &ignore_dbcreation, &ignore_dbcomp, &dummy) == -1) { /* @@ -9310,7 +9316,7 @@ log_get_charset_from_header_page (THREAD_ENTRY * thread_p, const char *db_fullna int db_charset = INTL_CODESET_NONE; LOG_CS_ENTER (thread_p); - if (logpb_find_header_parameters (thread_p, db_fullname, logpath, prefix_logname, &dummy_db_iopagesize, + if (logpb_find_header_parameters (thread_p, false, db_fullname, logpath, prefix_logname, &dummy_db_iopagesize, &dummy_ignore_log_page_size, &dummy_ignore_dbcreation, &dummy_ignore_dbcomp, &db_charset) == -1) { @@ -10829,3 +10835,76 @@ log_abort_task::execute (context_type &thread_ref) } #endif // SERVER_MODE // *INDENT-ON* + +void +log_update_global_btid_online_index_stats (THREAD_ENTRY * thread_p) +{ + LOG_TDES *tdes = LOG_FIND_TDES (LOG_FIND_THREAD_TRAN_INDEX (thread_p)); + int error_code = NO_ERROR; + + if (tdes == NULL) + { + return; + } + + error_code = + mht_map_no_key (thread_p, tdes->log_upd_stats.unique_stats_hash, logtb_tran_update_stats_online_index_rb, thread_p); + + if (error_code != NO_ERROR) + { + assert (false); + } +} + +/* + * logtb_tran_update_stats_online_index_rb - Updates statistics during an online index when a transaction + * gets rollbacked. + * + * TODO: This can be easily optimized since it is slow. Try to find a better approach! 
+ */ +static int +logtb_tran_update_stats_online_index_rb (THREAD_ENTRY * thread_p, void *data, void *args) +{ + /* This is called only during a rollback on a transaction that has updated an index which was under + * online loading. + */ + LOG_TRAN_BTID_UNIQUE_STATS *unique_stats = (LOG_TRAN_BTID_UNIQUE_STATS *) data; + int error_code = NO_ERROR; + OID class_oid; +#if !defined (NDEBUG) + LOG_TDES *tdes = LOG_FIND_TDES (LOG_FIND_THREAD_TRAN_INDEX (thread_p)); + + assert (LOG_ISTRAN_ABORTED (tdes)); +#endif /* !NDEBUG */ + + if (unique_stats->deleted) + { + /* ignore if deleted */ + return NO_ERROR; + } + + OID_SET_NULL (&class_oid); + + error_code = btree_get_class_oid_of_unique_btid (thread_p, &unique_stats->btid, &class_oid); + if (error_code != NO_ERROR) + { + assert (false); + return error_code; + } + + assert (!OID_ISNULL (&class_oid)); + + if (!btree_is_btid_online_index (thread_p, &class_oid, &unique_stats->btid)) + { + /* We can skip. */ + return NO_ERROR; + } + + /* We can update the statistics. 
*/ + error_code = + logtb_update_global_unique_stats_by_delta (thread_p, &unique_stats->btid, unique_stats->tran_stats.num_oids, + unique_stats->tran_stats.num_nulls, unique_stats->tran_stats.num_keys, + false); + + return error_code; +} diff --git a/src/transaction/log_manager.h b/src/transaction/log_manager.h index 1be8c1574bf..e2dcf46898e 100644 --- a/src/transaction/log_manager.h +++ b/src/transaction/log_manager.h @@ -122,7 +122,7 @@ extern void log_append_compensate (THREAD_ENTRY * thread_p, LOG_RCVINDEX rcvinde PAGE_PTR pgptr, int length, const void *data, LOG_TDES * tdes); extern void log_append_compensate_with_undo_nxlsa (THREAD_ENTRY * thread_p, LOG_RCVINDEX rcvindex, const VPID * vpid, PGLENGTH offset, PAGE_PTR pgptr, int length, const void *data, - LOG_TDES * tdes, LOG_LSA * undo_nxlsa); + LOG_TDES * tdes, const LOG_LSA * undo_nxlsa); extern void log_append_ha_server_state (THREAD_ENTRY * thread_p, int state); extern void log_append_empty_record (THREAD_ENTRY * thread_p, LOG_RECTYPE logrec_type, LOG_DATA_ADDR * addr); extern void log_skip_logging_set_lsa (THREAD_ENTRY * thread_p, LOG_DATA_ADDR * addr); @@ -212,4 +212,6 @@ extern bool log_is_log_flush_daemon_available (); extern void log_flush_daemon_get_stats (UINT64 * statsp); #endif // SERVER_MODE +extern void log_update_global_btid_online_index_stats (THREAD_ENTRY * thread_p); + #endif /* _LOG_MANAGER_H_ */ diff --git a/src/transaction/log_page_buffer.c b/src/transaction/log_page_buffer.c index f555ee44a00..ef2d2bdf300 100644 --- a/src/transaction/log_page_buffer.c +++ b/src/transaction/log_page_buffer.c @@ -415,8 +415,6 @@ static int logpb_fetch_header_from_active_log (THREAD_ENTRY * thread_p, const ch const char *logpath, const char *prefix_logname, LOG_HEADER * hdr, LOG_PAGE * log_pgptr); static int logpb_compute_page_checksum (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr, int *checksum_crc32); -STATIC_INLINE int logpb_set_page_checksum (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr) - 
__attribute__ ((ALWAYS_INLINE)); static int logpb_page_has_valid_checksum (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr, bool * has_valid_checksum); /* @@ -490,11 +488,19 @@ logpb_initialize_log_buffer (LOG_BUFFER * log_buffer_p, LOG_PAGE * log_pg) * log_pgptr (in) : log page pointer * checksum_crc32(out): computed checksum * Note: Currently CRC32 is used as checksum. + * Note: any changes to this requires changes to logwr_check_page_checksum */ static int logpb_compute_page_checksum (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr, int *checksum_crc32) { int error_code = NO_ERROR, saved_checksum_crc32; + const int block_size = 4096; + const int max_num_pages = IO_MAX_PAGE_SIZE / block_size; + const int sample_nbytes = 16; + int sampling_offset; + char buf[max_num_pages * sample_nbytes * 2]; + const int num_pages = LOG_PAGESIZE / block_size; + const size_t sizeof_buf = num_pages * sample_nbytes * 2; assert (log_pgptr != NULL && checksum_crc32 != NULL); @@ -504,8 +510,21 @@ logpb_compute_page_checksum (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr, int /* Resets checksum to not affect the new computation. */ log_pgptr->hdr.checksum = 0; - /* Computes the page checksum. */ - error_code = crypt_crc32 (thread_p, (char *) log_pgptr, LOG_PAGESIZE, checksum_crc32); + char *p = buf; + for (int i = 0; i < num_pages; i++) + { + // first + sampling_offset = (i * block_size); + memcpy (p, ((char *) log_pgptr) + sampling_offset, sample_nbytes); + p += sample_nbytes; + + // last + sampling_offset = (i * block_size) + (block_size - sample_nbytes); + memcpy (p, ((char *) log_pgptr) + sampling_offset, sample_nbytes); + p += sample_nbytes; + } + + error_code = crypt_crc32 (thread_p, (char *) buf, sizeof_buf, checksum_crc32); /* Restores the saved checksum */ log_pgptr->hdr.checksum = saved_checksum_crc32; @@ -520,7 +539,7 @@ logpb_compute_page_checksum (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr, int * log_pgptr (in) : log page pointer * Note: Currently CRC32 is used as checksum. 
*/ -STATIC_INLINE int +int logpb_set_page_checksum (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr) { int error_code = NO_ERROR, checksum_crc32; @@ -2230,6 +2249,7 @@ logpb_write_page_to_disk (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr, LOG_PAG * return: iopagesize or -1 * * db_fullname(in): Full name of the database + * force_read_log_header(in): force to read log header * logpath(in): Directory where the log volumes reside * prefix_logname(in): Name of the log volumes. It is usually set as database * name. For example, if the value is equal to "db", the @@ -2251,9 +2271,9 @@ logpb_write_page_to_disk (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr, LOG_PAG * NOTE:Find some database creation parameters such as pagesize, creation time, and disk compatability. */ PGLENGTH -logpb_find_header_parameters (THREAD_ENTRY * thread_p, const char *db_fullname, const char *logpath, - const char *prefix_logname, PGLENGTH * io_page_size, PGLENGTH * log_page_size, - INT64 * creation_time, float *db_compatibility, int *db_charset) +logpb_find_header_parameters (THREAD_ENTRY * thread_p, const bool force_read_log_header, const char *db_fullname, + const char *logpath, const char *prefix_logname, PGLENGTH * io_page_size, + PGLENGTH * log_page_size, INT64 * creation_time, float *db_compatibility, int *db_charset) { static LOG_HEADER hdr; /* Log header */ static bool is_header_read_from_file = false; @@ -2262,6 +2282,12 @@ logpb_find_header_parameters (THREAD_ENTRY * thread_p, const char *db_fullname, LOG_PAGE *log_pgptr = NULL; int error_code = NO_ERROR; + if (force_read_log_header) + { + is_header_read_from_file = false; + is_log_header_validated = false; + } + aligned_log_pgbuf = PTR_ALIGN (log_pgbuf, MAX_ALIGNMENT); assert (LOG_CS_OWN_WRITE_MODE (thread_p)); @@ -4324,6 +4350,7 @@ logpb_flush_all_append_pages (THREAD_ENTRY * thread_p) LOGWR_INFO *writer_info = &log_Gl.writer_info; LOGWR_ENTRY *entry; + THREAD_ENTRY *wait_thread_p; #endif /* SERVER_MODE */ assert 
(LOG_CS_OWN_WRITE_MODE (thread_p)); @@ -4555,8 +4582,25 @@ logpb_flush_all_append_pages (THREAD_ENTRY * thread_p) { if (entry->status == LOGWR_STATUS_WAIT) { - entry->status = LOGWR_STATUS_FETCH; - logtb_wakeup_thread_with_tran_index (entry->thread_p->tran_index, THREAD_LOGWR_RESUMED); + wait_thread_p = entry->thread_p; + assert (wait_thread_p != thread_p); + + thread_lock_entry (wait_thread_p); + + /* If THREAD_RESUME_DUE_TO_INTERRUPT, do not set the entry status to avoid deadlock + * between flush_end_cond and CSECT_LOG. + */ + if (thread_p->resume_status != THREAD_RESUME_DUE_TO_INTERRUPT) + { + /* Still waiting for LOGWR. */ + entry->status = LOGWR_STATUS_FETCH; + if (wait_thread_p->resume_status == THREAD_LOGWR_SUSPENDED) + { + thread_wakeup_already_had_mutex (wait_thread_p, THREAD_LOGWR_RESUMED); + } + } + + thread_unlock_entry (wait_thread_p); } entry = entry->next; } @@ -9352,8 +9396,8 @@ logpb_restore (THREAD_ENTRY * thread_p, const char *db_fullname, const char *log LOG_CS_ENTER (thread_p); - if (logpb_find_header_parameters (thread_p, db_fullname, logpath, prefix_logname, &db_iopagesize, &log_page_size, - &db_creation, &db_compatibility, &dummy) == -1) + if (logpb_find_header_parameters (thread_p, true, db_fullname, logpath, prefix_logname, &db_iopagesize, + &log_page_size, &db_creation, &db_compatibility, &dummy) == -1) { db_iopagesize = IO_PAGESIZE; log_page_size = LOG_PAGESIZE; @@ -11163,6 +11207,13 @@ logpb_delete (THREAD_ENTRY * thread_p, VOLID num_perm_vols, const char *db_fulln fileio_make_volume_info_name (vol_fullname, db_fullname); fileio_unformat (thread_p, vol_fullname); + /* Destroy DWB, if still exists. 
*/ + fileio_make_dwb_name (vol_fullname, log_Path, log_Prefix); + if (fileio_is_volume_exist (vol_fullname)) + { + fileio_unformat (thread_p, vol_fullname); + } + if (force_delete) { /* diff --git a/src/transaction/log_recovery.c b/src/transaction/log_recovery.c index cd2be3980ee..72a3fed3c17 100644 --- a/src/transaction/log_recovery.c +++ b/src/transaction/log_recovery.c @@ -1606,14 +1606,36 @@ log_rv_analysis_sysop_end (THREAD_ENTRY * thread_p, int tran_id, LOG_LSA * log_l tdes->topops.last = -1; } - if (LSA_LT (&sysop_end->lastparent_lsa, &tdes->rcv.atomic_sysop_start_lsa)) + // if this is the end of atomic system operation or system operation postpone phase, now it is time to reset it + // + // NOTE - we might actually be in both a system operation postpone phase and an atomic system operation, one nested + // in the other. we need to check which is last and end sysop should belong to that. + // + // NOTE - I really hate this guessing state system and we really, really should consider a more deterministic way. + // Logging ALL started system operations and replicating the system operation stack precisely would really + // help us avoiding all these ambiguities. + // + + // do we reset atomic sysop? next conditions must be met: + // 1. is there atomic system operation started? + // 2. is atomic system operation more recent than start postpone? + // 3. is atomic system operation equal or more recent to system operation last parent? + if (!LSA_ISNULL (&tdes->rcv.atomic_sysop_start_lsa) /* 1 */ + && LSA_GT (&tdes->rcv.atomic_sysop_start_lsa, &tdes->rcv.sysop_start_postpone_lsa) /* 2 */ + && LSA_GE (&tdes->rcv.atomic_sysop_start_lsa, &sysop_end->lastparent_lsa) /* 3 */ ) { /* reset tdes->rcv.atomic_sysop_start_lsa */ LSA_SET_NULL (&tdes->rcv.atomic_sysop_start_lsa); } - if (LSA_LT (&sysop_end->lastparent_lsa, &tdes->rcv.sysop_start_postpone_lsa)) + // do we reset sysop start postpone? next conditions must be met: + // 1. 
is there system operation start postpone in progress? + // 2. is system operation start postpone more recent than atomic system operation? + // 3. is system operation start postpone more recent than system operation last parent? + if (!LSA_ISNULL (&tdes->rcv.sysop_start_postpone_lsa) + && LSA_GT (&tdes->rcv.sysop_start_postpone_lsa, &tdes->rcv.atomic_sysop_start_lsa) + && LSA_GT (&tdes->rcv.sysop_start_postpone_lsa, &sysop_end->lastparent_lsa)) { - /* reset tdes->rcv.atomic_sysop_start_lsa */ + /* reset tdes->rcv.sysop_start_postpone_lsa */ LSA_SET_NULL (&tdes->rcv.sysop_start_postpone_lsa); } diff --git a/src/transaction/log_tran_table.c b/src/transaction/log_tran_table.c index 8af721f8927..0c10ba6ee8f 100644 --- a/src/transaction/log_tran_table.c +++ b/src/transaction/log_tran_table.c @@ -57,6 +57,7 @@ #include "serial.h" #include "show_scan.h" #include "boot_sr.h" +#include "tz_support.h" #include "db_date.h" #include "dbtype.h" #if defined (SERVER_MODE) diff --git a/src/transaction/log_writer.c b/src/transaction/log_writer.c index 4a49c719b2d..35c767baa69 100644 --- a/src/transaction/log_writer.c +++ b/src/transaction/log_writer.c @@ -38,6 +38,7 @@ #include "system_parameter.h" #include "connection_support.h" #include "log_applier.h" +#include "crypt_opfunc.h" #if defined(SERVER_MODE) #include "server_support.h" #include "network_interface_sr.h" @@ -54,6 +55,9 @@ static int prev_ha_server_state = HA_SERVER_STATE_NA; static bool logwr_need_shutdown = false; + +static int logwr_check_page_checksum (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr); + #if defined(CS_MODE) LOGWR_GLOBAL logwr_Gl = { /* log header */ @@ -763,6 +767,8 @@ logwr_writev_append_pages (LOG_PAGE ** to_flush, DKNPAGES npages) { fpageid = to_flush[0]->hdr.logical_pageid; + (void) logwr_check_page_checksum (NULL, *to_flush); + /* 1. 
archive temp write */ if (prm_get_bool_value (PRM_ID_LOG_BACKGROUND_ARCHIVING)) { @@ -1668,6 +1674,73 @@ logwr_copy_log_file (const char *db_name, const char *log_path, int mode, INT64 } #endif /* !CS_MODE */ + + +/* + * logwr_compute_page_checksum - Computes log page checksum. + * return: error code + * thread_p (in) : thread entry + * log_pgptr (in) : log page pointer + * checksum_crc32(out): computed checksum + * Note: Currently CRC32 is used as checksum. + * Note: this is a copy of logpb_compute_page_checksum + */ +static int +logwr_check_page_checksum (THREAD_ENTRY * thread_p, LOG_PAGE * log_pgptr) +{ + int error_code = NO_ERROR, saved_checksum_crc32; + const int block_size = 4096; + const int max_num_pages = IO_MAX_PAGE_SIZE / block_size; + const int sample_nbytes = 16; + int sampling_offset; + char buf[max_num_pages * sample_nbytes * 2]; + const int num_pages = LOG_PAGESIZE / block_size; + const size_t sizeof_buf = num_pages * sample_nbytes * 2; + int checksum_crc32; + + assert (log_pgptr != NULL); + + /* Save the old page checksum. */ + saved_checksum_crc32 = log_pgptr->hdr.checksum; + if (saved_checksum_crc32 == 0) + { + return NO_ERROR; + } + + /* Resets checksum to not affect the new computation. 
*/ + log_pgptr->hdr.checksum = 0; + + char *p = buf; + for (int i = 0; i < num_pages; i++) + { + // first + sampling_offset = (i * block_size); + memcpy (p, ((char *) log_pgptr) + sampling_offset, sample_nbytes); + p += sample_nbytes; + + // last + sampling_offset = (i * block_size) + (block_size - sample_nbytes); + memcpy (p, ((char *) log_pgptr) + sampling_offset, sample_nbytes); + p += sample_nbytes; + } + + error_code = crypt_crc32 (thread_p, (char *) buf, sizeof_buf, &checksum_crc32); + + /* Restores the saved checksum */ + log_pgptr->hdr.checksum = saved_checksum_crc32; + + if (checksum_crc32 != saved_checksum_crc32) + { + _er_log_debug (ARG_FILE_LINE, + "logwr_check_page_checksum: log page %lld has checksum = %d, computed checksum = %d\n", + (long long int) log_pgptr->hdr.logical_pageid, saved_checksum_crc32, checksum_crc32); + assert (false); + return ER_FAILED; + } + + return error_code; +} + /* * logwr_log_ha_filestat_to_string() - return the string alias of enum value * @@ -2033,6 +2106,16 @@ logwr_pack_log_pages (THREAD_ENTRY * thread_p, char *logpg_area, int *logpg_used } } + if (pageid >= nxio_lsa.pageid) + { + /* page is not flushed yet, may be changed : update checksum before send */ + (void) logpb_set_page_checksum (thread_p, log_pgptr); + } + else + { + (void) logwr_check_page_checksum (thread_p, log_pgptr); + } + assert (pageid == (log_pgptr->hdr.logical_pageid)); p += LOG_PAGESIZE; } diff --git a/src/transaction/recovery.c b/src/transaction/recovery.c index f134dd42aa7..9baf6c9870f 100644 --- a/src/transaction/recovery.c +++ b/src/transaction/recovery.c @@ -801,6 +801,18 @@ struct rvfun RV_fun[] = { NULL, NULL, NULL}, + {RVBT_ONLINE_INDEX_UNDO_TRAN_INSERT, + "RVBT_ONLINE_INDEX_UNDO_TRAN_INSERT", + btree_rv_keyval_undo_online_index_tran_insert, + btree_rv_redo_record_modify, + btree_rv_keyval_dump, + log_rv_dump_hexa}, + {RVBT_ONLINE_INDEX_UNDO_TRAN_DELETE, + "RVBT_ONLINE_INDEX_UNDO_TRAN_DELETE", + btree_rv_keyval_undo_online_index_tran_delete, + 
btree_rv_redo_record_modify, + btree_rv_keyval_dump, + log_rv_dump_hexa}, }; diff --git a/src/transaction/recovery.h b/src/transaction/recovery.h index e032fc9b2b5..2b67e28002c 100644 --- a/src/transaction/recovery.h +++ b/src/transaction/recovery.h @@ -177,7 +177,10 @@ typedef enum RVPGBUF_DEALLOC = 122, RVPGBUF_COMPENSATE_DEALLOC = 123, - RV_LAST_LOGID = RVPGBUF_COMPENSATE_DEALLOC, + RVBT_ONLINE_INDEX_UNDO_TRAN_INSERT = 124, + RVBT_ONLINE_INDEX_UNDO_TRAN_DELETE = 125, + + RV_LAST_LOGID = RVBT_ONLINE_INDEX_UNDO_TRAN_DELETE, RV_NOT_DEFINED = 999 } LOG_RCVINDEX; @@ -226,7 +229,9 @@ extern void rv_check_rvfuns (void); || (idx) == RVBT_MARK_DELETED \ || (idx) == RVBT_DELETE_OBJECT_POSTPONE \ || (idx) == RVBT_MVCC_INSERT_OBJECT_UNQ \ - || (idx) == RVBT_MVCC_NOTIFY_VACUUM) + || (idx) == RVBT_MVCC_NOTIFY_VACUUM \ + || (idx) == RVBT_ONLINE_INDEX_UNDO_TRAN_DELETE \ + || (idx) == RVBT_ONLINE_INDEX_UNDO_TRAN_INSERT) #define RCV_IS_LOGICAL_COMPENSATE_MANUAL(idx) \ (RCV_IS_BTREE_LOGICAL_LOG(idx) \ diff --git a/src/transaction/transaction_cl.c b/src/transaction/transaction_cl.c index 441ca343194..288bf5982cd 100644 --- a/src/transaction/transaction_cl.c +++ b/src/transaction/transaction_cl.c @@ -53,7 +53,6 @@ #include "schema_manager.h" #include "trigger_manager.h" #include "system_parameter.h" -#include "dbdef.h" #include "db.h" /* for db_Connect_status */ #include "porting.h" #include "network_interface_cl.h" diff --git a/src/transaction/transaction_cl.h b/src/transaction/transaction_cl.h index a95ce7ec413..b28c64791c9 100644 --- a/src/transaction/transaction_cl.h +++ b/src/transaction/transaction_cl.h @@ -38,7 +38,6 @@ #include "locator.h" #include "storage_common.h" #include "log_comm.h" -#include "dbdef.h" #define TM_TRAN_INDEX() (tm_Tran_index) #define TM_TRAN_ISOLATION() (tm_Tran_isolation) diff --git a/src/xasl/access_json_table.cpp b/src/xasl/access_json_table.cpp new file mode 100644 index 00000000000..3c3dd2b23f5 --- /dev/null +++ b/src/xasl/access_json_table.cpp @@ 
-0,0 +1,376 @@ +/* + * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +// +// access_json_table.cpp - implementation of structures required to access json table spec type. +// + +#include "access_json_table.hpp" + +#include "db_json.hpp" +#include "dbtype.h" +#include "error_code.h" +#include "error_manager.h" +#include "object_primitive.h" + +#include + +namespace cubxasl +{ + namespace json_table + { + + int + column::trigger_on_error (const JSON_DOC &input, const TP_DOMAIN_STATUS &status_cast, db_value &value_out) + { + (void) pr_clear_value (&value_out); + (void) db_make_null (&value_out); + + switch (m_on_error.m_behavior) + { + case JSON_TABLE_RETURN_NULL: + er_clear (); + return NO_ERROR; + + case JSON_TABLE_THROW_ERROR: + { + PRIVATE_UNIQUE_PTR unique_ptr_json_body (db_json_get_raw_json_body_from_document (&input), NULL); + + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_JSON_TABLE_ON_ERROR_INCOMP_DOMAIN, 4, + unique_ptr_json_body.get (), m_path, m_column_name, + pr_type_name (TP_DOMAIN_TYPE (m_domain))); + + return ER_JSON_TABLE_ON_ERROR_INCOMP_DOMAIN; + } + + case JSON_TABLE_DEFAULT_VALUE: + assert (m_on_error.m_default_value != NULL); + er_clear (); + if (pr_clone_value 
(m_on_error.m_default_value, &value_out) != NO_ERROR) + { + assert (false); + } + return NO_ERROR; + + default: + assert (false); + return ER_FAILED; + } + } + + int + column::trigger_on_empty (db_value &value_out) + { + (void) pr_clear_value (&value_out); + (void) db_make_null (&value_out); + + switch (m_on_empty.m_behavior) + { + case JSON_TABLE_RETURN_NULL: + return NO_ERROR; + + case JSON_TABLE_THROW_ERROR: + er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_JSON_TABLE_ON_EMPTY_ERROR, 1, m_column_name); + return ER_JSON_TABLE_ON_EMPTY_ERROR; + + case JSON_TABLE_DEFAULT_VALUE: + assert (m_on_empty.m_default_value != NULL); + if (pr_clone_value (m_on_empty.m_default_value, &value_out) != NO_ERROR) + { + assert (false); + } + return NO_ERROR; + + default: + assert (false); + return ER_FAILED; + } + } + + column::column (void) + { + init (); + } + + void + column::init () + { + m_domain = NULL; + m_path = NULL; + m_column_name = NULL; + m_output_value_pointer = NULL; + m_function = json_table_column_function::JSON_TABLE_EXTRACT; + m_on_error.m_default_value = NULL; + m_on_error.m_behavior = json_table_column_behavior_type::JSON_TABLE_RETURN_NULL; + m_on_empty.m_default_value = NULL; + m_on_empty.m_behavior = json_table_column_behavior_type::JSON_TABLE_RETURN_NULL; + } + + int + column::evaluate_extract (const JSON_DOC &input) + { + int error_code = NO_ERROR; + JSON_DOC *docp = NULL; + TP_DOMAIN_STATUS status_cast = TP_DOMAIN_STATUS::DOMAIN_COMPATIBLE; + + error_code = db_json_extract_document_from_path (&input, m_path, docp); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + assert (db_value_is_null (m_output_value_pointer)); + return ER_FAILED; + } + + if (docp == NULL) + { + error_code = trigger_on_empty (*m_output_value_pointer); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + } + return error_code; + } + + // clear previous output_value + pr_clear_value (m_output_value_pointer); + + if (db_make_json (m_output_value_pointer, docp, true) != NO_ERROR) + { + 
assert (false); + return ER_FAILED; + } + + status_cast = tp_value_cast (m_output_value_pointer, m_output_value_pointer, m_domain, false); + if (status_cast != TP_DOMAIN_STATUS::DOMAIN_COMPATIBLE) + { + error_code = trigger_on_error (input, status_cast, *m_output_value_pointer); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + } + } + + return error_code; + } + + int + column::evaluate_exists (const JSON_DOC &input) + { + int error_code = NO_ERROR; + bool result = false; + TP_DOMAIN_STATUS status_cast = TP_DOMAIN_STATUS::DOMAIN_COMPATIBLE; + + error_code = db_json_contains_path (&input, m_path, result); + if (error_code != NO_ERROR) + { + ASSERT_ERROR (); + assert (db_value_is_null (m_output_value_pointer)); + return ER_FAILED; + } + + db_make_short (m_output_value_pointer, result ? 1 : 0); + + status_cast = tp_value_cast (m_output_value_pointer, m_output_value_pointer, m_domain, false); + if (status_cast != TP_DOMAIN_STATUS::DOMAIN_COMPATIBLE) + { + return ER_FAILED; + } + + return error_code; + } + + int + column::evaluate_ordinality (size_t ordinality) + { + TP_DOMAIN_STATUS status_cast = TP_DOMAIN_STATUS::DOMAIN_COMPATIBLE; + + assert (m_domain->type->id == DB_TYPE_INTEGER); + + db_make_int (m_output_value_pointer, ordinality); + + return NO_ERROR; + } + + int + column::evaluate (const JSON_DOC &input, size_t ordinality) + { + assert (m_output_value_pointer != NULL); + + pr_clear_value (m_output_value_pointer); + db_make_null (m_output_value_pointer); + + int error_code = NO_ERROR; + + switch (m_function) + { + case json_table_column_function::JSON_TABLE_EXTRACT: + error_code = evaluate_extract (input); + break; + case json_table_column_function::JSON_TABLE_EXISTS: + error_code = evaluate_exists (input); + break; + case json_table_column_function::JSON_TABLE_ORDINALITY: + error_code = evaluate_ordinality (ordinality); + break; + default: + return ER_FAILED; + } + + return error_code; + } + + node::node (void) + { + init (); + } + + void + node::init () + 
{ + m_path = NULL; + m_ordinality = 1; + m_need_inc_ordinality = true; + m_output_columns = NULL; + m_output_columns_size = 0; + m_nested_nodes = NULL; + m_nested_nodes_size = 0; + m_id = 0; + m_iterator = NULL; + m_expand_type = json_table_expand_type::JSON_TABLE_NO_EXPAND; + } + + void + node::clear_columns (bool is_final_clear) + { + for (size_t i = 0; i < m_output_columns_size; ++i) + { + column *output_column = &m_output_columns[i]; + if (is_final_clear) + { + (void) pr_clear_value (output_column->m_on_empty.m_default_value); + (void) pr_clear_value (output_column->m_on_error.m_default_value); + } + + (void) pr_clear_value (output_column->m_output_value_pointer); + (void) db_make_null (output_column->m_output_value_pointer); + } + } + + void + node::clear_iterators (bool is_final_clear) + { + if (is_final_clear) + { + db_json_delete_json_iterator (m_iterator); + } + else + { + db_json_clear_json_iterator (m_iterator); + } + + for (size_t i = 0; i < m_nested_nodes_size; ++i) + { + m_nested_nodes[i].clear_iterators (is_final_clear); + } + } + + void + node::clear_tree (bool is_final_clear) + { + clear_columns (is_final_clear); + + for (size_t i = 0; i < m_nested_nodes_size; ++i) + { + m_nested_nodes[i].clear_tree (is_final_clear); + } + } + + bool + node::str_ends_with (const std::string &str, const std::string &end) + { + return end.size () <= str.size () && str.compare (str.size () - end.size (), end.size (), end) == 0; + } + + bool + node::check_need_expand () const + { + return m_expand_type != json_table_expand_type::JSON_TABLE_NO_EXPAND; + } + + void + node::set_parent_path () + { + if (!check_need_expand ()) + { + assert (false); + return; + } + + if (m_expand_type == json_table_expand_type::JSON_TABLE_ARRAY_EXPAND) + { + std::string s (m_path); + s.assign (s.substr (0, s.size () - 3)); + + // will only shrink + + strcpy (m_path, s.c_str ()); + m_path[s.size ()] = 0; + } + else if (m_expand_type == json_table_expand_type::JSON_TABLE_OBJECT_EXPAND) + { + 
std::string s (m_path); + s.assign (s.substr (0, s.size () - 2)); + + // will only shrink + strcpy (m_path, s.c_str ()); + m_path[s.size ()] = 0; + } + } + + void + node::init_iterator () + { + if (check_need_expand ()) + { + if (m_expand_type == json_table_expand_type::JSON_TABLE_ARRAY_EXPAND) + { + m_iterator = db_json_create_iterator (DB_JSON_TYPE::DB_JSON_ARRAY); + } + else if (m_expand_type == json_table_expand_type::JSON_TABLE_OBJECT_EXPAND) + { + m_iterator = db_json_create_iterator (DB_JSON_TYPE::DB_JSON_OBJECT); + } + } + } + + spec_node::spec_node () + { + init (); + } + + void + spec_node::init () + { + m_root_node = NULL; + m_json_reguvar = NULL; + m_node_count = 0; + } + + } // namespace json_table +} // namespace cubxasl diff --git a/src/xasl/access_json_table.hpp b/src/xasl/access_json_table.hpp new file mode 100644 index 00000000000..f6dac50a140 --- /dev/null +++ b/src/xasl/access_json_table.hpp @@ -0,0 +1,119 @@ +/* + * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +// +// access_json_table.hpp - defines structures required to access json table spec type. 
+// + +#ifndef _ACCESS_JSON_TABLE_H_ +#define _ACCESS_JSON_TABLE_H_ + +#include +#include + +#include + +#include "json_table_def.h" +#include "object_domain.h" + +// forward declarations +struct db_value; +struct tp_domain; +struct regu_variable_node; +class JSON_DOC; +class JSON_ITERATOR; + +namespace cubxasl +{ + namespace json_table + { + + struct column + { + tp_domain *m_domain; + char *m_path; + char *m_column_name; + json_table_column_behavior m_on_error; + json_table_column_behavior m_on_empty; + db_value *m_output_value_pointer; // should match xasl->outptr_list value pointers + + // there are three types of columns based on how they function: + // extract from path, exists at path or ordinality + json_table_column_function m_function; + + column (); + + void init (); + int evaluate (const JSON_DOC &input, size_t ordinality); + + private: + int evaluate_extract (const JSON_DOC &input); + int evaluate_exists (const JSON_DOC &input); + int evaluate_ordinality (size_t ordinality); + + int trigger_on_error (const JSON_DOC &input, const TP_DOMAIN_STATUS &status_cast, db_value &value_out); + int trigger_on_empty (db_value &value_out); + }; + + struct node + { + char *m_path; + size_t m_ordinality; // will be used to count the row ordinality + bool m_need_inc_ordinality; + column *m_output_columns; // columns part of output only + size_t m_output_columns_size; + node *m_nested_nodes; // nested nodes + size_t m_nested_nodes_size; + size_t m_id; // identifier for each node + JSON_ITERATOR *m_iterator; + json_table_expand_type m_expand_type; + + node (void); + + void init (); + void clear_columns (bool is_final_clear); + void clear_iterators (bool is_final_clear); + void clear_tree (bool is_final_clear); + + bool check_need_expand () const; + static bool str_ends_with (const std::string &str, const std::string &end); + void set_parent_path (); + void init_iterator (); + }; + + struct spec_node + { + node *m_root_node; + regu_variable_node *m_json_reguvar; + 
std::size_t m_node_count; // the total number of nodes + + spec_node (); + + void init (); + }; + + } // namespace json_table +} // namespace cubxasl + +// to be used outside namespace +using json_table_column = cubxasl::json_table::column; +using json_table_node = cubxasl::json_table::node; +using json_table_spec_node = cubxasl::json_table::spec_node; + +#endif // _ACCESS_JSON_TABLE_H_ diff --git a/src/xasl/compile_context.h b/src/xasl/compile_context.h new file mode 100644 index 00000000000..794a01102ed --- /dev/null +++ b/src/xasl/compile_context.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +// +// compile_context.h - client/server common context used for prepare phase +// + +#ifndef _COMPILE_CONTEXT_H_ +#define _COMPILE_CONTEXT_H_ + +// forward definitions +struct xasl_node; + +// note - file should be compatible to C language + +#include "sha1.h" + +/* + * COMPILE_CONTEXT cover from user input query string to generated xasl + */ +typedef struct compile_context COMPILE_CONTEXT; +struct compile_context +{ + struct xasl_node *xasl; + + char *sql_user_text; /* original query statement that user input */ + int sql_user_text_len; /* length of sql_user_text */ + + char *sql_hash_text; /* rewritten query string which is used as hash key */ + + char *sql_plan_text; /* plans for this query */ + int sql_plan_alloc_size; /* query_plan alloc size */ + bool is_xasl_pinned_reference; /* to pin xasl cache entry */ + bool recompile_xasl_pinned; /* whether recompile again after xasl cache entry has been pinned */ + bool recompile_xasl; + SHA1Hash sha1; +}; +#endif // _COMPILE_CONTEXT_H_ diff --git a/src/xasl/xasl_stream.cpp b/src/xasl/xasl_stream.cpp new file mode 100644 index 00000000000..11d46756126 --- /dev/null +++ b/src/xasl/xasl_stream.cpp @@ -0,0 +1,556 @@ +/* + * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +// +// XASL stream - common interface for xasl_to_stream and stream_to_xasl +// + +#include "xasl_stream.hpp" + +#include "memory_alloc.h" +#include "object_representation.h" + +#if !defined(SERVER_MODE) +static XASL_UNPACK_INFO *xasl_Unpack_info = NULL; +static int stx_Xasl_errcode = NO_ERROR; +#endif /* !SERVER_MODE */ + +/* + * stx_get_xasl_unpack_info_ptr () - + * return: + */ +XASL_UNPACK_INFO * +stx_get_xasl_unpack_info_ptr (THREAD_ENTRY *thread_p) +{ +#if defined(SERVER_MODE) + return (XASL_UNPACK_INFO *) thread_p->xasl_unpack_info_ptr; +#else /* SERVER_MODE */ + return (XASL_UNPACK_INFO *) xasl_Unpack_info; +#endif /* SERVER_MODE */ +} + +/* + * stx_set_xasl_unpack_info_ptr () - + * return: + * ptr(in) : + */ +void +stx_set_xasl_unpack_info_ptr (THREAD_ENTRY *thread_p, XASL_UNPACK_INFO *ptr) +{ +#if defined (SERVER_MODE) + thread_p->xasl_unpack_info_ptr = ptr; +#else + xasl_Unpack_info = ptr; +#endif +} + +/* + * stx_get_xasl_errcode () - + * return: + */ +int +stx_get_xasl_errcode (THREAD_ENTRY *thread_p) +{ +#if defined(SERVER_MODE) + return thread_p->xasl_errcode; +#else /* SERVER_MODE */ + return stx_Xasl_errcode; +#endif /* SERVER_MODE */ +} + +/* + * stx_set_xasl_errcode () - + * return: + * errcode(in) : + */ +void +stx_set_xasl_errcode (THREAD_ENTRY *thread_p, int errcode) +{ +#if defined(SERVER_MODE) + thread_p->xasl_errcode = errcode; +#else /* SERVER_MODE */ + stx_Xasl_errcode = errcode; +#endif /* SERVER_MODE */ +} + +/* + * stx_init_xasl_unpack_info () - + * return: + * xasl_stream(in) : pointer to xasl stream + * xasl_stream_size(in) : + * + * Note: initialize the xasl pack information. 
+ */ +int +stx_init_xasl_unpack_info (THREAD_ENTRY *thread_p, char *xasl_stream, int xasl_stream_size) +{ + int n; + XASL_UNPACK_INFO *unpack_info; + int head_offset, body_offset; + +#define UNPACK_SCALE 3 /* TODO: assume */ + + head_offset = sizeof (XASL_UNPACK_INFO); + head_offset = xasl_stream_make_align (head_offset); + body_offset = xasl_stream_size * UNPACK_SCALE; + body_offset = xasl_stream_make_align (body_offset); + unpack_info = (XASL_UNPACK_INFO *) db_private_alloc (thread_p, head_offset + body_offset); + stx_set_xasl_unpack_info_ptr (thread_p, unpack_info); + if (unpack_info == NULL) + { + return ER_FAILED; + } + unpack_info->packed_xasl = xasl_stream; + unpack_info->packed_size = xasl_stream_size; + for (n = 0; n < MAX_PTR_BLOCKS; ++n) + { + unpack_info->ptr_blocks[n] = (STX_VISITED_PTR *) 0; + unpack_info->ptr_lwm[n] = 0; + unpack_info->ptr_max[n] = 0; + } + unpack_info->alloc_size = xasl_stream_size * UNPACK_SCALE; + unpack_info->alloc_buf = (char *) unpack_info + head_offset; + unpack_info->additional_buffers = NULL; + unpack_info->track_allocated_bufers = 0; +#if defined (SERVER_MODE) + unpack_info->thrd = thread_p; +#endif /* SERVER_MODE */ + + return NO_ERROR; +} + +/* + * stx_mark_struct_visited () - + * return: if successful, return NO_ERROR, otherwise + * ER_FAILED and error code is set to xasl_errcode + * ptr(in) : pointer constant to be marked visited + * str(in) : where the struct pointed by 'ptr' is stored + * + * Note: mark the given pointer constant as visited to avoid + * duplicated storage of a struct which is pointed by more than one node + */ +int +stx_mark_struct_visited (THREAD_ENTRY *thread_p, const void *ptr, void *str) +{ + int new_lwm; + int block_no; + XASL_UNPACK_INFO *xasl_unpack_info = stx_get_xasl_unpack_info_ptr (thread_p); + + block_no = xasl_stream_get_ptr_block (ptr); + new_lwm = xasl_unpack_info->ptr_lwm[block_no]; + + if (xasl_unpack_info->ptr_max[block_no] == 0) + { + xasl_unpack_info->ptr_max[block_no] = 
START_PTR_PER_BLOCK; + xasl_unpack_info->ptr_blocks[block_no] = + (STX_VISITED_PTR *) db_private_alloc (thread_p, sizeof (STX_VISITED_PTR) * xasl_unpack_info->ptr_max[block_no]); + } + else if (xasl_unpack_info->ptr_max[block_no] <= new_lwm) + { + xasl_unpack_info->ptr_max[block_no] *= 2; + xasl_unpack_info->ptr_blocks[block_no] = + (STX_VISITED_PTR *) db_private_realloc (thread_p, xasl_unpack_info->ptr_blocks[block_no], + sizeof (STX_VISITED_PTR) * xasl_unpack_info->ptr_max[block_no]); + } + + if (xasl_unpack_info->ptr_blocks[block_no] == (STX_VISITED_PTR *) NULL) + { + stx_set_xasl_errcode (thread_p, ER_OUT_OF_VIRTUAL_MEMORY); + return ER_FAILED; + } + + xasl_unpack_info->ptr_blocks[block_no][new_lwm].ptr = ptr; + xasl_unpack_info->ptr_blocks[block_no][new_lwm].str = str; + + xasl_unpack_info->ptr_lwm[block_no]++; + + return NO_ERROR; +} + +/* + * stx_get_struct_visited_ptr () - + * return: if the ptr is already visited, the offset of + * position where the node pointed by 'ptr' is stored, + * otherwise, ER_FAILED (xasl_errcode is NOT set) + * ptr(in) : pointer constant to be checked if visited or not + * + * Note: check if the node pointed by `ptr` is already stored or + * not to avoid multiple store of the same node + */ +void * +stx_get_struct_visited_ptr (THREAD_ENTRY *thread_p, const void *ptr) +{ + int block_no; + int element_no; + XASL_UNPACK_INFO *xasl_unpack_info = stx_get_xasl_unpack_info_ptr (thread_p); + + block_no = xasl_stream_get_ptr_block (ptr); + + if (xasl_unpack_info->ptr_lwm[block_no] <= 0) + { + return NULL; + } + + for (element_no = 0; element_no < xasl_unpack_info->ptr_lwm[block_no]; element_no++) + { + if (ptr == xasl_unpack_info->ptr_blocks[block_no][element_no].ptr) + { + return (xasl_unpack_info->ptr_blocks[block_no][element_no].str); + } + } + + return NULL; +} + +/* + * stx_free_visited_ptrs () - + * return: + * + * Note: free memory allocated to manage visited ptr constants + */ +void +stx_free_visited_ptrs (THREAD_ENTRY *thread_p) 
+{ + int i; + XASL_UNPACK_INFO *xasl_unpack_info = stx_get_xasl_unpack_info_ptr (thread_p); + + for (i = 0; i < MAX_PTR_BLOCKS; i++) + { + xasl_unpack_info->ptr_lwm[i] = 0; + xasl_unpack_info->ptr_max[i] = 0; + if (xasl_unpack_info->ptr_blocks[i]) + { + db_private_free_and_init (thread_p, xasl_unpack_info->ptr_blocks[i]); + xasl_unpack_info->ptr_blocks[i] = (STX_VISITED_PTR *) 0; + } + } +} + +/* + * stx_alloc_struct () - + * return: + * size(in) : # of bytes of the node + * + * Note: allocate storage for structures pointed to from the xasl tree. + */ +char * +stx_alloc_struct (THREAD_ENTRY *thread_p, int size) +{ + char *ptr; + XASL_UNPACK_INFO *xasl_unpack_info = stx_get_xasl_unpack_info_ptr (thread_p); + + if (!size) + { + return NULL; + } + + size = xasl_stream_make_align (size); /* alignment */ + if (size > xasl_unpack_info->alloc_size) + { + /* need to alloc */ + int p_size; + + p_size = MAX (size, xasl_unpack_info->packed_size); + p_size = xasl_stream_make_align (p_size); /* alignment */ + ptr = (char *) db_private_alloc (thread_p, p_size); + if (ptr == NULL) + { + return NULL; /* error */ + } + xasl_unpack_info->alloc_size = p_size; + xasl_unpack_info->alloc_buf = ptr; + if (xasl_unpack_info->track_allocated_bufers) + { + UNPACK_EXTRA_BUF *add_buff = NULL; + add_buff = (UNPACK_EXTRA_BUF *) db_private_alloc (thread_p, sizeof (UNPACK_EXTRA_BUF)); + if (add_buff == NULL) + { + db_private_free_and_init (thread_p, ptr); + return NULL; + } + add_buff->buff = ptr; + add_buff->next = NULL; + + if (xasl_unpack_info->additional_buffers == NULL) + { + xasl_unpack_info->additional_buffers = add_buff; + } + else + { + add_buff->next = xasl_unpack_info->additional_buffers; + xasl_unpack_info->additional_buffers = add_buff; + } + } + } + + /* consume alloced buffer */ + ptr = xasl_unpack_info->alloc_buf; + xasl_unpack_info->alloc_size -= size; + xasl_unpack_info->alloc_buf += size; + + return ptr; +} + +char * +stx_build_db_value (THREAD_ENTRY *thread_p, char *ptr, 
DB_VALUE *value) +{ + ptr = or_unpack_db_value (ptr, value); + + return ptr; +} + +char * +stx_build_string (THREAD_ENTRY *thread_p, char *ptr, char *string) +{ + int offset; + + ptr = or_unpack_int (ptr, &offset); + assert_release (offset > 0); + + (void) memcpy (string, ptr, offset); + ptr += offset; + + return ptr; +} + +char * +stx_restore_string (THREAD_ENTRY *thread_p, char *&ptr) +{ +#if !defined (CS_MODE) + char *string; + int length; + int offset = 0; + + ptr = or_unpack_int (ptr, &offset); + if (offset == 0) + { + return NULL; + } + + char *bufptr = &stx_get_xasl_unpack_info_ptr (thread_p)->packed_xasl[offset]; + if (ptr == NULL) + { + return NULL; + } + + string = (char *) stx_get_struct_visited_ptr (thread_p, bufptr); + if (string != NULL) + { + return string; + } + + length = OR_GET_INT (bufptr); + + if (length == -1) + { + /* unpack null-string */ + assert (string == NULL); + } + else + { + assert_release (length > 0); + + string = (char *) stx_alloc_struct (thread_p, length); + if (string == NULL) + { + stx_set_xasl_errcode (thread_p, ER_OUT_OF_VIRTUAL_MEMORY); + return NULL; + } + + if (stx_mark_struct_visited (thread_p, bufptr, string) == ER_FAILED + || stx_build_string (thread_p, bufptr, string) == NULL) + { + return NULL; + } + } + + return string; +#else // CS_MODE + int dummy; + ptr = or_unpack_int (ptr, &dummy); + return NULL; +#endif // CS_MODE +} + +char * +stx_build (THREAD_ENTRY *thread_p, char *ptr, cubxasl::json_table::column &jtc) +{ + int temp_int; + XASL_UNPACK_INFO *xasl_unpack_info = stx_get_xasl_unpack_info_ptr (thread_p); + + ptr = or_unpack_int (ptr, &temp_int); + jtc.m_function = (json_table_column_function) temp_int; + + stx_restore (thread_p, ptr, jtc.m_output_value_pointer); + + if (jtc.m_function == JSON_TABLE_ORDINALITY) + { + jtc.m_domain = &tp_Integer_domain; + return ptr; + } + + ptr = or_unpack_domain (ptr, &jtc.m_domain, NULL); + + jtc.m_path = stx_restore_string (thread_p, ptr); + jtc.m_column_name = 
stx_restore_string (thread_p, ptr); + + if (jtc.m_function == JSON_TABLE_EXISTS) + { + return ptr; + } + + ptr = stx_unpack (thread_p, ptr, jtc.m_on_error); + ptr = stx_unpack (thread_p, ptr, jtc.m_on_empty); + + return ptr; +} + +char * +stx_build (THREAD_ENTRY *thread_p, char *ptr, cubxasl::json_table::node &jtn) +{ + int temp_int = 0; + + jtn.m_iterator = nullptr; + + jtn.m_path = stx_restore_string (thread_p, ptr); + + ptr = or_unpack_int (ptr, &temp_int); + jtn.m_output_columns_size = (size_t) temp_int; + if (jtn.m_output_columns_size > 0) + { + jtn.m_output_columns = + (json_table_column *) stx_alloc_struct (thread_p, (int) (sizeof (json_table_column) * jtn.m_output_columns_size)); + for (size_t i = 0; i < jtn.m_output_columns_size; ++i) + { + jtn.m_output_columns[i].init (); + ptr = stx_build (thread_p, ptr, jtn.m_output_columns[i]); + } + } + + ptr = or_unpack_int (ptr, &temp_int); + jtn.m_nested_nodes_size = (size_t) temp_int; + if (jtn.m_nested_nodes_size > 0) + { + jtn.m_nested_nodes = + (json_table_node *) stx_alloc_struct (thread_p, (int) (sizeof (json_table_node) * jtn.m_nested_nodes_size)); + for (size_t i = 0; i < jtn.m_nested_nodes_size; ++i) + { + jtn.m_nested_nodes[i].init (); + ptr = stx_build (thread_p, ptr, jtn.m_nested_nodes[i]); + } + } + + ptr = or_unpack_int (ptr, &temp_int); + jtn.m_id = (size_t) temp_int; + + ptr = or_unpack_int (ptr, &temp_int); + jtn.m_expand_type = (json_table_expand_type) temp_int; + + return ptr; +} + +char * +stx_build (THREAD_ENTRY *thread_p, char *ptr, cubxasl::json_table::spec_node &json_table_spec) +{ + json_table_spec.init (); + + int node_count; + ptr = or_unpack_int (ptr, &node_count); + json_table_spec.m_node_count = (size_t) (node_count); + + stx_restore (thread_p, ptr, json_table_spec.m_json_reguvar); + + stx_alloc (thread_p, json_table_spec.m_root_node); + assert (json_table_spec.m_root_node != NULL); + + json_table_spec.m_root_node->init (); + ptr = stx_build (thread_p, ptr, 
*json_table_spec.m_root_node); + + return ptr; +} + +char * +stx_build (THREAD_ENTRY *thread_p, char *ptr, db_value &val) +{ + return stx_build_db_value (thread_p, ptr, &val); +} + +char * +stx_unpack (THREAD_ENTRY *thread_p, char *ptr, json_table_column_behavior &behavior) +{ + int temp; + + ptr = or_unpack_int (ptr, &temp); + behavior.m_behavior = (json_table_column_behavior_type) temp; + + if (behavior.m_behavior == JSON_TABLE_DEFAULT_VALUE) + { + behavior.m_default_value = (DB_VALUE *) stx_alloc_struct (thread_p, sizeof (DB_VALUE)); + ptr = stx_build (thread_p, ptr, *behavior.m_default_value); + } + + return ptr; +} + +bool +xasl_stream_compare (const cubxasl::json_table::column &first, const cubxasl::json_table::column &second) +{ + if (first.m_function != second.m_function) + { + return false; + } + + return true; +} + +bool +xasl_stream_compare (const cubxasl::json_table::node &first, const cubxasl::json_table::node &second) +{ + if (first.m_output_columns_size != second.m_output_columns_size) + { + return false; + } + + if (first.m_nested_nodes_size != second.m_nested_nodes_size) + { + return false; + } + + if (first.m_id != second.m_id) + { + return false; + } + + if (first.m_expand_type != second.m_expand_type) + { + return false; + } + + return true; +} + +bool +xasl_stream_compare (const cubxasl::json_table::spec_node &first, const cubxasl::json_table::spec_node &second) +{ + if (first.m_node_count != second.m_node_count) + { + return false; + } + return true; +} diff --git a/src/xasl/xasl_stream.hpp b/src/xasl/xasl_stream.hpp new file mode 100644 index 00000000000..7d279b2858a --- /dev/null +++ b/src/xasl/xasl_stream.hpp @@ -0,0 +1,217 @@ +/* + * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +// +// XASL stream - common interface for xasl_to_stream and stream_to_xasl +// + +#ifndef _XASL_STREAM_HPP_ +#define _XASL_STREAM_HPP_ + +#include "thread_compat.hpp" +#include "xasl.h" + +// forward def +struct db_value; +struct regu_variable_node; + +const std::size_t MAX_PTR_BLOCKS = 256; +const std::size_t OFFSETS_PER_BLOCK = 4096; +const std::size_t START_PTR_PER_BLOCK = 15; +/* + * the linear byte stream for store the given XASL tree is allocated + * and expanded dynamically on demand by the following amount of bytes + */ +const std::size_t STREAM_EXPANSION_UNIT = OFFSETS_PER_BLOCK * sizeof (int); + +const int XASL_STREAM_ALIGN_UNIT = sizeof (double); +const int XASL_STREAM_ALIGN_MASK = XASL_STREAM_ALIGN_UNIT - 1; + +/* structure of a visited pointer constant */ +typedef struct visited_ptr STX_VISITED_PTR; +struct visited_ptr +{ + const void *ptr; /* a pointer constant */ + void *str; /* where the struct pointed by 'ptr' is stored */ +}; + +/* structure for additional memory during filtered predicate unpacking */ +typedef struct unpack_extra_buf UNPACK_EXTRA_BUF; +struct unpack_extra_buf +{ + char *buff; + UNPACK_EXTRA_BUF *next; +}; + +/* structure to hold information needed during packing */ +typedef struct xasl_unpack_info 
XASL_UNPACK_INFO; +struct xasl_unpack_info +{ + char *packed_xasl; /* ptr to packed xasl tree */ +#if defined (SERVER_MODE) + THREAD_ENTRY *thrd; /* used for private allocation */ +#endif /* SERVER_MODE */ + + /* blocks of visited pointer constants */ + STX_VISITED_PTR *ptr_blocks[MAX_PTR_BLOCKS]; + + char *alloc_buf; /* alloced buf */ + + int packed_size; /* packed xasl tree size */ + + /* low-water-mark of visited pointers */ + int ptr_lwm[MAX_PTR_BLOCKS]; + + /* max number of visited pointers */ + int ptr_max[MAX_PTR_BLOCKS]; + + int alloc_size; /* alloced buf size */ + + /* list of additional buffers allocated during xasl unpacking */ + UNPACK_EXTRA_BUF *additional_buffers; + /* 1 if additional buffers should be tracked */ + int track_allocated_bufers; + + bool use_xasl_clone; /* true, if uses xasl clone */ +}; + +inline int xasl_stream_make_align (int x); +inline int xasl_stream_get_ptr_block (const void *ptr); + +int stx_get_xasl_errcode (THREAD_ENTRY *thread_p); +void stx_set_xasl_errcode (THREAD_ENTRY *thread_p, int errcode); +XASL_UNPACK_INFO *stx_get_xasl_unpack_info_ptr (THREAD_ENTRY *thread_p); +void stx_set_xasl_unpack_info_ptr (THREAD_ENTRY *thread_p, XASL_UNPACK_INFO *ptr); +int stx_init_xasl_unpack_info (THREAD_ENTRY *thread_p, char *xasl_stream, int xasl_stream_size); + +int stx_mark_struct_visited (THREAD_ENTRY *thread_p, const void *ptr, void *str); +void *stx_get_struct_visited_ptr (THREAD_ENTRY *thread_p, const void *ptr); +void stx_free_visited_ptrs (THREAD_ENTRY *thread_p); +char *stx_alloc_struct (THREAD_ENTRY *thread_p, int size); + +// all stx_build overloads +char *stx_build (THREAD_ENTRY *thread_p, char *ptr, cubxasl::json_table::spec_node &jts); +char *stx_build (THREAD_ENTRY *thread_p, char *ptr, cubxasl::json_table::column &jtc); +char *stx_build (THREAD_ENTRY *thread_p, char *ptr, cubxasl::json_table::node &jtn); +// next stx_build functions are not ported to xasl_stream.cpp and cannot be used for debug checks +char *stx_build 
(THREAD_ENTRY *thread_p, char *ptr, db_value &val);
+char *stx_build (THREAD_ENTRY *thread_p, char *ptr, regu_variable_node &reguvar);
+
+// dependencies not ported
+char *stx_build_db_value (THREAD_ENTRY *thread_p, char *tmp, DB_VALUE *ptr);
+char *stx_build_string (THREAD_ENTRY *thread_p, char *tmp, char *ptr);
+
+// restore string; return restored string, updates stream pointer
+char *stx_restore_string (THREAD_ENTRY *thread_p, char *&ptr);
+
+// all stx_unpack overloads; equivalent to stx_build, but never used for stx_restore
+char *stx_unpack (THREAD_ENTRY *thread_p, char *tmp, json_table_column_behavior &behavior);
+
+// xasl stream compare functions - used for debugging to compare originals and packed/unpacked objects
+bool xasl_stream_compare (const cubxasl::json_table::column &first, const cubxasl::json_table::column &second);
+bool xasl_stream_compare (const cubxasl::json_table::node &first, const cubxasl::json_table::node &second);
+bool xasl_stream_compare (const cubxasl::json_table::spec_node &first, const cubxasl::json_table::spec_node &second);
+
+template <typename T>
+static void stx_alloc (THREAD_ENTRY *thread_p, T *&ptr);
+template <typename T>
+static void stx_alloc_array (THREAD_ENTRY *thread_p, T *&ptr, std::size_t count);
+
+template <typename T>
+void stx_restore (THREAD_ENTRY *thread_p, char *&ptr, T *&target);
+
+//////////////////////////////////////////////////////////////////////////
+// Template and inline implementation
+//////////////////////////////////////////////////////////////////////////
+int
+xasl_stream_make_align (int x)
+{
+  return (((x) & ~XASL_STREAM_ALIGN_MASK) + (((x) & XASL_STREAM_ALIGN_MASK) ? XASL_STREAM_ALIGN_UNIT : 0));
+}
+
+int
+xasl_stream_get_ptr_block (const void *ptr)
+{
+  return static_cast<int> ((((UINTPTR) ptr) / sizeof (UINTPTR)) % MAX_PTR_BLOCKS);
+}
+
+// restore from stream buffer
+//
+// template T should have an overload of stx_build.
+//
+// if you want to prevent saving to and restoring from stream buffer, use stx_unpack instead of stx_build.
+//
+template <typename T>
+static void
+stx_restore (THREAD_ENTRY *thread_p, char *&ptr, T *&target)
+{
+#if !defined (CS_MODE)
+  int offset;
+
+  ptr = or_unpack_int (ptr, &offset);
+  if (offset == 0)
+    {
+      target = NULL;
+    }
+  else
+    {
+      char *bufptr = &stx_get_xasl_unpack_info_ptr (thread_p)->packed_xasl[offset];
+      target = (T *) stx_get_struct_visited_ptr (thread_p, bufptr);
+      if (target != NULL)
+        {
+          return;
+        }
+      if (stx_mark_struct_visited (thread_p, bufptr, target) != NO_ERROR)
+        {
+          assert (false);
+          return;
+        }
+      target = (T *) stx_alloc_struct (thread_p, (int) sizeof (T));
+      if (target == NULL)
+        {
+          assert (false);
+          return;
+        }
+
+      if (stx_build (thread_p, bufptr, *target) == NULL)
+        {
+          assert (false);
+        }
+    }
+#else // CS_MODE
+  // NOTE - in CS_MODE, we only need to do some debug checks and we don't have to do actual restoring
+  int dummy;
+  ptr = or_unpack_int (ptr, &dummy);
+  target = NULL;
+#endif // CS_MODE
+}
+
+template <typename T>
+void stx_alloc (THREAD_ENTRY *thread_p, T *&ptr)
+{
+  ptr = (T *) stx_alloc_struct (thread_p, (int) sizeof (T));
+}
+
+template <typename T>
+static void stx_alloc_array (THREAD_ENTRY *thread_p, T *&ptr, std::size_t count)
+{
+  ptr = (T *) stx_alloc_struct (thread_p, (int) (count * sizeof (T)));
+}
+
+#endif // _XASL_STREAM_HPP_
diff --git a/unit_tests/replication/test_log_generator.cpp b/unit_tests/replication/test_log_generator.cpp
index 2e5b40ece86..cb240185b28 100644
--- a/unit_tests/replication/test_log_generator.cpp
+++ b/unit_tests/replication/test_log_generator.cpp
@@ -175,7 +175,7 @@ namespace test_replication
     cubreplication::log_generator::create_stream (0);
 
     cubreplication::log_generator *lg =
-      new cubreplication::log_generator (cubreplication::log_generator::get_stream ());
+	  new cubreplication::log_generator (cubreplication::log_generator::get_stream ());
 
     lg->append_repl_object (sbr1);
     lg->append_repl_object (rbr1);
@@ -280,7 +280,7 @@ namespace test_replication
   };
 
-  std::atomic<int> tasks_running(0);
+  std::atomic<int> tasks_running (0);
 
   class
gen_repl_task : public cubthread::entry_task { @@ -288,7 +288,7 @@ namespace test_replication gen_repl_task (int tran_id) { m_thread_entry.tran_index = tran_id; - m_lg = new cubreplication::log_generator (cubreplication::log_generator::get_stream ()); + m_lg = new cubreplication::log_generator (cubreplication::log_generator::get_stream ()); } void execute (cubthread::entry &thread_ref) override @@ -331,7 +331,7 @@ namespace test_replication int res = 0; init_common_cubrid_modules (); - + cubreplication::log_generator *lg = new cubreplication::log_generator; cubreplication::log_generator::create_stream (0); diff --git a/unit_tests/stream/test_stream.cpp b/unit_tests/stream/test_stream.cpp index 2b9d6fbde36..0593cb13669 100644 --- a/unit_tests/stream/test_stream.cpp +++ b/unit_tests/stream/test_stream.cpp @@ -771,13 +771,13 @@ namespace test_stream { std::this_thread::sleep_for (std::chrono::microseconds (100)); - float stream_fill_factor = stream_context_manager::g_stream->stream_fill_factor (); - if (stream_fill_factor < 0.45f && stream_context_manager::g_pause_packer) - { + float stream_fill_factor = stream_context_manager::g_stream->stream_fill_factor (); + if (stream_fill_factor < 0.45f && stream_context_manager::g_pause_packer) + { std::cout << " stream_pack_task : need resume producing; stream_fill_factor: " << stream_fill_factor << std::endl; stream_context_manager::g_pause_packer = false; - } + } if (stream_context_manager::g_stop_packer) { @@ -878,7 +878,7 @@ namespace test_stream stream_context_manager::g_unpacked_entries_cnt++; - stream_context_manager::update_stream_drop_position (); + stream_context_manager::update_stream_drop_position (); } std::cout << " End of unpacking thread " << std::endl; @@ -926,7 +926,7 @@ namespace test_stream stream_context_manager::g_read_positions[m_reader_id] = my_curr_pos; - stream_context_manager::update_stream_drop_position (); + stream_context_manager::update_stream_drop_position (); //std::this_thread::sleep_for 
(std::chrono::microseconds (10)); } @@ -958,15 +958,15 @@ namespace test_stream cubstream::stream_position stream_context_manager::g_read_positions[200]; void stream_context_manager::update_stream_drop_position (void) - { - cubstream::stream_position drop_pos = stream_context_manager::g_stream->get_curr_read_position (); + { + cubstream::stream_position drop_pos = stream_context_manager::g_stream->get_curr_read_position (); - for (int j = 0; j < stream_context_manager::g_read_byte_threads; j++) - { - drop_pos = MIN (drop_pos, stream_context_manager::g_read_positions[j]); - } - stream_context_manager::g_stream->set_last_dropable_pos (drop_pos); - } + for (int j = 0; j < stream_context_manager::g_read_byte_threads; j++) + { + drop_pos = MIN (drop_pos, stream_context_manager::g_read_positions[j]); + } + stream_context_manager::g_stream->set_last_dropable_pos (drop_pos); + } class stream_producer_throttling @@ -990,7 +990,7 @@ namespace test_stream std::cout << " Stream producer throttled position: " << pos << " bytes: " << byte_count << std::endl; stream_context_manager::g_pause_packer = true; - stream_context_manager::update_stream_drop_position (); + stream_context_manager::update_stream_drop_position (); } return NO_ERROR; diff --git a/win/cubridcs/cubridcs.def b/win/cubridcs/cubridcs.def index 5ca97974a9c..27ea9c5b853 100644 --- a/win/cubridcs/cubridcs.def +++ b/win/cubridcs/cubridcs.def @@ -953,3 +953,4 @@ EXPORTS ; setobj_type db_get_json_raw_body + db_json_val_from_str