mariadb/sql/item_vectorfunc.cc

/* Copyright (c) 2023, MariaDB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */

/**
  @file

  @brief
  This file defines all vector functions
*/
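
/*
  Illustrative usage, assuming these items are registered under the usual SQL
  names (VEC_DISTANCE_EUCLIDEAN / VEC_DISTANCE_COSINE, VEC_ToText,
  VEC_FromText); the registration itself lives elsewhere:

    SELECT VEC_ToText(x'0000803F00000040');        -- presumably '[1,2]'
    SELECT HEX(VEC_FromText('[1,2]'));             -- packed 4-byte floats
    SELECT id FROM t
      ORDER BY VEC_DISTANCE_EUCLIDEAN(v, VEC_FromText('[1,2]')) LIMIT 10;
*/
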
#include "item_vectorfunc.h"
#include "vector_mhnsw.h"
#include "sql_type_vector.h"
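
/*
  Report which keys can provide the sort order for this distance expression:
  a bit is set for every vector index (HA_KEY_ALG_VECTOR) whose first key
  part is the field argument of this item and whose distance metric matches
  it (mhnsw_uses_distance()).  Presumably this is what lets
  ORDER BY VEC_DISTANCE_...(col, const) LIMIT n be served by the HNSW index
  instead of a full sort.
*/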
key_map Item_func_vec_distance_common::part_of_sortkey() const
{
  key_map map(0);
  if (Item_field *item= get_field_arg())
  {
    Field *f= item->field;
    KEY *keyinfo= f->table->s->key_info;
    for (uint i= f->table->s->keys; i < f->table->s->total_keys; i++)
      if (keyinfo[i].algorithm == HA_KEY_ALG_VECTOR && f->key_start.is_set(i)
          && mhnsw_uses_distance(f->table, keyinfo + i, this))
        map.set_bit(i);
  }
  return map;
}
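
/*
  Distance between the two vector arguments.  Returns SQL NULL unless both
  are non-NULL, of equal length, and a multiple of sizeof(float) bytes; the
  actual metric is calc_distance(), provided by the concrete subclass
  (euclidean or cosine).
*/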
double Item_func_vec_distance_common::val_real()
{
  String *r1= args[0]->val_str();
  String *r2= args[1]->val_str();
  null_value= !r1 || !r2 || r1->length() != r2->length() ||
              r1->length() % sizeof(float);
  if (null_value)
    return 0;
  float *v1= (float *) r1->ptr();
  float *v2= (float *) r2->ptr();
  return calc_distance(v1, v2, (r1->length()) / sizeof(float));
}
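
/*
  Worst-case result length for VEC_ToText: each 4-byte float may print as up
  to MAX_FLOAT_STR_LENGTH characters plus a separating comma, and the list
  is wrapped in '[' and ']'.
*/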
bool Item_func_vec_totext::fix_length_and_dec(THD *thd)
{
  decimals= 0;
  max_length= ((args[0]->max_length / 4) *
               (MAX_FLOAT_STR_LENGTH + 1 /* comma */)) + 2 /* braces */;
  fix_length_and_charset(max_length, default_charset());
  set_maybe_null();
  return false;
}
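
/*
  Render the binary vector as text, e.g. "[1,2.5,-3]".  An input whose length
  is not a multiple of 4 raises ER_VECTOR_BINARY_FORMAT_INVALID as a warning
  and yields NULL.  Infinities and NaNs print as "Inf", "-Inf" and "NaN";
  note that those tokens are not JSON numbers, so they will not round-trip
  through VEC_FromText below.
*/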
String *Item_func_vec_totext::val_str_ascii(String *str)
{
  String *r1= args[0]->val_str();
  if ((null_value= args[0]->null_value))
    return nullptr;

  // Wrong size returns null
  if (r1->length() % 4)
  {
    THD *thd= current_thd;
    push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
                        ER_VECTOR_BINARY_FORMAT_INVALID,
                        ER_THD(thd, ER_VECTOR_BINARY_FORMAT_INVALID));
    null_value= true;
    return nullptr;
  }

  str->length(0);
  str->set_charset(&my_charset_numeric);
  str->reserve(r1->length() / 4 * (MAX_FLOAT_STR_LENGTH + 1) + 2);

  str->append('[');
  const char *ptr= r1->ptr();
  for (size_t i= 0; i < r1->length(); i+= 4)
  {
    float val= get_float(ptr);
    if (std::isinf(val))
    {
      if (val < 0)
        str->append(STRING_WITH_LEN("-Inf"));
      else
        str->append(STRING_WITH_LEN("Inf"));
    }
    else if (std::isnan(val))
      str->append(STRING_WITH_LEN("NaN"));
    else
    {
      char buf[MAX_FLOAT_STR_LENGTH + 1];
      size_t l= my_gcvt(val, MY_GCVT_ARG_FLOAT, MAX_FLOAT_STR_LENGTH, buf, 0);
      str->append(buf, l);
    }
    ptr+= 4;
    if (r1->length() - i > 4)
      str->append(',');
  }
  str->append(']');
  return str;
}

Item_func_vec_totext::Item_func_vec_totext(THD *thd, Item *a)
  : Item_str_ascii_checksum_func(thd, a)
{
}

Item_func_vec_fromtext::Item_func_vec_fromtext(THD *thd, Item *a)
  : Item_str_func(thd, a)
{
}

bool Item_func_vec_fromtext::fix_length_and_dec(THD *thd)
{
  decimals= 0;
  /* Worst case for a valid input is a list of single-digit numbers,
     "[1,2,3,...]", which holds (max_length - 1) / 2 floats.  Each float
     takes 4 bytes, hence (max_length - 1) * 2 bytes for the result. */
  fix_length_and_charset((args[0]->max_length - 1) * 2, &my_charset_bin);
  set_maybe_null();
  return false;
}
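
/*
  Parse a flat JSON array of numbers into the packed vector format: each
  element becomes a 4-byte float appended via float4store().  A malformed
  array raises ER_VECTOR_FORMAT_INVALID with the failing position and yields
  NULL; a well-formed array that still fails Type_handler_vector::is_valid()
  is reported as a truncated wrong value.
*/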
String *Item_func_vec_fromtext::val_str(String *buf)
{
  json_engine_t je;
  bool end_ok= false;
  String *value= args[0]->val_json(&tmp_js);

  if ((null_value= !value))
    return nullptr;

  buf->length(0);

  CHARSET_INFO *cs= value->charset();
  const uchar *start= reinterpret_cast<const uchar *>(value->ptr());
  const uchar *end= start + value->length();

  if (json_scan_start(&je, cs, start, end) ||
      json_read_value(&je))
    goto error;

  if (je.value_type != JSON_VALUE_ARRAY)
    goto error_format;

  /* Accept only arrays of floats. */
  do {
    switch (je.state)
    {
    case JST_ARRAY_START:
      continue;
    case JST_ARRAY_END:
      end_ok= true;
      break;
    case JST_VALUE:
    {
      if (json_read_value(&je))
        goto error;
      if (je.value_type != JSON_VALUE_NUMBER)
        goto error_format;

      int error;
      char *start= (char *) je.value_begin, *end;
      float f= (float) cs->strntod(start, je.value_len, &end, &error);
      if (unlikely(error))
        goto error_format;

      char f_bin[4];
      float4store(f_bin, f);
      buf->append(f_bin, sizeof(f_bin));
      break;
    }
    default:
      goto error_format;
    }
  } while (json_scan_next(&je) == 0);

  if (!end_ok)
    goto error_format;

  if (Type_handler_vector::is_valid(buf->ptr(), buf->length()))
    return buf;

  null_value= true;
  push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
                      ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE),
                      "vector", value->c_ptr_safe());
  return nullptr;

error_format:
  {
    int position= (int) ((const char *) je.s.c_str - value->ptr());
    null_value= true;
    push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
                        ER_VECTOR_FORMAT_INVALID, ER(ER_VECTOR_FORMAT_INVALID),
                        position, value->c_ptr_safe());
    return nullptr;
  }

error:
  report_json_error_ex(value->ptr(), &je, func_name(),
                       0, Sql_condition::WARN_LEVEL_WARN);
  null_value= true;
  return nullptr;
}