mirror of
https://github.com/curl/curl.git
synced 2026-04-21 10:22:32 +03:00
Before this patch curl used the C preprocessor to override standard memory allocation symbols: malloc, calloc, strdup, realloc, free. The goal of these is to replace them with curl's debug wrappers in `CURLDEBUG` builds, another was to replace them with the wrappers calling user-defined allocators in libcurl. This solution needed a bunch of workarounds to avoid breaking external headers: it relied on include order to do the overriding last. For "unity" builds it needed to reset overrides before external includes. Also in test apps, which are always built as single source files. It also needed the `(symbol)` trick to avoid overrides in some places. This would still not fix cases where the standard symbols were macros. It was also fragile and difficult to figure out which was the actual function behind an alloc or free call in a specific piece of code. This in turn caused bugs where the wrong allocator was accidentally called. To avoid these problems, this patch replaces this solution with `curlx_`-prefixed allocator macros, and mapping them _once_ to either the libcurl wrappers, the debug wrappers or the standard ones, matching the rest of the code in libtests. This concludes the long journey to avoid redefining standard functions in the curl codebase. Note: I did not update `packages/OS400/*.c` sources. They did not `#include` `curl_setup.h`, `curl_memory.h` or `memdebug.h`, meaning the overrides were never applied to them. This may or may not have been correct. For now I suppressed the direct use of standard allocators via a local `.checksrc`. Probably they (except for `curlcl.c`) should be updated to include `curl_setup.h` and use the `curlx_` macros. This patch changes mappings in two places: - `lib/curl_threads.c` in libtests: Before this patch it mapped to libcurl allocators. After, it maps to standard allocators, like the rest of libtests code. - `units`: before this patch it mapped to standard allocators. After, it maps to libcurl allocators. 
Also: - drop all position-dependent `curl_memory.h` and `memdebug.h` includes, and delete the now unnecessary headers. - rename `Curl_tcsdup` macro to `curlx_tcsdup` and define like the other allocators. - map `curlx_strdup()` to `_strdup()` on Windows (was: `strdup()`). To fix warnings silenced via `_CRT_NONSTDC_NO_DEPRECATE`. - multibyte: map `curlx_convert_*()` to `_strdup()` on Windows (was: `strdup()`). - src: do not reuse the `strdup` name for the local replacement. - lib509: call `_strdup()` on Windows (was: `strdup()`). - test1132: delete test obsoleted by this patch. - CHECKSRC.md: update text for `SNPRINTF`. - checksrc: ban standard allocator symbols. Follow-up to b12da22db1 #18866 Follow-up to db98daab05 #18844 Follow-up to 4deea9396b #18814 Follow-up to 9678ff5b1b #18776 Follow-up to 10bac43b87 #18774 Follow-up to 20142f5d06 #18634 Follow-up to bf7375ecc5 #18503 Follow-up to 9863599d69 #18502 Follow-up to 3bb5e58c10 #17827 Closes #19626
179 lines
7 KiB
C
179 lines
7 KiB
C
/***************************************************************************
|
|
* _ _ ____ _
|
|
* Project ___| | | | _ \| |
|
|
* / __| | | | |_) | |
|
|
* | (__| |_| | _ <| |___
|
|
* \___|\___/|_| \_\_____|
|
|
*
|
|
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
|
|
*
|
|
* This software is licensed as described in the file COPYING, which
|
|
* you should have received as part of this distribution. The terms
|
|
* are also available at https://curl.se/docs/copyright.html.
|
|
*
|
|
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
|
|
* copies of the Software, and permit persons to whom the Software is
|
|
* furnished to do so, under the terms of the COPYING file.
|
|
*
|
|
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
|
|
* KIND, either express or implied.
|
|
*
|
|
* SPDX-License-Identifier: curl
|
|
*
|
|
***************************************************************************/
|
|
#include "unitcheck.h"
|
|
|
|
#include "hash.h"
|
|
|
|
/* Number of hash buckets. Deliberately small (3) so the test can exercise
   both a full table and bucket collisions with only four keys. */
static const size_t slots = 3;
/* No-op element destructor: every value stored in the hash by this test
   lives in automatic storage, so there is nothing to free here. */
static void t1603_mydtor(void *p)
{
  (void)p; /* unused */
}
/* Number of times my_elem_dtor() below has been invoked. */
static size_t elem_dtor_calls;

/* Per-element destructor used with Curl_hash_add2(). It does not release
   anything; it merely records that it was called so the test body can
   verify destructor invocation counts. */
static void my_elem_dtor(void *key, size_t key_len, void *p)
{
  (void)key;
  (void)key_len;
  (void)p;
  elem_dtor_calls++;
}
/* Test fixture setup, run via UNITTEST_BEGIN(): initialize the hash with a
   small fixed slot count, string hashing/comparison and a no-op destructor.
   Always succeeds. */
static CURLcode t1603_setup(struct Curl_hash *hash_static)
{
  Curl_hash_init(hash_static, slots, Curl_hash_str,
                 curlx_str_key_compare, t1603_mydtor);
  return CURLE_OK;
}
/* Test fixture teardown, run via UNITTEST_END(): release all resources held
   by the hash. */
static void t1603_stop(struct Curl_hash *hash_static)
{
  Curl_hash_destroy(hash_static);
}
static CURLcode test_unit1603(const char *arg)
|
|
{
|
|
struct Curl_hash hash_static;
|
|
|
|
UNITTEST_BEGIN(t1603_setup(&hash_static))
|
|
|
|
char key1[] = "key1";
|
|
char key2[] = "key2b";
|
|
char key3[] = "key3";
|
|
char key4[] = "key4";
|
|
char notakey[] = "notakey";
|
|
char *nodep;
|
|
int rc;
|
|
|
|
/* Ensure the key hashes are as expected in order to test both hash
|
|
collisions and a full table. Unfortunately, the hashes can vary
|
|
between architectures. */
|
|
if(Curl_hash_str(key1, strlen(key1), slots) != 1 ||
|
|
Curl_hash_str(key2, strlen(key2), slots) != 0 ||
|
|
Curl_hash_str(key3, strlen(key3), slots) != 2 ||
|
|
Curl_hash_str(key4, strlen(key4), slots) != 1)
|
|
curl_mfprintf(stderr,
|
|
"Warning: hashes are not computed as expected on this "
|
|
"architecture; test coverage will be less comprehensive\n");
|
|
|
|
nodep = Curl_hash_add(&hash_static, &key1, strlen(key1), &key1);
|
|
fail_unless(nodep, "insertion into hash failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key1, strlen(key1));
|
|
fail_unless(nodep == key1, "hash retrieval failed");
|
|
|
|
nodep = Curl_hash_add(&hash_static, &key2, strlen(key2), &key2);
|
|
fail_unless(nodep, "insertion into hash failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key2, strlen(key2));
|
|
fail_unless(nodep == key2, "hash retrieval failed");
|
|
|
|
nodep = Curl_hash_add(&hash_static, &key3, strlen(key3), &key3);
|
|
fail_unless(nodep, "insertion into hash failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key3, strlen(key3));
|
|
fail_unless(nodep == key3, "hash retrieval failed");
|
|
|
|
/* The fourth element exceeds the number of slots & collides */
|
|
nodep = Curl_hash_add(&hash_static, &key4, strlen(key4), &key4);
|
|
fail_unless(nodep, "insertion into hash failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key4, strlen(key4));
|
|
fail_unless(nodep == key4, "hash retrieval failed");
|
|
|
|
/* Make sure all elements are still accessible */
|
|
nodep = Curl_hash_pick(&hash_static, &key1, strlen(key1));
|
|
fail_unless(nodep == key1, "hash retrieval failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key2, strlen(key2));
|
|
fail_unless(nodep == key2, "hash retrieval failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key3, strlen(key3));
|
|
fail_unless(nodep == key3, "hash retrieval failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key4, strlen(key4));
|
|
fail_unless(nodep == key4, "hash retrieval failed");
|
|
|
|
/* Delete the second of two entries in a bucket */
|
|
rc = Curl_hash_delete(&hash_static, &key4, strlen(key4));
|
|
fail_unless(rc == 0, "hash delete failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key1, strlen(key1));
|
|
fail_unless(nodep == key1, "hash retrieval failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key4, strlen(key4));
|
|
fail_unless(!nodep, "hash retrieval should have failed");
|
|
|
|
/* Insert that deleted node again */
|
|
nodep = Curl_hash_add(&hash_static, &key4, strlen(key4), &key4);
|
|
fail_unless(nodep, "insertion into hash failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key4, strlen(key4));
|
|
fail_unless(nodep == key4, "hash retrieval failed");
|
|
|
|
/* Delete the first of two entries in a bucket */
|
|
rc = Curl_hash_delete(&hash_static, &key1, strlen(key1));
|
|
fail_unless(rc == 0, "hash delete failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key1, strlen(key1));
|
|
fail_unless(!nodep, "hash retrieval should have failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key4, strlen(key4));
|
|
fail_unless(nodep == key4, "hash retrieval failed");
|
|
|
|
/* Delete the remaining one of two entries in a bucket */
|
|
rc = Curl_hash_delete(&hash_static, &key4, strlen(key4));
|
|
fail_unless(rc == 0, "hash delete failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key1, strlen(key1));
|
|
fail_unless(!nodep, "hash retrieval should have failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key4, strlen(key4));
|
|
fail_unless(!nodep, "hash retrieval should have failed");
|
|
|
|
/* Delete an already deleted node */
|
|
rc = Curl_hash_delete(&hash_static, &key4, strlen(key4));
|
|
fail_unless(rc, "hash delete should have failed");
|
|
|
|
/* Replace an existing node */
|
|
nodep = Curl_hash_add(&hash_static, &key1, strlen(key1), ¬akey);
|
|
fail_unless(nodep, "insertion into hash failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key1, strlen(key1));
|
|
fail_unless(nodep == notakey, "hash retrieval failed");
|
|
|
|
/* Make sure all remaining elements are still accessible */
|
|
nodep = Curl_hash_pick(&hash_static, &key2, strlen(key2));
|
|
fail_unless(nodep == key2, "hash retrieval failed");
|
|
nodep = Curl_hash_pick(&hash_static, &key3, strlen(key3));
|
|
fail_unless(nodep == key3, "hash retrieval failed");
|
|
|
|
/* Add element with own destructor */
|
|
nodep = Curl_hash_add2(&hash_static, &key1, strlen(key1), &key1,
|
|
my_elem_dtor);
|
|
fail_unless(nodep, "add2 insertion into hash failed");
|
|
fail_unless(elem_dtor_calls == 0, "element destructor count should be 0");
|
|
/* Add it again, should invoke destructor on first */
|
|
nodep = Curl_hash_add2(&hash_static, &key1, strlen(key1), &key1,
|
|
my_elem_dtor);
|
|
fail_unless(nodep, "add2 again, insertion into hash failed");
|
|
fail_unless(elem_dtor_calls == 1, "element destructor count should be 1");
|
|
/* remove, should invoke destructor */
|
|
rc = Curl_hash_delete(&hash_static, &key1, strlen(key1));
|
|
fail_unless(rc == 0, "hash delete failed");
|
|
fail_unless(elem_dtor_calls == 2, "element destructor count should be 1");
|
|
|
|
/* Clean up */
|
|
Curl_hash_clean(&hash_static);
|
|
|
|
UNITTEST_END(t1603_stop(&hash_static))
|
|
}
|