I there, this multithreading code is the essence of my usecase and I have been testing with LLVM ThreadSanitizer (I already build libc++ with fsanitize=thread
to the sanitizer work properly). The thing is that TSan claim there is a data race but I don't understand why, because I think I'm doing every read and write atomic.
TSan says that the shared_ptr
deletes my object when other thread is accessing to the object, but why? It is not supposed to be my access should be safe due atomic_load
?
Another thing that I found, is that when I change the constructors in this form it does appease TSan. I don't understand why this fix the race
Node(Node&& other) : name(std::move(other.name)) { } // Apparently fix the race
Node(const Node& other) : name(other.name) { } // Apparently fix the race
So my questions are:
- This is a real data race or a false positive?
- If is a real race, why races?
- If is a real race, why change the form of the constructors "fix" the race
I am still learning how std::atomic
works so thanks for correcting me if I miss something
This is the code: the code and the TSan Traceback files here (It is more clear see the Tsan Traceback and follow the code in the link)
#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>
#include <memory>
#include <vector>
std::atomic_int c(0);
struct Node {
std::string name;
Node() : name() { }
// move and copy constructors, that apparently race
Node(Node&& other) {
name = std::move(other.name);
}
Node(const Node& other) {
name = other.name;
}
// move assignment
Node& operator=(Node&& other) {
name = std::move(other.name);
return *this;
}
Node& operator=(const Node& other) {
name = other.name;
return *this;
}
};
std::shared_ptr<const Node> local_node(std::make_shared<const Node>());
void read_node() {
auto start = std::chrono::system_clock::now();
do {
{
auto local_node_ = std::atomic_load(&local_node);
std::string str = local_node_->name;
}
} while ((std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now() - start)).count() < 1);
}
void write_node() {
auto start = std::chrono::system_clock::now();
do {
{
auto local_node_ = std::atomic_load(&local_node);
auto node_copy = std::make_unique<Node>(*local_node_);
node_copy->name.assign(std::to_string(c.load()));
auto loca_ = std::shared_ptr<const Node>(node_copy.release());
std::atomic_store(&local_node, loca_);
}
c++;
} while ((std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now() - start)).count() < 1);
}
int main() {
std::vector<std::thread> w;
std::vector<std::thread> r;
int thread_size = 10;
while (thread_size > 0) {
w.emplace_back(write_node);
r.emplace_back(read_node);
--thread_size;
}
for (auto& thread : w) {
if (thread.joinable()) {
thread.join();
}
}
for (auto& thread : r) {
if (thread.joinable()) {
thread.join();
}
}
std::cout << local_node->name << std::endl;
}
This is what TSan says:
WARNING: ThreadSanitizer: data race (pid=18599)
Write of size 8 at 0x7d080000dfa0 by thread T12:
#0 operator delete(void*) /home/development/llvm/3.8.0/final/http://ift.tt/1sMfPNj (atomic_access+0x0000000a82ab)
#1 std::__1::default_delete<Node>::operator()(Node*) const /usr/local/bin/../include/c++/v1/memory:2529:13 (atomic_access+0x0000000aa1d3)
#2 std::__1::__shared_ptr_pointer<Node*, std::__1::default_delete<Node>, std::__1::allocator<Node> >::__on_zero_shared() /usr/local/bin/../include/c++/v1/memory:3779 (atomic_access+0x0000000aa1d3)
#3 std::__1::__shared_weak_count::__release_shared() <null> (libc++.so.1+0x00000004c4f8)
#4 read_node() /home/yosef/Desktop/Xapiand/src/atomic_access.cc:51:5 (atomic_access+0x0000000a96f6)
#5 _ZNSt3__18__invokeIPFvvEJEEEDTclclsr3std3__1E7forwardIT_Efp_Espclsr3std3__1E7forwardIT0_Efp0_EEEOS3_DpOS4_ /usr/local/bin/../include/c++/v1/__functional_base:365:12 (atomic_access+0x0000000aa49a)
#6 void std::__1::__thread_execute<void (*)()>(std::__1::tuple<void (*)()>&, std::__1::__tuple_indices<>) /usr/local/bin/../include/c++/v1/thread:347 (atomic_access+0x0000000aa49a)
#7 void* std::__1::__thread_proxy<std::__1::tuple<void (*)()> >(void*) /usr/local/bin/../include/c++/v1/thread:357 (atomic_access+0x0000000aa49a)
Previous read of size 1 at 0x7d080000dfa4 by thread T13:
#0 memmove /home/development/llvm/3.8.0/final/http://ift.tt/1O5C9v6 (atomic_access+0x000000021d1e)
#1 std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::assign(char const*, unsigned long) <null> (libc++.so.1+0x00000003de06)
#2 std::__1::__unique_if<Node>::__unique_single std::__1::make_unique<Node, Node const&>(Node const&) /usr/local/bin/../include/c++/v1/memory:3152:32 (atomic_access+0x0000000a979c)
#3 write_node() /home/yosef/Desktop/Xapiand/src/atomic_access.cc:61 (atomic_access+0x0000000a979c)
#4 _ZNSt3__18__invokeIPFvvEJEEEDTclclsr3std3__1E7forwardIT_Efp_Espclsr3std3__1E7forwardIT0_Efp0_EEEOS3_DpOS4_ /usr/local/bin/../include/c++/v1/__functional_base:365:12 (atomic_access+0x0000000aa49a)
#5 void std::__1::__thread_execute<void (*)()>(std::__1::tuple<void (*)()>&, std::__1::__tuple_indices<>) /usr/local/bin/../include/c++/v1/thread:347 (atomic_access+0x0000000aa49a)
#6 void* std::__1::__thread_proxy<std::__1::tuple<void (*)()> >(void*) /usr/local/bin/../include/c++/v1/thread:357 (atomic_access+0x0000000aa49a)
Aucun commentaire:
Enregistrer un commentaire