// This code contains NVIDIA Confidential Information and is disclosed to you
// under a form of NVIDIA software license agreement provided separately to you.
//
// Notice
// NVIDIA Corporation and its licensors retain all intellectual property and
// proprietary rights in and to this software and related documentation and
// any modifications thereto. Any use, reproduction, disclosure, or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA Corporation is strictly prohibited.
//
// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS." NVIDIA MAKES
// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2016-2018 NVIDIA Corporation. All rights reserved.


#ifndef NVBLASTTKEVENTQUEUE_H
#define NVBLASTTKEVENTQUEUE_H

#include <algorithm>
#include <vector>

#include <mutex>
#include <atomic>

#include "NvBlastTkFrameworkImpl.h"
#include "NvBlastAssert.h"


namespace Nv {
namespace Blast {

/**
A dispatcher queue providing preallocation and thread-safe insertion into the preallocated storage.

Typical usage (see the sketch below this comment):
- preallocate space for events and payload data:
 - reserveEvents, reserveData
- enable asserts to detect undersized storage (allocations are not thread-safe):
 - protect(true)
- get pointers to payload data and events to fill in; thread-safe for preallocated memory:
 - allocData, addEvent
- back on the main thread, restore consistency:
 - protect(false)
- continue adding events and payload data on the main thread as above if necessary (allocations are safe there)
- eventually dispatch(), or reset() if the events were dispatched by proxy
*/
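/*
A minimal usage sketch, assuming worker tasks that each produce one payload.
TkSplitEvent is used for illustration; any payload type accepted by addEvent works,
and taskCount, listener, and the worker code are assumed context:

	TkEventQueue queue;
	queue.addListener(listener);							// a TkEventListener to dispatch to
	queue.reserveEvents(taskCount);							// one event slot per task
	queue.reserveData(taskCount * sizeof(TkSplitEvent));	// payload pool
	queue.protect(true);									// assert on any further allocation

	// On worker threads:
	TkSplitEvent* e = queue.allocData<TkSplitEvent>();
	// ... fill in *e ...
	queue.addEvent(e);

	// Back on the main thread, after all workers have finished:
	queue.protect(false);									// restore size consistency
	queue.dispatch();										// notify listeners, then reset
*/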
class TkEventQueue
{
public:
	TkEventQueue() : m_currentEvent(0), m_poolCapacity(0), m_pool(nullptr), m_allowAllocs(true) {}

	/**
	Peek at the event queue for dispatch.
	Do not use while in the protected state.
	*/
	operator const Array<TkEvent>::type&() 
	{
		NVBLAST_ASSERT(m_allowAllocs);
		NVBLAST_ASSERT(m_currentEvent == m_events.size());
		return m_events; 
	}

	/**
	Debug helper to catch unwanted allocations during task work.
	Note that this does not actually prevent allocations; it only asserts in debug builds.

	Set true before using the queue from multiple threads.
	Set false to return to single-threaded mode; call this from one thread only,
	after all workers have finished.
	*/
	void protect(bool enable)
	{
		// During parallel use, m_events.size() and m_currentEvent are allowed to diverge.
		// This is fine because resizeUninitialized does not alter the stored data.
		NVBLAST_ASSERT(m_currentEvent <= m_events.capacity());
		m_events.resizeUninitialized(m_currentEvent);
		m_allowAllocs = !enable;
	}

	/**
	Restores the initial state.
	Payload memory is currently not reused; this could be improved.
	*/
	void reset()
	{
		m_events.clear();
		m_currentEvent = 0;
		for (void* mem : m_memory)
		{
			NVBLAST_FREE(mem);
		}
		m_memory.clear();
		m_currentData = 0;
		m_allowAllocs = true;
		m_poolCapacity = 0;
		m_pool = nullptr;
	}

	/**
	Queue an event with a payload.
	*/
	template<class T>
	void addEvent(T* payload)
	{
		uint32_t index = m_currentEvent.fetch_add(1);

		// Should not allocate in protected state.
		NVBLAST_ASSERT(m_allowAllocs || m_currentEvent <= m_events.capacity());

		m_events.resizeUninitialized(m_currentEvent);

		// During parallel use, m_events.size() and m_currentEvent are allowed to diverge.
		// Consistency is restored in protect().
		NVBLAST_ASSERT(!m_allowAllocs || m_currentEvent == m_events.size());

		TkEvent& evt = m_events[index];
		evt.type = TkEvent::Type(T::EVENT_TYPE);
		evt.payload = payload;
	}
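
	/*
	The payload type T must expose a static EVENT_TYPE value convertible to
	TkEvent::Type. A minimal sketch of a conforming payload (MyPayload is
	hypothetical; the toolkit's real payload types declare EVENT_TYPE the same way):

		struct MyPayload
		{
			enum { EVENT_TYPE = TkEvent::FractureCommand };	// any TkEvent::Type value
			// ... event data ...
		};

		MyPayload* p = queue.allocData<MyPayload>();
		queue.addEvent(p);
	*/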

	/**
	Request storage for a payload of type T.
	Thread-safe as long as the preallocated pool (see reserveData) has room;
	otherwise a new block is allocated, which asserts in the protected state.
	*/
	template<typename T>
	T* allocData()
	{
		uint32_t index = m_currentData.fetch_add(sizeof(T));
		if (m_currentData <= m_poolCapacity)
		{
			return reinterpret_cast<T*>(&m_pool[index]);
		}
		else
		{
			// Could do larger block allocation here.
			reserveData(sizeof(T));
			// Account for the requested size.
			m_currentData = sizeof(T);
			return reinterpret_cast<T*>(&m_pool[0]);
		}
	}
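
	/*
	Sizing note: the fallback path above allocates a new block, which the
	protected state forbids. Reserve enough pool space up front; for mixed
	payload types this is the sum of the individual sizes, e.g. (illustrative
	counts assumed):

		queue.reserveData(splitCount * sizeof(TkSplitEvent) + jointCount * sizeof(TkJointUpdateEvent));
	*/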

	/**
	Preallocate a memory block of the given size in bytes for payload data.
	Note that this always allocates a new memory block.
	Subsequent calls to allocData will use this memory piecewise.
	*/
	void reserveData(size_t size)
	{
		NVBLAST_ASSERT(m_allowAllocs);
		m_pool = reinterpret_cast<uint8_t*>(allocDataBySize(size));
		m_poolCapacity = size;
		m_currentData = 0;
	}

	/**
	Preallocate space for n additional events.
	*/
	void reserveEvents(uint32_t n)
	{
		NVBLAST_ASSERT(m_allowAllocs);
		m_events.reserve(m_events.size() + n);
	}

	/**
	Add a listener to dispatch to.
	*/
	void addListener(TkEventListener& l)
	{
		m_listeners.pushBack(&l);
	}

	/**
	Remove a listener from the dispatch list.
	*/
	void removeListener(TkEventListener& l)
	{
		m_listeners.findAndReplaceWithLast(&l);
	}

	/**
	Dispatch the stored events to the registered listeners.
	After dispatch, all data is invalidated.
	*/
	void dispatch()
	{
		dispatch(*this);
		reset();
	}

	/**
	Proxy function to dispatch an arbitrary set of events to this queue's listeners.
	*/
	void dispatch(const Array<TkEvent>::type& events) const
	{
		if (events.size())
		{
			for (TkEventListener* l : m_listeners)
			{
				BLAST_PROFILE_SCOPE_M("TkEventQueue::dispatch");
				l->receive(events.begin(), events.size());
			}
		}
	}
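
	/*
	A minimal proxy-dispatch sketch, assuming per-worker queues whose events are
	sent through the main queue's listeners. The worker queue converts implicitly
	to Array<TkEvent>::type via the conversion operator above:

		mainQueue.dispatch(workerQueue);	// workerQueue's events, mainQueue's listeners
		workerQueue.reset();				// proxy dispatch does not reset the source queue
	*/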

private:
	/**
	Allocates a block of the given size in bytes for payload data and stores it for deallocation in reset().
	*/
	void* allocDataBySize(size_t size)
	{
		void* memory = nullptr;
		if (size > 0)
		{
			memory = NVBLAST_ALLOC_NAMED(size, "TkEventQueue Data");
			m_memory.pushBack(memory);
		}
		return memory;
	}


	Array<TkEvent>::type					m_events;		//!< holds events
	Array<void*>::type						m_memory;		//!< holds allocated data memory blocks
	std::atomic<uint32_t>					m_currentEvent;	//!< reference index for event insertion
	std::atomic<uint32_t>					m_currentData;	//!< reference index for data insertion
	size_t									m_poolCapacity;	//!< size of the currently active memory block (m_pool)
	uint8_t*								m_pool;			//!< the current memory block allocData() uses
	bool									m_allowAllocs;	//!< assert guard
	InlineArray<TkEventListener*,4>::type	m_listeners;	//!< objects to dispatch to
};

}	// namespace Blast
}	// namespace Nv


#endif	// ifndef NVBLASTTKEVENTQUEUE_H