1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
|
<html>
<head>
<title>NVIDIA(R) PhysX(R) SDK 3.4 API Reference: physx::PxGpuDispatcher Class Reference</title>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<LINK HREF="NVIDIA.css" REL="stylesheet" TYPE="text/css">
</head>
<body bgcolor="#FFFFFF">
<div id="header">
<hr class="first">
<img alt="" src="images/PhysXlogo.png" align="middle"> <br>
<center>
<a class="qindex" href="main.html">Main Page</a>
<a class="qindex" href="hierarchy.html">Class Hierarchy</a>
<a class="qindex" href="annotated.html">Compound List</a>
<a class="qindex" href="functions.html">Compound Members</a>
</center>
<hr class="second">
</div>
<!-- Generated by Doxygen 1.5.8 -->
<div class="navpath"><a class="el" href="namespacephysx.html">physx</a>::<a class="el" href="classphysx_1_1PxGpuDispatcher.html">PxGpuDispatcher</a>
</div>
<div class="contents">
<h1>physx::PxGpuDispatcher Class Reference</h1><!-- doxytag: class="physx::PxGpuDispatcher" -->A GpuTask dispatcher.
<a href="#_details">More...</a>
<p>
<code>#include <<a class="el" href="PxGpuDispatcher_8h-source.html">PxGpuDispatcher.h</a>></code>
<p>
<p>
<a href="classphysx_1_1PxGpuDispatcher-members.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
<tr><td></td></tr>
<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#091cb121a38ddf39fcab51f5118a15d2">startSimulation</a> ()=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Record the start of a simulation step. <a href="#091cb121a38ddf39fcab51f5118a15d2"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#86f6db8b18cf8380967890d694e3df8e">startGroup</a> ()=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Record the start of a GpuTask batch submission. <a href="#86f6db8b18cf8380967890d694e3df8e"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#5ffc5d0c124afbcad0b6fcf1080af5ee">submitTask</a> (<a class="el" href="classphysx_1_1PxTask.html">PxTask</a> &task)=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Submit a GpuTask for execution. <a href="#5ffc5d0c124afbcad0b6fcf1080af5ee"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#2ec171af375195b782acce8fae4d2e93">finishGroup</a> ()=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Record the end of a GpuTask batch submission. <a href="#2ec171af375195b782acce8fae4d2e93"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#27c2631f98a92733515f59823787fdc7">addCompletionPrereq</a> (<a class="el" href="classphysx_1_1PxBaseTask.html">PxBaseTask</a> &task)=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Add a CUDA completion prerequisite dependency to a task. <a href="#27c2631f98a92733515f59823787fdc7"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual PxCudaContextManager * </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#d903de1449d82e6d9e51f610711f74d9">getCudaContextManager</a> ()=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Retrieve the PxCudaContextManager associated with this <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a>. <a href="#d903de1449d82e6d9e51f610711f74d9"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#b46896dea09105e1f5fc1d19a98f2b1b">stopSimulation</a> ()=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Record the end of a simulation frame. <a href="#b46896dea09105e1f5fc1d19a98f2b1b"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual bool </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#3157fe3020f29f2a4c3bdc5eb11b79c6">failureDetected</a> () const =0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Returns true if a CUDA call has returned a non-recoverable error. <a href="#3157fe3020f29f2a4c3bdc5eb11b79c6"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#0310275811d2b9df30ef6dfeda59333b">forceFailureMode</a> ()=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Force the <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> into failure mode. <a href="#0310275811d2b9df30ef6dfeda59333b"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#7dbc8264abbffb8075f37e2d61205671">launchCopyKernel</a> (PxGpuCopyDesc *desc, uint32_t count, <a class="el" href="PxGpuDispatcher_8h.html#b946c7f02e09efd788a204718015d88a">CUstream</a> stream)=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Launch a copy kernel with arbitrary number of copy commands. <a href="#7dbc8264abbffb8075f37e2d61205671"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual <a class="el" href="classphysx_1_1PxBaseTask.html">PxBaseTask</a> & </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#3a12b661dab8671aaa9bd414c2b71440">getPreLaunchTask</a> ()=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Query pre launch task that runs before launching gpu kernels. <a href="#3a12b661dab8671aaa9bd414c2b71440"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#7086ea741a0e605ba51f012743180353">addPreLaunchDependent</a> (<a class="el" href="classphysx_1_1PxBaseTask.html">PxBaseTask</a> &dependent)=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Adds a gpu launch task that gets executed after the pre launch task. <a href="#7086ea741a0e605ba51f012743180353"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual <a class="el" href="classphysx_1_1PxBaseTask.html">PxBaseTask</a> & </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#e3c292cd7b8a431fa969116423e32254">getPostLaunchTask</a> ()=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Query post launch task that runs after the gpu is done. <a href="#e3c292cd7b8a431fa969116423e32254"></a><br></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#c982dffd87d4252ea53906504d2cc349">addPostLaunchDependent</a> (<a class="el" href="classphysx_1_1PxBaseTask.html">PxBaseTask</a> &dependent)=0</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">Adds a task that gets executed after the post launch task. <a href="#c982dffd87d4252ea53906504d2cc349"></a><br></td></tr>
<tr><td colspan="2"><br><h2>Protected Member Functions</h2></td></tr>
<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual </td><td class="memItemRight" valign="bottom"><a class="el" href="classphysx_1_1PxGpuDispatcher.html#732e0687b5f29c5ec9cacf8674a0e805">~PxGpuDispatcher</a> ()</td></tr>
<tr><td class="mdescLeft"> </td><td class="mdescRight">protected destructor <a href="#732e0687b5f29c5ec9cacf8674a0e805"></a><br></td></tr>
</table>
<hr><a name="_details"></a><h2>Detailed Description</h2>
A GpuTask dispatcher.
<p>
A <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> executes GpuTasks submitted by one or more TaskManagers (one or more scenes). It maintains a CPU worker thread which waits on GpuTask "groups" to be submitted. The submission API is explicitly sessioned so that GpuTasks are dispatched together as a group whenever possible to improve parallelism on the GPU.<p>
A <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> cannot be allocated ad-hoc, they are created as a result of creating a PxCudaContextManager. Every PxCudaContextManager has a <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> instance that can be queried. In this way, each <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> is tied to exactly one CUDA context.<p>
A scene will use CPU fallback Tasks for GpuTasks if the <a class="el" href="classphysx_1_1PxTaskManager.html" title="The PxTaskManager interface.">PxTaskManager</a> provided to it does not have a <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a>. For this reason, the <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> must be assigned to the <a class="el" href="classphysx_1_1PxTaskManager.html" title="The PxTaskManager interface.">PxTaskManager</a> before the <a class="el" href="classphysx_1_1PxTaskManager.html" title="The PxTaskManager interface.">PxTaskManager</a> is given to a scene.<p>
Multiple TaskManagers may safely share a single <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> instance, thus enabling scenes to share a CUDA context.<p>
Only <a class="el" href="classphysx_1_1PxGpuDispatcher.html#3157fe3020f29f2a4c3bdc5eb11b79c6" title="Returns true if a CUDA call has returned a non-recoverable error.">failureDetected()</a> is intended for use by the user. The rest of the <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> public methods are reserved for internal use by TaskManagers and GpuTasks only. <hr><h2>Constructor &amp; Destructor Documentation</h2>
<a class="anchor" name="732e0687b5f29c5ec9cacf8674a0e805"></a><!-- doxytag: member="physx::PxGpuDispatcher::~PxGpuDispatcher" ref="732e0687b5f29c5ec9cacf8674a0e805" args="()" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual physx::PxGpuDispatcher::~PxGpuDispatcher </td>
<td>(</td>
<td class="paramname"> </td>
<td> ) </td>
<td><code> [inline, protected, virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
protected destructor
<p>
GpuDispatchers are allocated and freed by their PxCudaContextManager.
</div>
</div><p>
<hr><h2>Member Function Documentation</h2>
<a class="anchor" name="27c2631f98a92733515f59823787fdc7"></a><!-- doxytag: member="physx::PxGpuDispatcher::addCompletionPrereq" ref="27c2631f98a92733515f59823787fdc7" args="(PxBaseTask &task)=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual void physx::PxGpuDispatcher::addCompletionPrereq </td>
<td>(</td>
<td class="paramtype"><a class="el" href="classphysx_1_1PxBaseTask.html">PxBaseTask</a> & </td>
<td class="paramname"> <em>task</em> </td>
<td> ) </td>
<td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Add a CUDA completion prerequisite dependency to a task.
<p>
A GpuTask calls this function to add a prerequisite dependency on another task (usually a CpuTask) preventing that task from starting until all of the CUDA kernels and copies already launched have been completed. The <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> will increment that task's reference count, blocking its execution, until the CUDA work is complete.<p>
This is generally only required when a CPU task is expecting the results of the CUDA kernels to have been copied into host memory.<p>
This mechanism is not required to ensure CUDA kernels and copies are issued in the correct order. Kernel issue order is determined by normal task dependencies. The rule of thumb is to only use a blocking completion prerequisite if the task in question depends on a completed GPU->Host DMA.<p>
The <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> issues a blocking event record to CUDA for the purposes of tracking the already submitted CUDA work. When this event is resolved, the <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> manually decrements the reference count of the specified task, allowing it to execute (assuming it does not have other pending prerequisites).
</div>
</div><p>
<a class="anchor" name="c982dffd87d4252ea53906504d2cc349"></a><!-- doxytag: member="physx::PxGpuDispatcher::addPostLaunchDependent" ref="c982dffd87d4252ea53906504d2cc349" args="(PxBaseTask &dependent)=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual void physx::PxGpuDispatcher::addPostLaunchDependent </td>
<td>(</td>
<td class="paramtype"><a class="el" href="classphysx_1_1PxBaseTask.html">PxBaseTask</a> & </td>
<td class="paramname"> <em>dependent</em> </td>
<td> ) </td>
<td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Adds a task that gets executed after the post launch task.
<p>
This is part of an optional feature to schedule multiple gpu features at the same time to get kernels to run in parallel. <dl class="note" compact><dt><b>Note:</b></dt><dd>Each call adds a reference to the post-launch task. </dd></dl>
</div>
</div><p>
<a class="anchor" name="7086ea741a0e605ba51f012743180353"></a><!-- doxytag: member="physx::PxGpuDispatcher::addPreLaunchDependent" ref="7086ea741a0e605ba51f012743180353" args="(PxBaseTask &dependent)=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual void physx::PxGpuDispatcher::addPreLaunchDependent </td>
<td>(</td>
<td class="paramtype"><a class="el" href="classphysx_1_1PxBaseTask.html">PxBaseTask</a> & </td>
<td class="paramname"> <em>dependent</em> </td>
<td> ) </td>
<td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Adds a gpu launch task that gets executed after the pre launch task.
<p>
This is part of an optional feature to schedule multiple gpu features at the same time to get kernels to run in parallel. <dl class="note" compact><dt><b>Note:</b></dt><dd>Each call adds a reference to the pre-launch task. </dd></dl>
</div>
</div><p>
<a class="anchor" name="3157fe3020f29f2a4c3bdc5eb11b79c6"></a><!-- doxytag: member="physx::PxGpuDispatcher::failureDetected" ref="3157fe3020f29f2a4c3bdc5eb11b79c6" args="() const =0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual bool physx::PxGpuDispatcher::failureDetected </td>
<td>(</td>
<td class="paramname"> </td>
<td> ) </td>
<td> const<code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Returns true if a CUDA call has returned a non-recoverable error.
<p>
A return value of true indicates a fatal error has occurred. To protect itself, the <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> enters a fall through mode that allows GpuTasks to complete without being executed. This allows simulations to continue but leaves GPU content static or corrupted.<p>
The user may try to recover from these failures by deleting GPU content so the visual artifacts are minimized. But there is no way to recover the state of the GPU actors before the failure. Once a CUDA context is in this state, the only recourse is to create a new CUDA context, a new scene, and start over.<p>
This is our "Best Effort" attempt to not turn a soft failure into a hard failure because continued use of a CUDA context after it has returned an error will usually result in a driver reset. However if the initial failure was serious enough, a reset may have already occurred by the time we learn of it.
</div>
</div><p>
<a class="anchor" name="2ec171af375195b782acce8fae4d2e93"></a><!-- doxytag: member="physx::PxGpuDispatcher::finishGroup" ref="2ec171af375195b782acce8fae4d2e93" args="()=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual void physx::PxGpuDispatcher::finishGroup </td>
<td>(</td>
<td class="paramname"> </td>
<td> ) </td>
<td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Record the end of a GpuTask batch submission.
<p>
A <a class="el" href="classphysx_1_1PxTaskManager.html" title="The PxTaskManager interface.">PxTaskManager</a> calls this function to notify the <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> that it is done submitting a group of GpuTasks (GpuTasks which were all made ready to run by the same prerequisite dependency becoming resolved). If no other group submissions are in progress, the <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> will execute the set of ready tasks.
</div>
</div><p>
<a class="anchor" name="0310275811d2b9df30ef6dfeda59333b"></a><!-- doxytag: member="physx::PxGpuDispatcher::forceFailureMode" ref="0310275811d2b9df30ef6dfeda59333b" args="()=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual void physx::PxGpuDispatcher::forceFailureMode </td>
<td>(</td>
<td class="paramname"> </td>
<td> ) </td>
<td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Force the <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> into failure mode.
<p>
This API should be used if user code detects a non-recoverable CUDA error. This ensures the <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> does not launch any further CUDA work. Subsequent calls to <a class="el" href="classphysx_1_1PxGpuDispatcher.html#3157fe3020f29f2a4c3bdc5eb11b79c6" title="Returns true if a CUDA call has returned a non-recoverable error.">failureDetected()</a> will return true.
</div>
</div><p>
<a class="anchor" name="d903de1449d82e6d9e51f610711f74d9"></a><!-- doxytag: member="physx::PxGpuDispatcher::getCudaContextManager" ref="d903de1449d82e6d9e51f610711f74d9" args="()=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual PxCudaContextManager* physx::PxGpuDispatcher::getCudaContextManager </td>
<td>(</td>
<td class="paramname"> </td>
<td> ) </td>
<td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Retrieve the PxCudaContextManager associated with this <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a>.
<p>
Every PxCudaContextManager has one <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a>, and every <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> has one PxCudaContextManager.
</div>
</div><p>
<a class="anchor" name="e3c292cd7b8a431fa969116423e32254"></a><!-- doxytag: member="physx::PxGpuDispatcher::getPostLaunchTask" ref="e3c292cd7b8a431fa969116423e32254" args="()=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual <a class="el" href="classphysx_1_1PxBaseTask.html">PxBaseTask</a>& physx::PxGpuDispatcher::getPostLaunchTask </td>
<td>(</td>
<td class="paramname"> </td>
<td> ) </td>
<td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Query post launch task that runs after the gpu is done.
<p>
This is part of an optional feature to schedule multiple gpu features at the same time to get kernels to run in parallel. <dl class="note" compact><dt><b>Note:</b></dt><dd>Do *not* set the continuation on the returned task, but use <a class="el" href="classphysx_1_1PxGpuDispatcher.html#c982dffd87d4252ea53906504d2cc349" title="Adds a task that gets executed after the post launch task.">addPostLaunchDependent()</a>. </dd></dl>
</div>
</div><p>
<a class="anchor" name="3a12b661dab8671aaa9bd414c2b71440"></a><!-- doxytag: member="physx::PxGpuDispatcher::getPreLaunchTask" ref="3a12b661dab8671aaa9bd414c2b71440" args="()=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual <a class="el" href="classphysx_1_1PxBaseTask.html">PxBaseTask</a>& physx::PxGpuDispatcher::getPreLaunchTask </td>
<td>(</td>
<td class="paramname"> </td>
<td> ) </td>
<td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Query pre launch task that runs before launching gpu kernels.
<p>
This is part of an optional feature to schedule multiple gpu features at the same time to get kernels to run in parallel. <dl class="note" compact><dt><b>Note:</b></dt><dd>Do *not* set the continuation on the returned task, but use <a class="el" href="classphysx_1_1PxGpuDispatcher.html#7086ea741a0e605ba51f012743180353" title="Adds a gpu launch task that gets executed after the pre launch task.">addPreLaunchDependent()</a>. </dd></dl>
</div>
</div><p>
<a class="anchor" name="7dbc8264abbffb8075f37e2d61205671"></a><!-- doxytag: member="physx::PxGpuDispatcher::launchCopyKernel" ref="7dbc8264abbffb8075f37e2d61205671" args="(PxGpuCopyDesc *desc, uint32_t count, CUstream stream)=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual void physx::PxGpuDispatcher::launchCopyKernel </td>
<td>(</td>
<td class="paramtype">PxGpuCopyDesc * </td>
<td class="paramname"> <em>desc</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">uint32_t </td>
<td class="paramname"> <em>count</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype"><a class="el" href="PxGpuDispatcher_8h.html#b946c7f02e09efd788a204718015d88a">CUstream</a> </td>
<td class="paramname"> <em>stream</em></td><td> </td>
</tr>
<tr>
<td></td>
<td>)</td>
<td></td><td></td><td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Launch a copy kernel with arbitrary number of copy commands.
<p>
This method is intended to be called from Kernel GpuTasks, but it can function outside of that context as well.<p>
If count is 1, the descriptor is passed to the kernel as arguments, so it may be declared on the stack.<p>
If count is greater than 1, the kernel will read the descriptors out of host memory. Because of this, the descriptor array must be located in page locked (pinned) memory. The provided descriptors may be modified by this method (converting host pointers to their GPU mapped equivalents) and should be considered *owned* by CUDA until the current batch of work has completed, so descriptor arrays should not be freed or modified until you have received a completion notification.<p>
If your GPU does not support mapping of page locked memory (a capability introduced with SM 1.1), this function degrades to calling CUDA copy methods.
</div>
</div><p>
<a class="anchor" name="86f6db8b18cf8380967890d694e3df8e"></a><!-- doxytag: member="physx::PxGpuDispatcher::startGroup" ref="86f6db8b18cf8380967890d694e3df8e" args="()=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual void physx::PxGpuDispatcher::startGroup </td>
<td>(</td>
<td class="paramname"> </td>
<td> ) </td>
<td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Record the start of a GpuTask batch submission.
<p>
A <a class="el" href="classphysx_1_1PxTaskManager.html" title="The PxTaskManager interface.">PxTaskManager</a> calls this function to notify the <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> that one or more GpuTasks are about to be submitted for execution. The <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> will not read the incoming task queue until it receives one <a class="el" href="classphysx_1_1PxGpuDispatcher.html#2ec171af375195b782acce8fae4d2e93" title="Record the end of a GpuTask batch submission.">finishGroup()</a> call for each <a class="el" href="classphysx_1_1PxGpuDispatcher.html#86f6db8b18cf8380967890d694e3df8e" title="Record the start of a GpuTask batch submission.">startGroup()</a> call. This is to ensure as many GpuTasks as possible are executed together as a group, generating optimal parallelism on the GPU.
</div>
</div><p>
<a class="anchor" name="091cb121a38ddf39fcab51f5118a15d2"></a><!-- doxytag: member="physx::PxGpuDispatcher::startSimulation" ref="091cb121a38ddf39fcab51f5118a15d2" args="()=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual void physx::PxGpuDispatcher::startSimulation </td>
<td>(</td>
<td class="paramname"> </td>
<td> ) </td>
<td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Record the start of a simulation step.
<p>
A <a class="el" href="classphysx_1_1PxTaskManager.html" title="The PxTaskManager interface.">PxTaskManager</a> calls this function to record the beginning of a simulation step. The <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> uses this notification to initialize the profiler state.
</div>
</div><p>
<a class="anchor" name="b46896dea09105e1f5fc1d19a98f2b1b"></a><!-- doxytag: member="physx::PxGpuDispatcher::stopSimulation" ref="b46896dea09105e1f5fc1d19a98f2b1b" args="()=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual void physx::PxGpuDispatcher::stopSimulation </td>
<td>(</td>
<td class="paramname"> </td>
<td> ) </td>
<td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Record the end of a simulation frame.
<p>
A <a class="el" href="classphysx_1_1PxTaskManager.html" title="The PxTaskManager interface.">PxTaskManager</a> calls this function to record the completion of its dependency graph. If profiling is enabled, the <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> will trigger the retrieval of profiling data from the GPU at this point.
</div>
</div><p>
<a class="anchor" name="5ffc5d0c124afbcad0b6fcf1080af5ee"></a><!-- doxytag: member="physx::PxGpuDispatcher::submitTask" ref="5ffc5d0c124afbcad0b6fcf1080af5ee" args="(PxTask &task)=0" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">virtual void physx::PxGpuDispatcher::submitTask </td>
<td>(</td>
<td class="paramtype"><a class="el" href="classphysx_1_1PxTask.html">PxTask</a> & </td>
<td class="paramname"> <em>task</em> </td>
<td> ) </td>
<td><code> [pure virtual]</code></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>
Submit a GpuTask for execution.
<p>
Submitted tasks are pushed onto an incoming queue. The <a class="el" href="classphysx_1_1PxGpuDispatcher.html" title="A GpuTask dispatcher.">PxGpuDispatcher</a> will take the contents of this queue every time the pending group count reaches 0 and run the group of submitted GpuTasks as an interleaved group.
</div>
</div><p>
<hr>The documentation for this class was generated from the following file:<ul>
<li><a class="el" href="PxGpuDispatcher_8h-source.html">PxGpuDispatcher.h</a></ul>
</div>
<hr style="width: 100%; height: 2px;"><br>
Copyright &copy; 2008-2018 NVIDIA Corporation, 2701 San Tomas Expressway, Santa Clara, CA 95050 U.S.A. All rights reserved. <a href="http://www.nvidia.com">www.nvidia.com</a>
</body>
</html>
|