//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2018 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_SCB_DEFS
#define PX_PHYSICS_SCB_DEFS
#include "ScbBase.h"
// a Regular attribute of type T is one for which
// * the SC method takes a single argument of type ArgType<T> (defined below)
// * Scb either passes that argument through, or dumps it in a buffer to flush later.
// * PVD is notified when the variable changes
//
// For each such attribute, we can define static methods to read and write the core and buffered variables,
// and capture the buffering logic in the BufferedAccess class.
//
// The dummy arg is necessary here because ISO permits partial specialization of member templates
// but not full specialization.
//
// Putting just the accessors and mutators here allows us to change the behavior just by varying the
// BufferedAccess template (e.g. to compile without buffering), and also to size-reduce that template
// by passing function pointers if necessary.
#define SCB_REGULAR_ATTRIBUTE(_val, _type, _name) \
	enum { BF_##_name = 1<<(_val) }; \
	_type m##_name; \
	template<PxU32 Dummy> struct Fns<1<<(_val),Dummy> \
	{ \
		typedef typename ArgType<_type>::Type Arg; \
		enum { flag = 1<<(_val) }; \
		static PX_FORCE_INLINE Arg getBuffered(const Buf& buf) { return Arg(buf.m##_name); } \
		static PX_FORCE_INLINE void setBuffered(Buf& buf, Arg v) { buf.m##_name = v; } \
		static PX_FORCE_INLINE Arg getCore(const Core& core) { return Arg(core.get##_name()); } \
		static PX_FORCE_INLINE void setCore(Core& core, Arg v) { core.set##_name(v); } \
	};

#define SCB_REGULAR_ATTRIBUTE_ALIGNED(_val, _type, _name, _alignment) \
	enum { BF_##_name = 1<<(_val) }; \
	PX_ALIGN(_alignment, _type) m##_name; \
	template<PxU32 Dummy> struct Fns<1<<(_val),Dummy> \
	{ \
		typedef typename ArgType<_type>::Type Arg; \
		enum { flag = 1<<(_val) }; \
		static PX_FORCE_INLINE Arg getBuffered(const Buf& buf) { return buf.m##_name; } \
		static PX_FORCE_INLINE void setBuffered(Buf& buf, Arg v) { buf.m##_name = v; } \
		static PX_FORCE_INLINE Arg getCore(const Core& core) { return core.get##_name(); } \
		static PX_FORCE_INLINE void setCore(Core& core, Arg v) { core.set##_name(v); } \
	};
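// Example (illustrative sketch only, not part of this header): a buffer struct built from the macros
// above. "NameBuffer", "Sc::NameCore" and the attributes chosen are hypothetical stand-ins. The enclosing
// struct must provide the Buf/Core typedefs and the primary Fns template that the macro-generated partial
// specializations hook into, and it is assumed to live in namespace Scb so that ArgType is visible.
//
//	struct NameBuffer
//	{
//		typedef NameBuffer   Buf;              // referenced by the generated Fns specializations
//		typedef Sc::NameCore Core;             // must expose get<Name>()/set<Name>() for each attribute
//		template<PxU32 Dummy> struct Fns {};   // primary template; one partial specialization per attribute
//
//		SCB_REGULAR_ATTRIBUTE(0, PxReal,      LinearDamping)   // adds BF_LinearDamping, mLinearDamping, Fns<1<<0>
//		SCB_REGULAR_ATTRIBUTE(1, PxTransform, Body2World)      // PxTransform is passed as const& via ArgType
//	};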
namespace physx
{
namespace Scb
{
class Scene;
template<typename T> struct ArgType { typedef T Type; };
template<> struct ArgType<PxVec3> { typedef const PxVec3& Type; };
template<> struct ArgType<PxTransform> { typedef const PxTransform& Type; };
template<> struct ArgType<PxQuat> { typedef const PxQuat& Type; };
template<> struct ArgType<PxPlane> { typedef const PxPlane& Type; };
template<> struct ArgType<PxFilterData> { typedef const PxFilterData& Type; };
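// For example, ArgType<PxU32>::Type is plain PxU32 (passed by value), while ArgType<PxTransform>::Type is
// const PxTransform& (passed by reference); the macro-generated Fns accessors pick this up through their Arg typedef.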
// TODO: should be able to size-reduce this if necessary by just generating one set per
// arg type instead of one per arg, by passing function pointers to the accessors/mutators/flag
// instead of instancing per type.
// BaseClass: introduced to have Scb::Body use a custom location for storing buffered property flags
template<class Buf, class Core, class ScbClass, class BaseClass = Scb::Base>
struct BufferedAccess
{
	template<typename Fns>
	static PX_FORCE_INLINE typename Fns::Arg read(const BaseClass& base, const Core& core)
	{
		return base.isBuffered(Fns::flag) ? Fns::getBuffered(*reinterpret_cast<const Buf*>(base.getStream()))
		                                  : Fns::getCore(core);
	}

	template<typename Fns>
	static PX_FORCE_INLINE void write(BaseClass& base, Core& core, typename Fns::Arg v)
	{
		if(!base.isBuffering())
		{
			Fns::setCore(core, v);
#if PX_SUPPORT_PVD
			if(base.getControlState() == ControlState::eIN_SCENE)
			{
				Scb::Scene* scene = base.getScbScene();
				PX_ASSERT(scene);
				scene->getScenePvdClient().updatePvdProperties(static_cast<ScbClass*>(&base));
			}
#endif
		}
		else
		{
			Fns::setBuffered(*reinterpret_cast<Buf*>(base.getStream()), v);
			base.markUpdated(Fns::flag);
		}
	}

	template<typename Fns>
	static PX_FORCE_INLINE void flush(const BaseClass& base, Core& core, const Buf& buf)
	{
		if(base.isBuffered(Fns::flag))
			Fns::setCore(core, Fns::getBuffered(buf));
	}
};
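// Example (illustrative sketch only, not part of this header): how an Scb wrapper class might route its
// public accessors through BufferedAccess. "Name", "NameBuffer", "Sc::NameCore" and "mCore" are hypothetical;
// real wrappers (e.g. Scb::Body) follow this pattern but differ in detail.
//
//	class Name : public Scb::Base
//	{
//		typedef NameBuffer Buffer;
//		typedef Scb::BufferedAccess<Buffer, Sc::NameCore, Name> Access;
//
//	public:
//		PxReal getLinearDamping() const
//		{
//			return Access::read<Buffer::Fns<Buffer::BF_LinearDamping, 0> >(*this, mCore);
//		}
//
//		void setLinearDamping(PxReal v)   // writes through immediately or buffers, depending on state
//		{
//			Access::write<Buffer::Fns<Buffer::BF_LinearDamping, 0> >(*this, mCore, v);
//		}
//
//	private:
//		Sc::NameCore mCore;
//	};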
}
}
#endif