// Copyright Epic Games, Inc. All Rights Reserved. #pragma once /* {{{1 standalone_prologue.h */ // Copyright Epic Games, Inc. All Rights Reserved. #pragma once #if !defined(TRACE_UE_COMPAT_LAYER) # if defined(__UNREAL__) && __UNREAL__ # define TRACE_UE_COMPAT_LAYER 0 # else # define TRACE_UE_COMPAT_LAYER 1 # endif #endif #if TRACE_UE_COMPAT_LAYER // platform defines #define PLATFORM_ANDROID 0 #define PLATFORM_APPLE 0 #define PLATFORM_HOLOLENS 0 #define PLATFORM_MAC 0 #define PLATFORM_UNIX 0 #define PLATFORM_WINDOWS 0 #ifdef _WIN32 # undef PLATFORM_WINDOWS # define PLATFORM_WINDOWS 1 #elif defined(__linux__) # undef PLATFORM_UNIX # define PLATFORM_UNIX 1 #elif defined(__APPLE__) # undef PLATFORM_MAC # define PLATFORM_MAC 1 # undef PLATFORM_APPLE # define PLATFORM_APPLE 1 #endif #if PLATFORM_MAC #endif // arch defines #if defined(__amd64__) || defined(_M_X64) # define PLATFORM_CPU_X86_FAMILY 1 # define PLATFORM_CPU_ARM_FAMILY 0 # define PLATFORM_64BITS 1 # define PLATFORM_CACHE_LINE_SIZE 64 #elif defined(__arm64__) || defined(_M_ARM64) # define PLATFORM_CPU_X86_FAMILY 0 # define PLATFORM_CPU_ARM_FAMILY 1 # define PLATFORM_64BITS 1 # define PLATFORM_CACHE_LINE_SIZE 64 #else # error Unknown architecture #endif // external includes #include #include #include #if PLATFORM_WINDOWS # if !defined(WIN32_LEAN_AND_MEAN) # define WIN32_LEAN_AND_MEAN # endif # if !defined(NOGDI) # define NOGDI # endif # if !defined(NOMINMAX) # define NOMINMAX # endif # include #endif // types using uint8 = uint8_t; using uint16 = uint16_t; using uint32 = uint32_t; using uint64 = uint64_t; using int8 = int8_t; using int16 = int16_t; using int32 = int32_t; using int64 = int64_t; using UPTRINT = uintptr_t; using PTRINT = intptr_t; using SIZE_T = size_t; #if PLATFORM_WINDOWS # undef TEXT #endif #define TEXT(x) x #define TCHAR ANSICHAR using ANSICHAR = char; using WIDECHAR = wchar_t; // keywords #if defined(_MSC_VER) # define FORCENOINLINE __declspec(noinline) # define FORCEINLINE __forceinline 
#else # define FORCENOINLINE inline __attribute__((noinline)) # define FORCEINLINE inline __attribute__((always_inline)) #endif #if defined(_MSC_VER) # define LIKELY(x) x # define UNLIKELY(x) x #else # define LIKELY(x) __builtin_expect(!!(x), 1) # define UNLIKELY(x) __builtin_expect(!!(x), 0) #endif #define UE_ARRAY_COUNT(x) (sizeof(x) / sizeof(x[0])) // so/dll #if defined(TRACE_DLL_EXPORT) # if PLATFORM_WINDOWS && defined(TRACE_DLL_EXPORT) # if TRACE_IMPLEMENT # define TRACELOG_API __declspec(dllexport) # else # define TRACELOG_API __declspec(dllimport) # endif # else # define TRACELOG_API __attribute__ ((visibility ("default"))) # endif #else # define TRACELOG_API #endif #if !defined(IS_MONOLITHIC) # define IS_MONOLITHIC 0 #endif // misc defines #define THIRD_PARTY_INCLUDES_END #define THIRD_PARTY_INCLUDES_START #define TRACE_ENABLED 1 #define TRACE_PRIVATE_CONTROL_ENABLED 0 #define TRACE_PRIVATE_EXTERNAL_LZ4 1 #define UE_TRACE_ENABLED TRACE_ENABLED #define UE_BUILD_SHIPPING 1 // api template inline auto Forward(T t) { return std::forward(t); } #endif // TRACE_UE_COMPAT_LAYER #include #include "lz4.h" #if PLATFORM_WINDOWS # pragma warning(push) # pragma warning(disable : 4200) // zero-sized arrays # pragma warning(disable : 4201) // anonymous structs # pragma warning(disable : 4127) // conditional expr. 
is constant #endif /* {{{1 Config.h */ #if !defined(UE_TRACE_ENABLED) # if !UE_BUILD_SHIPPING && !IS_PROGRAM # if PLATFORM_WINDOWS || PLATFORM_UNIX || PLATFORM_APPLE || PLATFORM_ANDROID || PLATFORM_HOLOLENS # define UE_TRACE_ENABLED 1 # endif # endif #endif #if !defined(UE_TRACE_ENABLED) # define UE_TRACE_ENABLED 0 #endif #if UE_TRACE_ENABLED # define TRACE_PRIVATE_PROTOCOL_5 #endif /* {{{1 Trace.h */ #if UE_TRACE_ENABLED #include namespace UE { namespace Trace { class FChannel; } // namespace Trace } // namespace UE #define TRACE_PRIVATE_STATISTICS (!UE_BUILD_SHIPPING) #define TRACE_PRIVATE_CHANNEL_DEFAULT_ARGS false, "None" #define TRACE_PRIVATE_CHANNEL_DECLARE(LinkageType, ChannelName) \ static UE::Trace::FChannel ChannelName##Object; \ LinkageType UE::Trace::FChannel& ChannelName = ChannelName##Object; #define TRACE_PRIVATE_CHANNEL_IMPL(ChannelName, ...) \ struct F##ChannelName##Registrator \ { \ F##ChannelName##Registrator() \ { \ ChannelName##Object.Setup(#ChannelName, { __VA_ARGS__ } ); \ } \ }; \ static F##ChannelName##Registrator ChannelName##Reg = F##ChannelName##Registrator(); #define TRACE_PRIVATE_CHANNEL(ChannelName, ...) \ TRACE_PRIVATE_CHANNEL_DECLARE(static, ChannelName) \ TRACE_PRIVATE_CHANNEL_IMPL(ChannelName, ##__VA_ARGS__) #define TRACE_PRIVATE_CHANNEL_DEFINE(ChannelName, ...) \ TRACE_PRIVATE_CHANNEL_DECLARE(, ChannelName) \ TRACE_PRIVATE_CHANNEL_IMPL(ChannelName, ##__VA_ARGS__) #define TRACE_PRIVATE_CHANNEL_EXTERN(ChannelName, ...) \ __VA_ARGS__ extern UE::Trace::FChannel& ChannelName; #define TRACE_PRIVATE_CHANNELEXPR_IS_ENABLED(ChannelsExpr) \ bool(ChannelsExpr) #define TRACE_PRIVATE_EVENT_DEFINE(LoggerName, EventName) \ UE::Trace::Private::FEventNode LoggerName##EventName##Event; #define TRACE_PRIVATE_EVENT_BEGIN(LoggerName, EventName, ...) \ TRACE_PRIVATE_EVENT_BEGIN_IMPL(static, LoggerName, EventName, ##__VA_ARGS__) #define TRACE_PRIVATE_EVENT_BEGIN_EXTERN(LoggerName, EventName, ...) 
\ TRACE_PRIVATE_EVENT_BEGIN_IMPL(extern, LoggerName, EventName, ##__VA_ARGS__) #define TRACE_PRIVATE_EVENT_BEGIN_IMPL(LinkageType, LoggerName, EventName, ...) \ LinkageType TRACE_PRIVATE_EVENT_DEFINE(LoggerName, EventName) \ struct F##LoggerName##EventName##Fields \ { \ enum \ { \ Important = UE::Trace::Private::FEventInfo::Flag_Important, \ NoSync = UE::Trace::Private::FEventInfo::Flag_NoSync, \ PartialEventFlags = (0, ##__VA_ARGS__), \ }; \ enum : bool { bIsImportant = ((0, ##__VA_ARGS__) & Important) != 0, }; \ static constexpr uint32 GetSize() { return EventProps_Meta::Size; } \ static uint32 GetUid() { static uint32 Uid = 0; return (Uid = Uid ? Uid : Initialize()); } \ static uint32 FORCENOINLINE Initialize() \ { \ static const uint32 Uid_ThreadSafeInit = [] () \ { \ using namespace UE::Trace; \ static F##LoggerName##EventName##Fields Fields; \ static UE::Trace::Private::FEventInfo Info = \ { \ FLiteralName(#LoggerName), \ FLiteralName(#EventName), \ (FFieldDesc*)(&Fields), \ uint16(sizeof(Fields) / sizeof(FFieldDesc)), \ uint16(EventFlags), \ }; \ return LoggerName##EventName##Event.Initialize(&Info); \ }(); \ return Uid_ThreadSafeInit; \ } \ typedef UE::Trace::TField<0 /*Index*/, 0 /*Offset*/, #define TRACE_PRIVATE_EVENT_FIELD(FieldType, FieldName) \ FieldType> FieldName##_Meta; \ FieldName##_Meta const FieldName##_Field = UE::Trace::FLiteralName(#FieldName); \ template auto FieldName(Ts... ts) const { \ LogScopeType::FFieldSet::Impl((LogScopeType*)this, Forward(ts)...); \ return true; \ } \ typedef UE::Trace::TField< \ FieldName##_Meta::Index + 1, \ FieldName##_Meta::Offset + FieldName##_Meta::Size, #define TRACE_PRIVATE_EVENT_END() \ UE::Trace::EventProps> EventProps_Meta; \ EventProps_Meta const EventProps_Private = {}; \ typedef std::conditional::type LogScopeType; \ explicit operator bool () const { return true; } \ enum { EventFlags = PartialEventFlags|((EventProps_Meta::NumAuxFields != 0) ? 
UE::Trace::Private::FEventInfo::Flag_MaybeHasAux : 0), }; \ static_assert( \ !bIsImportant || (uint32(EventFlags) & uint32(UE::Trace::Private::FEventInfo::Flag_NoSync)), \ "Trace events flagged as Important events must be marked NoSync" \ ); \ }; #define TRACE_PRIVATE_LOG_PRELUDE(EnterFunc, LoggerName, EventName, ChannelsExpr, ...) \ if (TRACE_PRIVATE_CHANNELEXPR_IS_ENABLED(ChannelsExpr)) \ if (auto LogScope = F##LoggerName##EventName##Fields::LogScopeType::EnterFunc(__VA_ARGS__)) \ if (const auto& __restrict EventName = *(F##LoggerName##EventName##Fields*)(&LogScope)) \ ((void)EventName), #define TRACE_PRIVATE_LOG_EPILOG() \ LogScope += LogScope #define TRACE_PRIVATE_LOG(LoggerName, EventName, ChannelsExpr, ...) \ TRACE_PRIVATE_LOG_PRELUDE(Enter, LoggerName, EventName, ChannelsExpr, ##__VA_ARGS__) \ TRACE_PRIVATE_LOG_EPILOG() #define TRACE_PRIVATE_LOG_SCOPED(LoggerName, EventName, ChannelsExpr, ...) \ UE::Trace::Private::FScopedLogScope PREPROCESSOR_JOIN(TheScope, __LINE__); \ TRACE_PRIVATE_LOG_PRELUDE(ScopedEnter, LoggerName, EventName, ChannelsExpr, ##__VA_ARGS__) \ PREPROCESSOR_JOIN(TheScope, __LINE__).SetActive(), \ TRACE_PRIVATE_LOG_EPILOG() #define TRACE_PRIVATE_LOG_SCOPED_T(LoggerName, EventName, ChannelsExpr, ...) \ UE::Trace::Private::FScopedStampedLogScope PREPROCESSOR_JOIN(TheScope, __LINE__); \ TRACE_PRIVATE_LOG_PRELUDE(ScopedStampedEnter, LoggerName, EventName, ChannelsExpr, ##__VA_ARGS__) \ PREPROCESSOR_JOIN(TheScope, __LINE__).SetActive(), \ TRACE_PRIVATE_LOG_EPILOG() #else #define TRACE_PRIVATE_CHANNEL(ChannelName, ...) #define TRACE_PRIVATE_CHANNEL_EXTERN(ChannelName, ...) #define TRACE_PRIVATE_CHANNEL_DEFINE(ChannelName, ...) #define TRACE_PRIVATE_CHANNELEXPR_IS_ENABLED(ChannelsExpr) \ false #define TRACE_PRIVATE_EVENT_DEFINE(LoggerName, EventName) #define TRACE_PRIVATE_EVENT_BEGIN(LoggerName, EventName, ...) \ TRACE_PRIVATE_EVENT_BEGIN_IMPL(LoggerName, EventName) #define TRACE_PRIVATE_EVENT_BEGIN_EXTERN(LoggerName, EventName, ...) 
\ TRACE_PRIVATE_EVENT_BEGIN_IMPL(LoggerName, EventName) #define TRACE_PRIVATE_EVENT_BEGIN_IMPL(LoggerName, EventName) \ struct F##LoggerName##EventName##Dummy \ { \ struct FTraceDisabled \ { \ const FTraceDisabled& operator () (...) const { return *this; } \ }; \ const F##LoggerName##EventName##Dummy& operator << (const FTraceDisabled&) const \ { \ return *this; \ } \ explicit operator bool () const { return false; } #define TRACE_PRIVATE_EVENT_FIELD(FieldType, FieldName) \ const FTraceDisabled& FieldName; #define TRACE_PRIVATE_EVENT_END() \ }; #define TRACE_PRIVATE_LOG(LoggerName, EventName, ...) \ if (const auto& EventName = *(F##LoggerName##EventName##Dummy*)1) \ EventName #define TRACE_PRIVATE_LOG_SCOPED(LoggerName, EventName, ...) \ if (const auto& EventName = *(F##LoggerName##EventName##Dummy*)1) \ EventName #define TRACE_PRIVATE_LOG_SCOPED_T(LoggerName, EventName, ...) \ if (const auto& EventName = *(F##LoggerName##EventName##Dummy*)1) \ EventName #endif // UE_TRACE_ENABLED /* {{{1 Trace.h */ #if UE_TRACE_ENABLED # define UE_TRACE_IMPL(...) # define UE_TRACE_API TRACELOG_API #else # define UE_TRACE_IMPL(...) { return __VA_ARGS__; } # define UE_TRACE_API inline #endif namespace UE { namespace Trace { enum AnsiString {}; enum WideString {}; struct FInitializeDesc { uint32 TailSizeBytes = 4 << 20; bool bUseWorkerThread = true; bool bUseImportantCache = true; }; typedef void* AllocFunc(SIZE_T, uint32); typedef void FreeFunc(void*, SIZE_T); typedef void ChannelIterFunc(const ANSICHAR*, bool, void*); struct FStatistics { uint64 BytesSent; uint64 BytesTraced; uint64 MemoryUsed; uint32 CacheUsed; // Important-marked events are uint32 CacheWaste; // stored in the cache. 
}; UE_TRACE_API void SetMemoryHooks(AllocFunc Alloc, FreeFunc Free) UE_TRACE_IMPL(); UE_TRACE_API void Initialize(const FInitializeDesc& Desc) UE_TRACE_IMPL(); UE_TRACE_API void StartWorkerThread() UE_TRACE_IMPL(); UE_TRACE_API void Shutdown() UE_TRACE_IMPL(); UE_TRACE_API void Update() UE_TRACE_IMPL(); UE_TRACE_API void GetStatistics(FStatistics& Out) UE_TRACE_IMPL(); UE_TRACE_API bool SendTo(const TCHAR* Host, uint32 Port=0) UE_TRACE_IMPL(false); UE_TRACE_API bool WriteTo(const TCHAR* Path) UE_TRACE_IMPL(false); UE_TRACE_API bool IsTracing() UE_TRACE_IMPL(false); UE_TRACE_API bool Stop() UE_TRACE_IMPL(false); UE_TRACE_API bool IsChannel(const TCHAR* ChanneName) UE_TRACE_IMPL(false); UE_TRACE_API bool ToggleChannel(const TCHAR* ChannelName, bool bEnabled) UE_TRACE_IMPL(false); UE_TRACE_API void EnumerateChannels(ChannelIterFunc IterFunc, void* User); UE_TRACE_API void ThreadRegister(const TCHAR* Name, uint32 SystemId, int32 SortHint) UE_TRACE_IMPL(); UE_TRACE_API void ThreadGroupBegin(const TCHAR* Name) UE_TRACE_IMPL(); UE_TRACE_API void ThreadGroupEnd() UE_TRACE_IMPL(); } // namespace Trace } // namespace UE #define UE_TRACE_EVENT_DEFINE(LoggerName, EventName) TRACE_PRIVATE_EVENT_DEFINE(LoggerName, EventName) #define UE_TRACE_EVENT_BEGIN(LoggerName, EventName, ...) TRACE_PRIVATE_EVENT_BEGIN(LoggerName, EventName, ##__VA_ARGS__) #define UE_TRACE_EVENT_BEGIN_EXTERN(LoggerName, EventName, ...) TRACE_PRIVATE_EVENT_BEGIN_EXTERN(LoggerName, EventName, ##__VA_ARGS__) #define UE_TRACE_EVENT_FIELD(FieldType, FieldName) TRACE_PRIVATE_EVENT_FIELD(FieldType, FieldName) #define UE_TRACE_EVENT_END() TRACE_PRIVATE_EVENT_END() #define UE_TRACE_LOG(LoggerName, EventName, ChannelsExpr, ...) TRACE_PRIVATE_LOG(LoggerName, EventName, ChannelsExpr, ##__VA_ARGS__) #define UE_TRACE_LOG_SCOPED(LoggerName, EventName, ChannelsExpr, ...) TRACE_PRIVATE_LOG_SCOPED(LoggerName, EventName, ChannelsExpr, ##__VA_ARGS__) #define UE_TRACE_LOG_SCOPED_T(LoggerName, EventName, ChannelsExpr, ...) 
TRACE_PRIVATE_LOG_SCOPED_T(LoggerName, EventName, ChannelsExpr, ##__VA_ARGS__) #define UE_TRACE_CHANNEL(ChannelName, ...) TRACE_PRIVATE_CHANNEL(ChannelName, ##__VA_ARGS__) #define UE_TRACE_CHANNEL_EXTERN(ChannelName, ...) TRACE_PRIVATE_CHANNEL_EXTERN(ChannelName, ##__VA_ARGS__) #define UE_TRACE_CHANNEL_DEFINE(ChannelName, ...) TRACE_PRIVATE_CHANNEL_DEFINE(ChannelName, ##__VA_ARGS__) #define UE_TRACE_CHANNELEXPR_IS_ENABLED(ChannelsExpr) TRACE_PRIVATE_CHANNELEXPR_IS_ENABLED(ChannelsExpr) /* {{{1 Channel.h */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { typedef void ChannelIterFunc(const ANSICHAR*, bool, void*); /* A named channel which can be used to filter trace events. Channels can be combined using the '|' operator which allows expressions like ``` UE_TRACE_LOG(FooWriter, FooEvent, FooChannel|BarChannel); ``` Note that this works as an AND operator, similar to how a bitmask is constructed. Channels are by default enabled until this method is called. This is to allow events to be emitted during static initialization. In fact all events during this phase are always emitted. In this method we disable all channels except those specified on the command line using -tracechannels argument. */ class FChannel { public: struct Iter { ~Iter(); const FChannel* GetNext(); void* Inner[3]; }; struct InitArgs { const ANSICHAR* Desc; // User facing description string bool bReadOnly; // If set, channel cannot be changed during a run, only set through command line. 
}; TRACELOG_API void Setup(const ANSICHAR* InChannelName, const InitArgs& Args); TRACELOG_API static void Initialize(); static Iter ReadNew(); void Announce() const; static bool Toggle(const ANSICHAR* ChannelName, bool bEnabled); static void ToggleAll(bool bEnabled); static FChannel* FindChannel(const ANSICHAR* ChannelName); static void EnumerateChannels(ChannelIterFunc Func, void* User); bool Toggle(bool bEnabled); bool IsEnabled() const; explicit operator bool () const; bool operator | (const FChannel& Rhs) const; private: FChannel* Next; struct { const ANSICHAR* Ptr; uint32 Len; uint32 Hash; } Name; volatile int32 Enabled; InitArgs Args; }; } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 Channel.inl */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { extern TRACELOG_API FChannel& TraceLogChannel; inline bool FChannel::IsEnabled() const { return Enabled >= 0; } inline FChannel::operator bool () const { return IsEnabled(); } inline bool FChannel::operator | (const FChannel& Rhs) const { return IsEnabled() && Rhs.IsEnabled(); } } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 Atomic.h */ #include #if PLATFORM_CPU_X86_FAMILY # include #endif namespace UE { namespace Trace { namespace Private { template Type AtomicLoadRelaxed(Type volatile* Source); template Type AtomicLoadAcquire(Type volatile* Source); template void AtomicStoreRelaxed(Type volatile* Target, Type Value); template void AtomicStoreRelease(Type volatile* Target, Type Value); template Type AtomicExchangeAcquire(Type volatile* Target, Type Value); template Type AtomicExchangeRelease(Type volatile* Target, Type Value); template bool AtomicCompareExchangeRelaxed(Type volatile* Target, Type New, Type Expected); template bool AtomicCompareExchangeAcquire(Type volatile* Target, Type New, Type Expected); template bool AtomicCompareExchangeRelease(Type volatile* Target, Type New, Type Expected); template Type AtomicAddRelaxed(Type volatile* Target, Type 
Value); template Type AtomicAddRelease(Type volatile* Target, Type Value); template Type AtomicAddAcquire(Type volatile* Target, Type Value); void PlatformYield(); inline void PlatformYield() { #if PLATFORM_CPU_X86_FAMILY _mm_pause(); #elif PLATFORM_CPU_ARM_FAMILY # if defined(_MSC_VER) && !defined(__clang__) // MSVC __yield(); # else __builtin_arm_yield(); # endif #else #error Unsupported architecture! #endif } template inline Type AtomicLoadRelaxed(Type volatile* Source) { std::atomic* T = (std::atomic*) Source; return T->load(std::memory_order_relaxed); } template inline Type AtomicLoadAcquire(Type volatile* Source) { std::atomic* T = (std::atomic*) Source; return T->load(std::memory_order_acquire); } template inline void AtomicStoreRelaxed(Type volatile* Target, Type Value) { std::atomic* T = (std::atomic*) Target; T->store(Value, std::memory_order_relaxed); } template inline void AtomicStoreRelease(Type volatile* Target, Type Value) { std::atomic* T = (std::atomic*) Target; T->store(Value, std::memory_order_release); } template inline Type AtomicExchangeAcquire(Type volatile* Target, Type Value) { std::atomic* T = (std::atomic*) Target; return T->exchange(Value, std::memory_order_acquire); } template inline Type AtomicExchangeRelease(Type volatile* Target, Type Value) { std::atomic* T = (std::atomic*) Target; return T->exchange(Value, std::memory_order_release); } template inline bool AtomicCompareExchangeRelaxed(Type volatile* Target, Type New, Type Expected) { std::atomic* T = (std::atomic*) Target; return T->compare_exchange_weak(Expected, New, std::memory_order_relaxed); } template inline bool AtomicCompareExchangeAcquire(Type volatile* Target, Type New, Type Expected) { std::atomic* T = (std::atomic*) Target; return T->compare_exchange_weak(Expected, New, std::memory_order_acquire); } template inline bool AtomicCompareExchangeRelease(Type volatile* Target, Type New, Type Expected) { std::atomic* T = (std::atomic*) Target; return 
T->compare_exchange_weak(Expected, New, std::memory_order_release); } template inline Type AtomicAddRelaxed(Type volatile* Target, Type Value) { std::atomic* T = (std::atomic*) Target; return T->fetch_add(Value, std::memory_order_relaxed); } template inline Type AtomicAddAcquire(Type volatile* Target, Type Value) { std::atomic* T = (std::atomic*) Target; return T->fetch_add(Value, std::memory_order_acquire); } template inline Type AtomicAddRelease(Type volatile* Target, Type Value) { std::atomic* T = (std::atomic*) Target; return T->fetch_add(Value, std::memory_order_release); } } // namespace Private } // namespace Trace } // namespace UE /* {{{1 Protocol0.h */ namespace UE { namespace Trace { #if defined(TRACE_PRIVATE_PROTOCOL_0) inline #endif namespace Protocol0 { enum EProtocol : uint8 { Id = 0 }; enum : uint8 { /* Category */ Field_CategoryMask = 0300, Field_Integer = 0000, Field_Float = 0100, Field_Array = 0200, /* Size */ Field_Pow2SizeMask = 0003, Field_8 = 0000, Field_16 = 0001, Field_32 = 0002, Field_64 = 0003, #if PLATFORM_64BITS Field_Ptr = Field_64, #else Field_Ptr = Field_32, #endif /* Specials */ Field_SpecialMask = 0030, Field_Pod = 0000, Field_String = 0010, /*Field_Unused_2 = 0020, ... 
Field_Unused_7 = 0070,*/ }; enum class EFieldType : uint8 { Bool = Field_Pod | Field_Integer | Field_8, Int8 = Field_Pod | Field_Integer | Field_8, Int16 = Field_Pod | Field_Integer | Field_16, Int32 = Field_Pod | Field_Integer | Field_32, Int64 = Field_Pod | Field_Integer | Field_64, Pointer = Field_Pod | Field_Integer | Field_Ptr, Float32 = Field_Pod | Field_Float | Field_32, Float64 = Field_Pod | Field_Float | Field_64, AnsiString = Field_String | Field_Integer|Field_Array | Field_8, WideString = Field_String | Field_Integer|Field_Array | Field_16, Array = Field_Array, }; struct FNewEventEvent { uint16 EventUid; uint8 FieldCount; uint8 Flags; uint8 LoggerNameSize; uint8 EventNameSize; struct { uint16 Offset; uint16 Size; uint8 TypeInfo; uint8 NameSize; } Fields[]; /*uint8 NameData[]*/ }; enum class EKnownEventUids : uint16 { NewEvent, User, Max = (1 << 14) - 1, // ...leaves two MSB bits for other uses. UidMask = Max, Invalid = Max, Flag_Important = 1 << 14, Flag_Unused = 1 << 15, }; struct FEventHeader { uint16 Uid; uint16 Size; uint8 EventData[]; }; } // namespace Protocol0 } // namespace Trace } // namespace UE /* {{{1 Protocol1.h */ namespace UE { namespace Trace { #if defined(TRACE_PRIVATE_PROTOCOL_1) inline #endif namespace Protocol1 { enum EProtocol : uint8 { Id = 1 }; using Protocol0::EFieldType; using Protocol0::FNewEventEvent; enum class EEventFlags : uint8 { Important = 1 << 0, MaybeHasAux = 1 << 1, NoSync = 1 << 2, }; enum class EKnownEventUids : uint16 { NewEvent, User, Max = (1 << 15) - 1, UidMask = Max, Invalid = Max, }; struct FEventHeader { uint16 Uid; uint16 Size; uint16 Serial; uint8 EventData[]; }; struct FAuxHeader { enum : uint32 { AuxDataBit = 0x80, FieldMask = 0x7f, SizeLimit = 1 << 24, }; union { uint8 FieldIndex; // 7 bits max (MSB is used to indicate aux data) uint32 Size; // encoded as (Size & 0x00ffffff) << 8 }; uint8 Data[]; }; } // namespace Protocol1 } // namespace Trace } // namespace UE /* {{{1 Protocol2.h */ namespace UE { 
namespace Trace { #if defined(TRACE_PRIVATE_PROTOCOL_2) inline #endif namespace Protocol2 { enum EProtocol : uint8 { Id = 2 }; using Protocol1::EFieldType; using Protocol1::FNewEventEvent; using Protocol1::EEventFlags; using Protocol1::EKnownEventUids; using Protocol1::FAuxHeader; struct FEventHeader { uint16 Uid; uint16 Size; }; #pragma pack(push, 1) struct FEventHeaderSync : public FEventHeader { uint16 SerialLow; // 24-bit... uint8 SerialHigh; // ...serial no. uint8 EventData[]; }; #pragma pack(pop) static_assert(sizeof(FEventHeaderSync) == 7, "Packing assumption doesn't hold"); } // namespace Protocol2 } // namespace Trace } // namespace UE /* {{{1 Protocol3.h */ namespace UE { namespace Trace { #if defined(TRACE_PRIVATE_PROTOCOL_3) inline #endif namespace Protocol3 { enum EProtocol : uint8 { Id = 3 }; using Protocol2::EFieldType; using Protocol2::FNewEventEvent; using Protocol2::EEventFlags; using Protocol2::EKnownEventUids; using Protocol2::FAuxHeader; using Protocol2::FEventHeader; using Protocol2::FEventHeaderSync; } // namespace Protocol3 } // namespace Trace } // namespace UE /* {{{1 Protocol4.h */ namespace UE { namespace Trace { #if defined(TRACE_PRIVATE_PROTOCOL_4) inline #endif namespace Protocol4 { enum EProtocol : uint8 { Id = 4 }; using Protocol3::EFieldType; using Protocol3::FNewEventEvent; using Protocol3::EEventFlags; using Protocol3::FAuxHeader; using Protocol3::FEventHeader; using Protocol3::FEventHeaderSync; struct EKnownEventUids { static const uint16 Flag_TwoByteUid = 1 << 0; static const uint16 _UidShift = 1; enum : uint16 { NewEvent = 0, EnterScope, EnterScope_T, LeaveScope, LeaveScope_T, _WellKnownNum, }; static const uint16 User = _WellKnownNum; static const uint16 Max = (1 << (16 - _UidShift)) - 1; static const uint16 Invalid = Max; }; } // namespace Protocol4 } // namespace Trace } // namespace UE /* {{{1 Protocol5.h */ namespace UE { namespace Trace { #if defined(TRACE_PRIVATE_PROTOCOL_5) inline #endif namespace Protocol5 { enum 
EProtocol : uint8 { Id = 5 }; using Protocol4::EFieldType; using Protocol4::FNewEventEvent; using Protocol4::EEventFlags; struct EKnownEventUids { static const uint16 Flag_TwoByteUid = 1 << 0; static const uint16 _UidShift = 1; enum : uint16 { NewEvent = 0, AuxData, _AuxData_Unused, AuxDataTerminal, EnterScope, LeaveScope, _Unused6, _Unused7, EnterScope_T, _EnterScope_T_Unused0, // reserved for variable _EnterScope_T_Unused1, // length timestamps _EnterScope_T_Unused2, LeaveScope_T, _LeaveScope_T_Unused0, _LeaveScope_T_Unused1, _LeaveScope_T_Unused2, _WellKnownNum, }; static const uint16 User = _WellKnownNum; static const uint16 Max = (1 << (16 - _UidShift)) - 1; static const uint16 Invalid = Max; }; struct FEventHeader { uint16 Uid; uint8 Data[]; }; static_assert(sizeof(FEventHeader) == 2, "Struct layout assumption doesn't match expectation"); struct FImportantEventHeader { uint16 Uid; uint16 Size; uint8 Data[]; }; static_assert(sizeof(FImportantEventHeader) == 4, "Struct layout assumption doesn't match expectation"); #pragma pack(push, 1) struct FEventHeaderSync { uint16 Uid; uint16 SerialLow; // 24-bit uint8 SerialHigh; // serial no. 
uint8 Data[]; }; #pragma pack(pop) static_assert(sizeof(FEventHeaderSync) == 5, "Packing assumption doesn't hold"); struct FAuxHeader { enum : uint32 { FieldShift = 8, FieldBits = 5, FieldMask = (1 << FieldBits) - 1, SizeShift = FieldShift + FieldBits, SizeLimit = 1 << (32 - SizeShift), }; union { struct { uint8 Uid; uint8 FieldIndex_Size; uint16 Size; }; uint32 Pack; }; uint8 Data[]; }; static_assert(sizeof(FAuxHeader) == 4, "Struct layout assumption doesn't match expectation"); } // namespace Protocol5 } // namespace Trace } // namespace UE /* {{{1 Protocol.h */ #if defined(_MSC_VER) #pragma warning(push) #pragma warning(disable : 4200) // non-standard zero-sized array #endif #if defined(_MSC_VER) #pragma warning(pop) #endif /* {{{1 Writer.inl */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { namespace Private { struct FWriteBuffer { uint8 Overflow[6]; uint16 Size; uint64 PrevTimestamp; FWriteBuffer* __restrict NextThread; FWriteBuffer* __restrict NextBuffer; uint8* __restrict Cursor; uint8* __restrict volatile Committed; uint8* __restrict Reaped; int32 volatile EtxOffset; int16 Partial; uint16 ThreadId; }; TRACELOG_API uint64 TimeGetTimestamp(); TRACELOG_API FWriteBuffer* Writer_NextBuffer(int32); TRACELOG_API FWriteBuffer* Writer_GetBuffer(); #if IS_MONOLITHIC extern thread_local FWriteBuffer* GTlsWriteBuffer; inline FWriteBuffer* Writer_GetBuffer() { return GTlsWriteBuffer; } #endif // IS_MONOLITHIC inline uint64 Writer_GetTimestamp(FWriteBuffer* Buffer) { uint64 Ret = TimeGetTimestamp() - Buffer->PrevTimestamp; Buffer->PrevTimestamp += Ret; return Ret; } } // namespace Private } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 Field.h */ #if UE_TRACE_ENABLED /* Statically sized fields (e.g. UE_TRACE_EVENT_FIELD(float[4], Colours)) are * not supported as yet. No call for them. 
The following define is used to track * where and partially how to implement them */ #define STATICALLY_SIZED_ARRAY_FIELDS_SUPPORT 0 namespace UE { namespace Trace { namespace Private { UE_TRACE_API void Field_WriteAuxData(uint32, const uint8*, int32); UE_TRACE_API void Field_WriteStringAnsi(uint32, const ANSICHAR*, int32); UE_TRACE_API void Field_WriteStringAnsi(uint32, const WIDECHAR*, int32); UE_TRACE_API void Field_WriteStringWide(uint32, const WIDECHAR*, int32); } // namespace Private template struct TFieldType; template <> struct TFieldType { enum { Tid = int(EFieldType::Bool), Size = sizeof(bool) }; }; template <> struct TFieldType { enum { Tid = int(EFieldType::Int8), Size = sizeof(int8) }; }; template <> struct TFieldType { enum { Tid = int(EFieldType::Int16), Size = sizeof(int16) }; }; template <> struct TFieldType { enum { Tid = int(EFieldType::Int32), Size = sizeof(int32) }; }; template <> struct TFieldType { enum { Tid = int(EFieldType::Int64), Size = sizeof(int64) }; }; template <> struct TFieldType { enum { Tid = int(EFieldType::Int8), Size = sizeof(uint8) }; }; template <> struct TFieldType { enum { Tid = int(EFieldType::Int16), Size = sizeof(uint16) }; }; template <> struct TFieldType { enum { Tid = int(EFieldType::Int32), Size = sizeof(uint32) }; }; template <> struct TFieldType { enum { Tid = int(EFieldType::Int64), Size = sizeof(uint64) }; }; template <> struct TFieldType { enum { Tid = int(EFieldType::Float32),Size = sizeof(float) }; }; template <> struct TFieldType { enum { Tid = int(EFieldType::Float64),Size = sizeof(double) }; }; template struct TFieldType { enum { Tid = int(EFieldType::Pointer),Size = sizeof(void*) }; }; template struct TFieldType { enum { Tid = int(TFieldType::Tid)|int(EFieldType::Array), Size = 0, }; }; #if STATICALLY_SIZED_ARRAY_FIELDS_SUPPORT template struct TFieldType { enum { Tid = int(TFieldType::Tid)|int(EFieldType::Array), Size = sizeof(T[N]), }; }; #endif // STATICALLY_SIZED_ARRAY_FIELDS_SUPPORT template <> struct 
TFieldType { enum { Tid = int(EFieldType::AnsiString), Size = 0, }; }; template <> struct TFieldType { enum { Tid = int(EFieldType::WideString), Size = 0, }; }; struct FLiteralName { template explicit FLiteralName(const ANSICHAR (&Name)[Size]) : Ptr(Name) , Length(Size - 1) { static_assert(Size < 256, "Field name is too large"); } const ANSICHAR* Ptr; uint8 Length; }; struct FFieldDesc { FFieldDesc(const FLiteralName& Name, uint8 Type, uint16 Offset, uint16 Size) : Name(Name.Ptr) , ValueOffset(Offset) , ValueSize(Size) , NameSize(Name.Length) , TypeInfo(Type) { } const ANSICHAR* Name; uint16 ValueOffset; uint16 ValueSize; uint8 NameSize; uint8 TypeInfo; }; template struct TField; enum class EIndexPack { NumFieldsMax = 1 << FAuxHeader::FieldBits, NumFieldsShift = 8, NumFieldsMask = (1 << NumFieldsShift) - 1, AuxFieldCounter = 1 << NumFieldsShift, }; #define TRACE_PRIVATE_FIELD(InIndex, InOffset, Type) \ enum \ { \ Index = InIndex, \ Offset = InOffset, \ Tid = TFieldType::Tid, \ Size = TFieldType::Size, \ }; \ static_assert((Index & int(EIndexPack::NumFieldsMask)) < int(EIndexPack::NumFieldsMax), "Trace events may only have up to EIndexPack::NumFieldsMax fields"); \ private: \ FFieldDesc FieldDesc; \ public: \ TField(const FLiteralName& Name) \ : FieldDesc(Name, Tid, Offset, Size) \ { \ } template struct TField { TRACE_PRIVATE_FIELD(InIndex + int(EIndexPack::AuxFieldCounter), InOffset, Type[]); }; #if STATICALLY_SIZED_ARRAY_FIELDS_SUPPORT template struct TField { TRACE_PRIVATE_FIELD(InIndex, InOffset, Type[Count]); }; #endif // STATICALLY_SIZED_ARRAY_FIELDS_SUPPORT template struct TField { TRACE_PRIVATE_FIELD(InIndex + int(EIndexPack::AuxFieldCounter), InOffset, AnsiString); }; template struct TField { TRACE_PRIVATE_FIELD(InIndex + int(EIndexPack::AuxFieldCounter), InOffset, WideString); }; template struct TField { TRACE_PRIVATE_FIELD(InIndex, InOffset, Type); }; #undef TRACE_PRIVATE_FIELD enum EventProps {}; template struct TField { enum : uint16 { NumFields = 
InNumFields & int(EIndexPack::NumFieldsMask), Size = InSize, NumAuxFields = (InNumFields >> int(EIndexPack::NumFieldsShift)) & int(EIndexPack::NumFieldsMask), }; }; } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 EventNode.h */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { namespace Private { struct FEventInfo { enum { Flag_None = 0, Flag_Important = 1 << 0, Flag_MaybeHasAux = 1 << 1, Flag_NoSync = 1 << 2, }; FLiteralName LoggerName; FLiteralName EventName; const FFieldDesc* Fields; uint16 FieldCount; uint16 Flags; }; class FEventNode { public: struct FIter { const FEventNode* GetNext(); void* Inner; }; static FIter ReadNew(); static void OnConnect(); TRACELOG_API uint32 Initialize(const FEventInfo* InInfo); void Describe() const; uint32 GetUid() const { return Uid; } private: FEventNode* Next; const FEventInfo* Info; uint32 Uid; }; } // namespace Private } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 ImportantLogScope.h */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { namespace Private { class FImportantLogScope { public: template static FImportantLogScope Enter(); template static FImportantLogScope Enter(uint32 ArrayDataSize); void operator += (const FImportantLogScope&) const; const FImportantLogScope& operator << (bool) const { return *this; } constexpr explicit operator bool () const { return true; } template struct FFieldSet; private: static FImportantLogScope EnterImpl(uint32 Uid, uint32 Size); uint8* Ptr; int32 BufferOffset; int32 AuxCursor; }; } // namespace Private } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 SharedBuffer.h */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { namespace Private { struct FSharedBuffer { enum : uint32 { CursorShift = 10 }; enum : uint32 { RefBit = 1 << 0 }; enum : uint32 { RefInit = (1 << CursorShift) - 1 }; enum : uint32 { MaxSize = 1 << (32 - CursorShift - 1) }; int32 volatile Cursor; // also packs in a ref count. 
uint32 Size; uint32 Final; uint32 _Unused; FSharedBuffer* Next; }; struct FNextSharedBuffer { FSharedBuffer* Buffer; int32 RegionStart; }; } // namespace Private } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 ImportantLogScope.inl */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { namespace Private { extern TRACELOG_API FSharedBuffer* volatile GSharedBuffer; TRACELOG_API FNextSharedBuffer Writer_NextSharedBuffer(FSharedBuffer*, int32, int32); template FORCENOINLINE FImportantLogScope FImportantLogScope::Enter(uint32 ArrayDataSize) { static_assert(uint32(T::EventFlags) & uint32(FEventInfo::Flag_MaybeHasAux), "Only important trace events with array-type fields need a size parameter to UE_TRACE_LOG()"); ArrayDataSize += sizeof(FAuxHeader) * T::EventProps_Meta::NumAuxFields; ArrayDataSize += 1; // for AuxDataTerminal uint32 Size = T::GetSize(); uint32 Uid = T::GetUid() >> EKnownEventUids::_UidShift; FImportantLogScope Ret = EnterImpl(Uid, Size + ArrayDataSize); Ret.AuxCursor += Size; Ret.Ptr[Ret.AuxCursor] = uint8(EKnownEventUids::AuxDataTerminal); return Ret; } template inline FImportantLogScope FImportantLogScope::Enter() { static_assert(!(uint32(T::EventFlags) & uint32(FEventInfo::Flag_MaybeHasAux)), "Important trace events with array-type fields must be traced with UE_TRACE_LOG(Logger, Event, Channel, ArrayDataSize)"); uint32 Size = T::GetSize(); uint32 Uid = T::GetUid() >> EKnownEventUids::_UidShift; return EnterImpl(Uid, Size); } inline FImportantLogScope FImportantLogScope::EnterImpl(uint32 Uid, uint32 Size) { FSharedBuffer* Buffer = AtomicLoadAcquire(&GSharedBuffer); int32 AllocSize = Size; AllocSize += sizeof(FImportantEventHeader); int32 NegSizeAndRef = 0 - ((AllocSize << FSharedBuffer::CursorShift) | FSharedBuffer::RefBit); int32 RegionStart = AtomicAddRelaxed(&(Buffer->Cursor), NegSizeAndRef); if (UNLIKELY(RegionStart + NegSizeAndRef < 0)) { FNextSharedBuffer Next = Writer_NextSharedBuffer(Buffer, RegionStart, 
NegSizeAndRef); Buffer = Next.Buffer; RegionStart = Next.RegionStart; } int32 Bias = (RegionStart >> FSharedBuffer::CursorShift); uint8* Out = (uint8*)Buffer - Bias; auto* Header = (FImportantEventHeader*)Out; Header->Uid = uint16(Uid); Header->Size = uint16(Size); FImportantLogScope Ret; Ret.Ptr = Header->Data; Ret.BufferOffset = int32(PTRINT(Buffer) - PTRINT(Ret.Ptr)); Ret.AuxCursor = 0; return Ret; } inline void FImportantLogScope::operator += (const FImportantLogScope&) const { auto* Buffer = (FSharedBuffer*)(Ptr + BufferOffset); AtomicAddRelease(&(Buffer->Cursor), int32(FSharedBuffer::RefBit)); } template struct FImportantLogScope::FFieldSet { static void Impl(FImportantLogScope* Scope, const Type& Value) { uint8* Dest = (uint8*)(Scope->Ptr) + FieldMeta::Offset; ::memcpy(Dest, &Value, sizeof(Type)); } }; template struct FImportantLogScope::FFieldSet { static void Impl(FImportantLogScope* Scope, Type const* Data, int32 Num) { uint32 Size = Num * sizeof(Type); auto* Header = (FAuxHeader*)(Scope->Ptr + Scope->AuxCursor); Header->Pack = Size << FAuxHeader::SizeShift; Header->Pack |= (FieldMeta::Index & int32(EIndexPack::NumFieldsMask)) << FAuxHeader::FieldShift; Header->Uid = uint8(EKnownEventUids::AuxData); memcpy(Header + 1, Data, Size); Scope->AuxCursor += sizeof(FAuxHeader) + Size; Scope->Ptr[Scope->AuxCursor] = uint8(EKnownEventUids::AuxDataTerminal); } }; template struct FImportantLogScope::FFieldSet { static void Impl(FImportantLogScope* Scope, const ANSICHAR* String, int32 Length=-1) { if (Length < 0) { Length = int32(strlen(String)); } auto* Header = (FAuxHeader*)(Scope->Ptr + Scope->AuxCursor); Header->Pack = Length << FAuxHeader::SizeShift; Header->Pack |= (FieldMeta::Index & int32(EIndexPack::NumFieldsMask)) << FAuxHeader::FieldShift; Header->Uid = uint8(EKnownEventUids::AuxData); memcpy(Header + 1, String, Length); Scope->AuxCursor += sizeof(FAuxHeader) + Length; Scope->Ptr[Scope->AuxCursor] = uint8(EKnownEventUids::AuxDataTerminal); } static void 
Impl(FImportantLogScope* Scope, const WIDECHAR* String, int32 Length=-1) { if (Length < 0) { Length = 0; for (const WIDECHAR* c = String; *c; ++c, ++Length); } auto* Header = (FAuxHeader*)(Scope->Ptr + Scope->AuxCursor); Header->Pack = Length << FAuxHeader::SizeShift; Header->Pack |= (FieldMeta::Index & int32(EIndexPack::NumFieldsMask)) << FAuxHeader::FieldShift; Header->Uid = uint8(EKnownEventUids::AuxData); auto* Out = (int8*)(Header + 1); for (int32 i = 0; i < Length; ++i) { *Out = int8(*String); ++Out; ++String; } Scope->AuxCursor += sizeof(FAuxHeader) + Length; Scope->Ptr[Scope->AuxCursor] = uint8(EKnownEventUids::AuxDataTerminal); } }; template struct FImportantLogScope::FFieldSet { static void Impl(FImportantLogScope* Scope, const WIDECHAR* String, int32 Length=-1) { if (Length < 0) { Length = 0; for (const WIDECHAR* c = String; *c; ++c, ++Length); } uint32 Size = Length * sizeof(WIDECHAR); auto* Header = (FAuxHeader*)(Scope->Ptr + Scope->AuxCursor); Header->Pack = Size << FAuxHeader::SizeShift; Header->Pack |= (FieldMeta::Index & int32(EIndexPack::NumFieldsMask)) << FAuxHeader::FieldShift; Header->Uid = uint8(EKnownEventUids::AuxData); memcpy(Header + 1, String, Size); Scope->AuxCursor += sizeof(FAuxHeader) + Size; Scope->Ptr[Scope->AuxCursor] = uint8(EKnownEventUids::AuxDataTerminal); } }; } // namespace Private } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 LogScope.h */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { namespace Private { struct FWriteBuffer; template class TLogScope; class FLogScope { friend class FEventNode; public: template static auto Enter(); template static auto ScopedEnter(); template static auto ScopedStampedEnter(); void* GetPointer() const { return Ptr; } const FLogScope& operator << (bool) const { return *this; } constexpr explicit operator bool () const { return true; } template struct FFieldSet; protected: void Commit() const; void Commit(FWriteBuffer* __restrict LatestBuffer) const; private: 
template static auto EnterImpl(uint32 Uid, uint32 Size); template void EnterPrelude(uint32 Size); void Enter(uint32 Uid, uint32 Size); void EnterNoSync(uint32 Uid, uint32 Size); uint8* Ptr; FWriteBuffer* Buffer; }; template class TLogScope : public FLogScope { public: void operator += (const FLogScope&) const; }; class FScopedLogScope { public: ~FScopedLogScope(); void SetActive(); bool bActive = false; }; class FScopedStampedLogScope { public: ~FScopedStampedLogScope(); void SetActive(); bool bActive = false; }; } // namespace Private } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 LogScope.inl */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { namespace Private { extern TRACELOG_API uint32 volatile GLogSerial; inline void FLogScope::Commit() const { AtomicStoreRelease((uint8**) &(Buffer->Committed), Buffer->Cursor); } inline void FLogScope::Commit(FWriteBuffer* __restrict LatestBuffer) const { if (LatestBuffer != Buffer) { AtomicStoreRelease((uint8**) &(LatestBuffer->Committed), LatestBuffer->Cursor); } Commit(); } template inline auto FLogScope::EnterImpl(uint32 Uid, uint32 Size) { TLogScope<(Flags & FEventInfo::Flag_MaybeHasAux) != 0> Ret; if ((Flags & FEventInfo::Flag_NoSync) != 0) { Ret.EnterNoSync(Uid, Size); } else { Ret.Enter(Uid, Size); } return Ret; } template inline void FLogScope::EnterPrelude(uint32 Size) { uint32 AllocSize = sizeof(HeaderType) + Size; Buffer = Writer_GetBuffer(); Buffer->Cursor += AllocSize; if (UNLIKELY(Buffer->Cursor > (uint8*)Buffer)) { Buffer = Writer_NextBuffer(AllocSize); } Ptr = Buffer->Cursor - Size; } inline void FLogScope::Enter(uint32 Uid, uint32 Size) { EnterPrelude(Size); auto* Header = (uint16*)(Ptr - sizeof(FEventHeaderSync::SerialHigh)); *(uint32*)(Header - 1) = uint32(AtomicAddRelaxed(&GLogSerial, 1u)); Header[-2] = uint16(Uid)|int32(EKnownEventUids::Flag_TwoByteUid); } inline void FLogScope::EnterNoSync(uint32 Uid, uint32 Size) { EnterPrelude(Size); auto* Header = (uint16*)(Ptr); 
Header[-1] = uint16(Uid)|int32(EKnownEventUids::Flag_TwoByteUid); } template inline void TLogScope::operator += (const FLogScope&) const { Commit(); } template inline void TLogScope::operator += (const FLogScope&) const { FWriteBuffer* LatestBuffer = Writer_GetBuffer(); LatestBuffer->Cursor[0] = uint8(EKnownEventUids::AuxDataTerminal << EKnownEventUids::_UidShift); LatestBuffer->Cursor++; Commit(LatestBuffer); } inline FScopedLogScope::~FScopedLogScope() { if (!bActive) { return; } uint8 LeaveUid = uint8(EKnownEventUids::LeaveScope << EKnownEventUids::_UidShift); FWriteBuffer* Buffer = Writer_GetBuffer(); if (UNLIKELY(int32((uint8*)Buffer - Buffer->Cursor) < int32(sizeof(LeaveUid)))) { Buffer = Writer_NextBuffer(0); } Buffer->Cursor[0] = LeaveUid; Buffer->Cursor += sizeof(LeaveUid); AtomicStoreRelease((uint8**) &(Buffer->Committed), Buffer->Cursor); } inline void FScopedLogScope::SetActive() { bActive = true; } inline FScopedStampedLogScope::~FScopedStampedLogScope() { if (!bActive) { return; } FWriteBuffer* Buffer = Writer_GetBuffer(); uint64 Stamp = Writer_GetTimestamp(Buffer); if (UNLIKELY(int32((uint8*)Buffer - Buffer->Cursor) < int32(sizeof(Stamp)))) { Buffer = Writer_NextBuffer(0); } Stamp <<= 8; Stamp += uint8(EKnownEventUids::LeaveScope_T) << EKnownEventUids::_UidShift; memcpy((uint64*)(Buffer->Cursor), &Stamp, sizeof(Stamp)); Buffer->Cursor += sizeof(Stamp); AtomicStoreRelease((uint8**) &(Buffer->Committed), Buffer->Cursor); } inline void FScopedStampedLogScope::SetActive() { bActive = true; } template FORCENOINLINE auto FLogScope::Enter() { uint32 Size = EventType::GetSize(); uint32 Uid = EventType::GetUid(); return EnterImpl(Uid, Size); } template FORCENOINLINE auto FLogScope::ScopedEnter() { uint8 EnterUid = uint8(EKnownEventUids::EnterScope << EKnownEventUids::_UidShift); FWriteBuffer* Buffer = Writer_GetBuffer(); if (UNLIKELY(int32((uint8*)Buffer - Buffer->Cursor) < int32(sizeof(EnterUid)))) { Buffer = Writer_NextBuffer(0); } Buffer->Cursor[0] = 
EnterUid; Buffer->Cursor += sizeof(EnterUid); AtomicStoreRelease((uint8**) &(Buffer->Committed), Buffer->Cursor); return Enter(); } template FORCENOINLINE auto FLogScope::ScopedStampedEnter() { uint64 Stamp; FWriteBuffer* Buffer = Writer_GetBuffer(); if (UNLIKELY(int32((uint8*)Buffer - Buffer->Cursor) < int32(sizeof(Stamp)))) { Buffer = Writer_NextBuffer(0); } Stamp = Writer_GetTimestamp(Buffer); Stamp <<= 8; Stamp += uint8(EKnownEventUids::EnterScope_T) << EKnownEventUids::_UidShift; memcpy((uint64*)(Buffer->Cursor), &Stamp, sizeof(Stamp)); Buffer->Cursor += sizeof(Stamp); AtomicStoreRelease((uint8**) &(Buffer->Committed), Buffer->Cursor); return Enter(); } template struct FLogScope::FFieldSet { static void Impl(FLogScope* Scope, const Type& Value) { uint8* Dest = (uint8*)(Scope->Ptr) + FieldMeta::Offset; ::memcpy(Dest, &Value, sizeof(Type)); } }; template struct FLogScope::FFieldSet { static void Impl(FLogScope*, Type const* Data, int32 Num) { static const uint32 Index = FieldMeta::Index & int32(EIndexPack::NumFieldsMask); int32 Size = (Num * sizeof(Type)) & (FAuxHeader::SizeLimit - 1) & ~(sizeof(Type) - 1); Field_WriteAuxData(Index, (const uint8*)Data, Size); } }; #if STATICALLY_SIZED_ARRAY_FIELDS_SUPPORT template struct FLogScope::FFieldSet { static void Impl(FLogScope*, Type const* Data, int32 Num=-1) = delete; }; #endif // STATICALLY_SIZED_ARRAY_FIELDS_SUPPORT template struct FLogScope::FFieldSet { static void Impl(FLogScope*, const ANSICHAR* String, int32 Length=-1) { if (Length < 0) { Length = int32(strlen(String)); } static const uint32 Index = FieldMeta::Index & int32(EIndexPack::NumFieldsMask); Field_WriteStringAnsi(Index, String, Length); } static void Impl(FLogScope*, const WIDECHAR* String, int32 Length=-1) { if (Length < 0) { Length = 0; for (const WIDECHAR* c = String; *c; ++c, ++Length); } static const uint32 Index = FieldMeta::Index & int32(EIndexPack::NumFieldsMask); Field_WriteStringAnsi(Index, String, Length); } }; template struct 
FLogScope::FFieldSet { static void Impl(FLogScope*, const WIDECHAR* String, int32 Length=-1) { if (Length < 0) { Length = 0; for (const WIDECHAR* c = String; *c; ++c, ++Length); } static const uint32 Index = FieldMeta::Index & int32(EIndexPack::NumFieldsMask); Field_WriteStringWide(Index, String, Length); } }; } // namespace Private } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 Trace.inl */ /* {{{1 Transport.h */ namespace UE { namespace Trace { enum ETransport : uint8 { _Unused = 0, Raw = 1, Packet = 2, TidPacket = 3, TidPacketSync = 4, Active = TidPacketSync, }; enum ETransportTid : uint32 { Events = 0, // used to describe events Internal = 1, // events to make the trace stream function Importants = Internal, // important/cached events Bias, // [Bias,End] = threads. Note bias can't be.. /* ... */ // ..changed as it breaks backwards compat :( End = 0x3ffe, // two msbs are user for packet markers Sync = 0x3fff, // see Writer_SendSync() }; namespace Private { struct FTidPacketBase { enum : uint16 { EncodedMarker = 0x8000, PartialMarker = 0x4000, ThreadIdMask = PartialMarker - 1, }; uint16 PacketSize; uint16 ThreadId; }; template struct TTidPacket : public FTidPacketBase { uint8 Data[DataSize]; }; template struct TTidPacketEncoded : public FTidPacketBase { uint16 DecodedSize; uint8 Data[DataSize]; }; using FTidPacket = TTidPacket<0>; using FTidPacketEncoded = TTidPacketEncoded<0>; static_assert(sizeof(FTidPacket) == 4, ""); static_assert(sizeof(FTidPacketEncoded) == 6, ""); } // namespace Private } // namespace Trace } // namespace UE /* {{{1 Platform.h */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { namespace Private { UPTRINT ThreadCreate(const ANSICHAR* Name, void (*Entry)()); void ThreadSleep(uint32 Milliseconds); void ThreadJoin(UPTRINT Handle); void ThreadDestroy(UPTRINT Handle); uint64 TimeGetFrequency(); TRACELOG_API uint64 TimeGetTimestamp(); UPTRINT TcpSocketConnect(const ANSICHAR* Host, uint16 Port); UPTRINT 
TcpSocketListen(uint16 Port); int32 TcpSocketAccept(UPTRINT Socket, UPTRINT& Out); bool TcpSocketHasData(UPTRINT Socket); int32 IoRead(UPTRINT Handle, void* Data, uint32 Size); bool IoWrite(UPTRINT Handle, const void* Data, uint32 Size); void IoClose(UPTRINT Handle); UPTRINT FileOpen(const ANSICHAR* Path); } // namespace Private } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 WriteBufferRedirect.h */ namespace UE { namespace Trace { namespace Private { extern thread_local FWriteBuffer* GTlsWriteBuffer; template class TWriteBufferRedirect { public: TWriteBufferRedirect(); ~TWriteBufferRedirect(); void Close(); uint8* GetData(); uint32 GetSize() const; uint32 GetCapacity() const; void Reset(); private: FWriteBuffer* PrevBuffer; uint8 Data[BufferSize]; FWriteBuffer Buffer; }; template inline TWriteBufferRedirect::TWriteBufferRedirect() { Reset(); PrevBuffer = GTlsWriteBuffer; GTlsWriteBuffer = &Buffer; } template inline TWriteBufferRedirect::~TWriteBufferRedirect() { Close(); } template inline void TWriteBufferRedirect::Close() { if (PrevBuffer == nullptr) { return; } GTlsWriteBuffer = PrevBuffer; PrevBuffer = nullptr; } template inline uint8* TWriteBufferRedirect::GetData() { return Buffer.Reaped; } template inline uint32 TWriteBufferRedirect::GetSize() const { return uint32(Buffer.Committed - Buffer.Reaped); } template inline uint32 TWriteBufferRedirect::GetCapacity() const { return BufferSize; } template inline void TWriteBufferRedirect::Reset() { Buffer.Cursor = Data + sizeof(uint32); Buffer.Committed = Buffer.Cursor; Buffer.Reaped = Buffer.Cursor; } } // namespace Private } // namespace Trace } // namespace UE #if TRACE_IMPLEMENT /* {{{1 BlockPool.cpp */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { namespace Private { void* Writer_MemoryAllocate(SIZE_T, uint32); void Writer_MemoryFree(void*, uint32); struct FPoolPage { FPoolPage* NextPage; uint32 AllocSize; }; struct FPoolBlockList { FWriteBuffer* Head; FWriteBuffer* Tail; }; 
// Block pool implementation: 4KB blocks carved from larger pages; each block's
// FWriteBuffer header lives at the block's tail (FirstBlock = base + block size
// - sizeof(FWriteBuffer)), and the first block of a page donates sizeof(FPoolPage)
// of capacity to the page-list node at the page base. Writer_AllocateBlockFromPool
// pops from the GPoolFreeList via CAS, using GPoolFutex as a single-grower guard
// (others ThreadSleep(0) and retry) while a new page is carved.
#define T_ALIGN alignas(PLATFORM_CACHE_LINE_SIZE) static const uint32 GPoolBlockSize = 4 << 10; static const uint32 GPoolPageSize = GPoolBlockSize << 4; static const uint32 GPoolInitPageSize = GPoolBlockSize << 6; T_ALIGN static FWriteBuffer* volatile GPoolFreeList; // = nullptr; T_ALIGN static UPTRINT volatile GPoolFutex; // = 0 T_ALIGN static FPoolPage* volatile GPoolPageList; // = nullptr; static uint32 GPoolUsage; // = 0; #undef T_ALIGN static FPoolBlockList Writer_AddPageToPool(uint32 PageSize) { uint8* PageBase = (uint8*)Writer_MemoryAllocate(PageSize, PLATFORM_CACHE_LINE_SIZE); GPoolUsage += PageSize; uint32 BufferSize = GPoolBlockSize; BufferSize -= sizeof(FWriteBuffer); BufferSize -= sizeof(uint32); // to precede event data with a small header when sending. uint8* FirstBlock = PageBase + GPoolBlockSize - sizeof(FWriteBuffer); uint8* Block = FirstBlock; for (int i = 1, n = PageSize / GPoolBlockSize; ; ++i) { auto* Buffer = (FWriteBuffer*)Block; Buffer->Size = uint16(BufferSize); if (i >= n) { break; } Buffer->NextBuffer = (FWriteBuffer*)(Block + GPoolBlockSize); Block += GPoolBlockSize; } FWriteBuffer* NextBuffer = (FWriteBuffer*)FirstBlock; NextBuffer->Size -= sizeof(FPoolPage); FPoolPage* PageListNode = (FPoolPage*)PageBase; PageListNode->NextPage = GPoolPageList; PageListNode->AllocSize = PageSize; GPoolPageList = PageListNode; return { NextBuffer, (FWriteBuffer*)Block }; } FWriteBuffer* Writer_AllocateBlockFromPool() { FWriteBuffer* Ret; while (true) { FWriteBuffer* Owned = AtomicLoadRelaxed(&GPoolFreeList); if (Owned != nullptr) { if (!AtomicCompareExchangeRelaxed(&GPoolFreeList, Owned->NextBuffer, Owned)) { PlatformYield(); continue; } } if (Owned != nullptr) { Ret = (FWriteBuffer*)Owned; break; } UPTRINT Futex = AtomicLoadRelaxed(&GPoolFutex); if (Futex || !AtomicCompareExchangeAcquire(&GPoolFutex, Futex + 1, Futex)) { ThreadSleep(0); continue; } FPoolBlockList BlockList = Writer_AddPageToPool(GPoolPageSize); Ret = BlockList.Head; for (auto* ListNode
// Publishing the freshly-carved blocks (minus Ret, which the grower keeps) back
// onto the free list, and releasing the futex, are both CAS-with-yield retry
// loops. Writer_FreeBlockListToPool prepends an entire Head..Tail chain in one
// CAS. Writer_InitializePool seeds the pool with a single-block page.
= BlockList.Tail;; PlatformYield()) { ListNode->NextBuffer = AtomicLoadRelaxed(&GPoolFreeList); if (AtomicCompareExchangeRelease(&GPoolFreeList, Ret->NextBuffer, ListNode->NextBuffer)) { break; } } for (;; Private::PlatformYield()) { if (AtomicCompareExchangeRelease(&GPoolFutex, 0, 1)) { break; } } break; } return Ret; } void Writer_FreeBlockListToPool(FWriteBuffer* Head, FWriteBuffer* Tail) { for (FWriteBuffer* ListNode = Tail;; PlatformYield()) { ListNode->NextBuffer = AtomicLoadRelaxed(&GPoolFreeList); if (AtomicCompareExchangeRelease(&GPoolFreeList, Head, ListNode->NextBuffer)) { break; } } } void Writer_InitializePool() { Writer_AddPageToPool(GPoolBlockSize); static_assert(GPoolPageSize >= 0x10000, "Page growth must be >= 64KB"); static_assert(GPoolInitPageSize >= 0x10000, "Initial page size must be >= 64KB"); } void Writer_ShutdownPool() { for (auto* Page = AtomicLoadRelaxed(&GPoolPageList); Page != nullptr;) { FPoolPage* NextPage = Page->NextPage; uint32 PageSize = (NextPage == nullptr) ?
// The last node in the page list is the initial GPoolBlockSize-sized page made
// by Writer_InitializePool, hence the special-cased free size. Channel.cpp:
// TraceLogChannel is an always-enabled channel; ChannelAnnounce/ChannelToggle
// events report channel state. GetChannelHash is an FNV-1a-style hash,
// case-insensitive for ASCII, that first drops a trailing 's'/'S'.
GPoolBlockSize : GPoolPageSize; Writer_MemoryFree(Page, PageSize); Page = NextPage; } } } // namespace Private } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 Channel.cpp */ #include #if UE_TRACE_ENABLED namespace UE { namespace Trace { struct FTraceChannel : public FChannel { bool IsEnabled() const { return true; } explicit operator bool() const { return true; } }; static FTraceChannel TraceLogChannelDetail; FChannel& TraceLogChannel = TraceLogChannelDetail; UE_TRACE_EVENT_BEGIN(Trace, ChannelAnnounce, NoSync|Important) UE_TRACE_EVENT_FIELD(uint32, Id) UE_TRACE_EVENT_FIELD(bool, IsEnabled) UE_TRACE_EVENT_FIELD(bool, ReadOnly) UE_TRACE_EVENT_FIELD(AnsiString, Name) UE_TRACE_EVENT_END() UE_TRACE_EVENT_BEGIN(Trace, ChannelToggle, NoSync|Important) UE_TRACE_EVENT_FIELD(uint32, Id) UE_TRACE_EVENT_FIELD(bool, IsEnabled) UE_TRACE_EVENT_END() static FChannel* volatile GHeadChannel; // = nullptr; static FChannel* volatile GNewChannelList; // = nullptr; static bool GInitialized; static uint32 GetChannelHash(const ANSICHAR* Input, int32 Length) { if (Length > 0 && (Input[Length - 1] | 0x20) == 's') { --Length; } uint32 Result = 0x811c9dc5; for (; Length; ++Input, --Length) { Result ^= *Input | 0x20; // a cheap ASCII-only case insensitivity.
// GetChannelNameLength also strips a trailing "Channel" suffix. New channels
// self-register onto GNewChannelList with a CAS loop in Setup(); ReadNew()
// atomically claims the whole new-channel list, and the iterator's destructor
// splices the consumed nodes onto GHeadChannel.
Result *= 0x01000193; } return Result; } static uint32 GetChannelNameLength(const ANSICHAR* ChannelName) { size_t Len = uint32(strlen(ChannelName)); if (Len > 7) { if (strcmp(ChannelName + Len - 7, "Channel") == 0) { Len -= 7; } } return uint32(Len); } FChannel::Iter::~Iter() { if (Inner[2] == nullptr) { return; } using namespace Private; for (auto* Node = (FChannel*)Inner[2];; PlatformYield()) { Node->Next = AtomicLoadRelaxed(&GHeadChannel); if (AtomicCompareExchangeRelaxed(&GHeadChannel, (FChannel*)Inner[1], Node->Next)) { break; } } } const FChannel* FChannel::Iter::GetNext() { auto* Ret = (const FChannel*)Inner[0]; if (Ret != nullptr) { Inner[0] = Ret->Next; if (Inner[0] != nullptr) { Inner[2] = Inner[0]; } } return Ret; } FChannel::Iter FChannel::ReadNew() { using namespace Private; FChannel* List = AtomicLoadRelaxed(&GNewChannelList); if (List == nullptr) { return {}; } while (!AtomicCompareExchangeAcquire(&GNewChannelList, (FChannel*)nullptr, List)) { PlatformYield(); List = AtomicLoadRelaxed(&GNewChannelList); } return { { List, List, List } }; } void FChannel::Setup(const ANSICHAR* InChannelName, const InitArgs& InArgs) { using namespace Private; Name.Ptr = InChannelName; Name.Len = GetChannelNameLength(Name.Ptr); Name.Hash = GetChannelHash(Name.Ptr, Name.Len); Args = InArgs; for (;; PlatformYield()) { FChannel* HeadChannel = AtomicLoadRelaxed(&GNewChannelList); Next = HeadChannel; if (AtomicCompareExchangeRelease(&GNewChannelList, this, Next)) { break; } } if (GInitialized) { Enabled = -1; } } void FChannel::Announce() const { UE_TRACE_LOG(Trace, ChannelAnnounce, TraceLogChannel, Name.Len * sizeof(ANSICHAR)) << ChannelAnnounce.Id(Name.Hash) << ChannelAnnounce.IsEnabled(IsEnabled()) << ChannelAnnounce.ReadOnly(Args.bReadOnly) << ChannelAnnounce.Name(Name.Ptr, Name.Len); } void FChannel::Initialize() { ToggleAll(false); GInitialized = true; } void FChannel::ToggleAll(bool bEnabled) { using namespace Private; FChannel* ChannelLists[] = {
// ToggleAll / FindChannel / EnumerateChannels all walk both lists: the not-yet-
// consumed GNewChannelList and the already-consumed GHeadChannel.
AtomicLoadAcquire(&GNewChannelList), AtomicLoadAcquire(&GHeadChannel), }; for (FChannel* Channel : ChannelLists) { for (; Channel != nullptr; Channel = (FChannel*)(Channel->Next)) { Channel->Toggle(bEnabled); } } } FChannel* FChannel::FindChannel(const ANSICHAR* ChannelName) { using namespace Private; const uint32 ChannelNameLen = GetChannelNameLength(ChannelName); const uint32 ChannelNameHash = GetChannelHash(ChannelName, ChannelNameLen); FChannel* ChannelLists[] = { AtomicLoadAcquire(&GNewChannelList), AtomicLoadAcquire(&GHeadChannel), }; for (FChannel* Channel : ChannelLists) { for (; Channel != nullptr; Channel = Channel->Next) { if (Channel->Name.Hash == ChannelNameHash) { return Channel; } } } return nullptr; } void FChannel::EnumerateChannels(ChannelIterFunc Func, void* User) { using namespace Private; FChannel* ChannelLists[] = { AtomicLoadAcquire(&GNewChannelList), AtomicLoadAcquire(&GHeadChannel), }; for (FChannel* Channel : ChannelLists) { for (; Channel != nullptr; Channel = Channel->Next) { Func(Channel->Name.Ptr, Channel->IsEnabled(), User); } } } bool FChannel::Toggle(bool bEnabled) { using namespace Private; AtomicStoreRelaxed(&Enabled, bEnabled ?
// Toggle announces the state change via a ChannelToggle event. Codec.cpp: thin
// wrappers over LZ4 (compress_fast with acceleration 1 / decompress_safe).
// Control.cpp: a small TCP command server (default port 1985) with a fixed
// table of at most 8 commands, keyed by a djb2 hash of the command word.
1 : -1); UE_TRACE_LOG(Trace, ChannelToggle, TraceLogChannel) << ChannelToggle.Id(Name.Hash) << ChannelToggle.IsEnabled(IsEnabled()); return IsEnabled(); } bool FChannel::Toggle(const ANSICHAR* ChannelName, bool bEnabled) { if (FChannel* Channel = FChannel::FindChannel(ChannelName)) { return Channel->Toggle(bEnabled); } return false; } } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 Codec.cpp */ THIRD_PARTY_INCLUDES_START #if defined(_MSC_VER) # pragma warning(push) # pragma warning(disable : 6239) #endif #if !defined(TRACE_PRIVATE_EXTERNAL_LZ4) # define LZ4_NAMESPACE Trace # undef LZ4_NAMESPACE # define TRACE_PRIVATE_LZ4_NAMESPACE ::Trace:: #else # define TRACE_PRIVATE_LZ4_NAMESPACE #endif #if defined(_MSC_VER) # pragma warning(pop) #endif THIRD_PARTY_INCLUDES_END namespace UE { namespace Trace { namespace Private { int32 Encode(const void* Src, int32 SrcSize, void* Dest, int32 DestSize) { return TRACE_PRIVATE_LZ4_NAMESPACE LZ4_compress_fast( (const char*)Src, (char*)Dest, SrcSize, DestSize, 1 // increase by 1 for small speed increase ); } uint32 GetEncodeMaxSize(uint32 InputSize) { return LZ4_COMPRESSBOUND(InputSize); } TRACELOG_API int32 Decode(const void* Src, int32 SrcSize, void* Dest, int32 DestSize) { return TRACE_PRIVATE_LZ4_NAMESPACE LZ4_decompress_safe((const char*)Src, (char*)Dest, SrcSize, DestSize); } } // namespace Private } // namespace Trace } // namespace UE /* {{{1 Control.cpp */ #if UE_TRACE_ENABLED #include namespace UE { namespace Trace { namespace Private { #if !defined(TRACE_PRIVATE_CONTROL_ENABLED) || TRACE_PRIVATE_CONTROL_ENABLED bool Writer_SendTo(const ANSICHAR*, uint32=0); bool Writer_WriteTo(const ANSICHAR*); bool Writer_Stop(); enum class EControlState : uint8 { Closed = 0, Listening, Accepted, Failed, }; struct FControlCommands { enum { Max = 8 }; struct { uint32 Hash; void* Param; void (*Thunk)(void*, uint32, ANSICHAR const* const*); } Commands[Max]; uint8 Count; }; static_assert(std::is_trivial(),
// Writer_ControlListen falls back to up to ten pseudo-random ports in
// [0x8000, 0x9fff] when the default port is taken, then marks the state Failed.
// Writer_ControlAccept transitions Listening -> Accepted, or Failed when the
// listen socket reports a fatal (-1) error.
"FControlCommands must be trivial"); static FControlCommands GControlCommands; static UPTRINT GControlListen = 0; static UPTRINT GControlSocket = 0; static EControlState GControlState; // = EControlState::Closed; static uint32 GControlPort = 1985; static uint32 Writer_ControlHash(const ANSICHAR* Word) { uint32 Hash = 5381; for (; *Word; (Hash = (Hash * 33) ^ *Word), ++Word); return Hash; } static bool Writer_ControlAddCommand( const ANSICHAR* Name, void* Param, void (*Thunk)(void*, uint32, ANSICHAR const* const*)) { if (GControlCommands.Count >= FControlCommands::Max) { return false; } uint32 Index = GControlCommands.Count++; GControlCommands.Commands[Index] = { Writer_ControlHash(Name), Param, Thunk }; return true; } static bool Writer_ControlDispatch(uint32 ArgC, ANSICHAR const* const* ArgV) { if (ArgC == 0) { return false; } uint32 Hash = Writer_ControlHash(ArgV[0]); --ArgC; ++ArgV; for (int i = 0, n = GControlCommands.Count; i < n; ++i) { const auto& Command = GControlCommands.Commands[i]; if (Command.Hash == Hash) { Command.Thunk(Command.Param, ArgC, ArgV); return true; } } return false; } static bool Writer_ControlListen() { GControlListen = TcpSocketListen(GControlPort); if (!GControlListen) { uint32 Seed = uint32(TimeGetTimestamp()); for (uint32 i = 0; i < 10 && !GControlListen; Seed *= 13, ++i) { uint32 Port = (Seed & 0x1fff) + 0x8000; GControlListen = TcpSocketListen(Port); if (GControlListen) { GControlPort = Port; break; } } } if (!GControlListen) { GControlState = EControlState::Failed; return false; } GControlState = EControlState::Listening; return true; } static bool Writer_ControlAccept() { UPTRINT Socket; int Return = TcpSocketAccept(GControlListen, Socket); if (Return <= 0) { if (Return == -1) { IoClose(GControlListen); GControlListen = 0; GControlState = EControlState::Failed; } return false; } GControlState = EControlState::Accepted; GControlSocket = Socket; return true; } static void Writer_ControlRecv() { ANSICHAR Buffer[512]; ANSICHAR*
// Writer_ControlRecv: incremental line parser over a 512-byte buffer. Commands
// are CR/LF-terminated lines of space-separated words (up to 16 args),
// dispatched in place; bytes after the last complete line are memmoved to the
// buffer front and re-scanned on the next read. A dead socket drops the state
// back to Listening.
__restrict Head = Buffer; while (TcpSocketHasData(GControlSocket)) { int32 ReadSize = int32(UPTRINT(Buffer + sizeof(Buffer) - Head)); int32 Recvd = IoRead(GControlSocket, Head, ReadSize); if (Recvd <= 0) { IoClose(GControlSocket); GControlSocket = 0; GControlState = EControlState::Listening; break; } Head += Recvd; enum EParseState { CrLfSkip, WhitespaceSkip, Word, } ParseState = EParseState::CrLfSkip; uint32 ArgC = 0; const ANSICHAR* ArgV[16]; const ANSICHAR* __restrict Spent = Buffer; for (ANSICHAR* __restrict Cursor = Buffer; Cursor < Head; ++Cursor) { switch (ParseState) { case EParseState::CrLfSkip: if (*Cursor == '\n' || *Cursor == '\r') { continue; } ParseState = EParseState::WhitespaceSkip; /* [[fallthrough]] */ case EParseState::WhitespaceSkip: if (*Cursor == ' ' || *Cursor == '\0') { continue; } if (ArgC < UE_ARRAY_COUNT(ArgV)) { ArgV[ArgC] = Cursor; ++ArgC; } ParseState = EParseState::Word; /* [[fallthrough]] */ case EParseState::Word: if (*Cursor == ' ' || *Cursor == '\0') { *Cursor = '\0'; ParseState = EParseState::WhitespaceSkip; continue; } if (*Cursor == '\r' || *Cursor == '\n') { *Cursor = '\0'; Writer_ControlDispatch(ArgC, ArgV); ArgC = 0; Spent = Cursor + 1; ParseState = EParseState::CrLfSkip; continue; } break; } } int32 UnspentSize = int32(UPTRINT(Head - Spent)); if (UnspentSize) { memmove(Buffer, Spent, UnspentSize); } Head = Buffer + UnspentSize; } } uint32 Writer_GetControlPort() { return GControlPort; } void Writer_UpdateControl() { switch (GControlState) { case EControlState::Closed: if (!Writer_ControlListen()) { break; } /* [[fallthrough]] */ case EControlState::Listening: if (!Writer_ControlAccept()) { break; } /* [[fallthrough]] */ case EControlState::Accepted: Writer_ControlRecv(); break; } } void Writer_InitializeControl() { #if PLATFORM_SWITCH GControlState = EControlState::Failed; return; #endif Writer_ControlAddCommand("SendTo", nullptr, [] (void*, uint32 ArgC, ANSICHAR const* const* ArgV) { if (ArgC > 0) { Writer_SendTo(ArgV[0]);
// Registered control commands: SendTo / WriteTo / Stop, plus ToggleChannels
// taking a comma-separated channel list and a 0/1 state. EventNode.cpp: new
// nodes are staged on GNewEventList; ReadNew() claims them and appends to the
// head/tail list. Initialize() assigns the next UID from a shared counter,
// switching to the two-byte encoding once it no longer fits in one byte.
} } ); Writer_ControlAddCommand("WriteTo", nullptr, [] (void*, uint32 ArgC, ANSICHAR const* const* ArgV) { if (ArgC > 0) { Writer_WriteTo(ArgV[0]); } } ); Writer_ControlAddCommand("Stop", nullptr, [] (void*, uint32 ArgC, ANSICHAR const* const* ArgV) { Writer_Stop(); } ); Writer_ControlAddCommand("ToggleChannels", nullptr, [] (void*, uint32 ArgC, ANSICHAR const* const* ArgV) { if (ArgC < 2) { return; } const size_t BufferSize = 512; ANSICHAR Channels[BufferSize] = {}; ANSICHAR* Ctx; const bool bState = (ArgV[1][0] != '0'); FCStringAnsi::Strcpy(Channels, BufferSize, ArgV[0]); ANSICHAR* Channel = FCStringAnsi::Strtok(Channels, ",", &Ctx); while (Channel) { FChannel::Toggle(Channel, bState); Channel = FCStringAnsi::Strtok(nullptr, ",", &Ctx); } } ); } void Writer_ShutdownControl() { if (GControlListen) { IoClose(GControlListen); GControlListen = 0; } } #else void Writer_InitializeControl() {} void Writer_ShutdownControl() {} void Writer_UpdateControl() {} uint32 Writer_GetControlPort() { return ~0u; } #endif // TRACE_PRIVATE_CONTROL_ENABLED } // namespace Private } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 EventNode.cpp */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { namespace Private { void Writer_InternalInitialize(); FEventNode* volatile GNewEventList; // = nullptr; FEventNode* GEventListHead;// = nullptr; FEventNode* GEventListTail;// = nullptr; const FEventNode* FEventNode::FIter::GetNext() { auto* Ret = (FEventNode*)Inner; if (Ret != nullptr) { Inner = Ret->Next; if (Inner == nullptr) { GEventListTail = Ret; } } return Ret; } FEventNode::FIter FEventNode::ReadNew() { FEventNode* EventList = AtomicExchangeAcquire(&GNewEventList, (FEventNode*)nullptr); if (EventList == nullptr) { return {}; } if (GEventListHead == nullptr) { GEventListHead = EventList; } else { GEventListTail->Next = EventList; } return { EventList }; } uint32 FEventNode::Initialize(const FEventInfo* InInfo) { if (Uid != 0) { return Uid; }
// Describe() serializes a NewEvent record: a uint16 uid + uint16 size header
// written just before the FNewEventEvent body, then per-field descriptors,
// then the raw logger/event/field name bytes appended by the WriteName lambda.
Writer_InternalInitialize(); static uint32 volatile EventUidCounter; // = 0; uint32 NewUid = AtomicAddRelaxed(&EventUidCounter, 1u) + EKnownEventUids::User; if (NewUid >= uint32(EKnownEventUids::Max)) { return Uid = EKnownEventUids::Invalid; } uint32 UidFlags = 0; if (NewUid >= (1 << (8 - EKnownEventUids::_UidShift))) { UidFlags |= EKnownEventUids::Flag_TwoByteUid; } NewUid <<= EKnownEventUids::_UidShift; NewUid |= UidFlags; Info = InInfo; Uid = uint16(NewUid); for (;; PlatformYield()) { Next = AtomicLoadRelaxed(&GNewEventList); if (AtomicCompareExchangeRelease(&GNewEventList, this, Next)) { break; } } return Uid; } void FEventNode::Describe() const { const FLiteralName& LoggerName = Info->LoggerName; const FLiteralName& EventName = Info->EventName; uint32 NamesSize = LoggerName.Length + EventName.Length; for (uint32 i = 0; i < Info->FieldCount; ++i) { NamesSize += Info->Fields[i].NameSize; } uint32 EventSize = sizeof(FNewEventEvent); EventSize += sizeof(FNewEventEvent::Fields[0]) * Info->FieldCount; EventSize += NamesSize; FLogScope LogScope = FLogScope::EnterImpl(0, EventSize + sizeof(uint16)); auto* Ptr = (uint16*)(LogScope.GetPointer()); Ptr[-1] = EKnownEventUids::NewEvent; // Make event look like an important one. Ideally they are sent Ptr[ 0] = uint16(EventSize); // as important and not Writer_DescribeEvents()'s redirected buf.
// OnConnect() re-queues every already-consumed event node onto GNewEventList so
// descriptions are re-sent to a new connection. Field.cpp then begins with
// Field_WriteAuxData (FAuxHeader + payload via callback); its definition is
// truncated at the end of this chunk — do not edit it from this view.
auto& Event = *(FNewEventEvent*)(Ptr + 1); Event.EventUid = uint16(Uid) >> EKnownEventUids::_UidShift; Event.LoggerNameSize = LoggerName.Length; Event.EventNameSize = EventName.Length; Event.Flags = 0; uint32 Flags = Info->Flags; if (Flags & FEventInfo::Flag_Important) Event.Flags |= uint8(EEventFlags::Important); if (Flags & FEventInfo::Flag_MaybeHasAux) Event.Flags |= uint8(EEventFlags::MaybeHasAux); if (Flags & FEventInfo::Flag_NoSync) Event.Flags |= uint8(EEventFlags::NoSync); Event.FieldCount = uint8(Info->FieldCount); for (uint32 i = 0; i < Info->FieldCount; ++i) { const FFieldDesc& Field = Info->Fields[i]; auto& Out = Event.Fields[i]; Out.Offset = Field.ValueOffset; Out.Size = Field.ValueSize; Out.TypeInfo = Field.TypeInfo; Out.NameSize = Field.NameSize; } uint8* Cursor = (uint8*)(Event.Fields + Info->FieldCount); auto WriteName = [&Cursor] (const ANSICHAR* Data, uint32 Size) { memcpy(Cursor, Data, Size); Cursor += Size; }; WriteName(LoggerName.Ptr, LoggerName.Length); WriteName(EventName.Ptr, EventName.Length); for (uint32 i = 0; i < Info->FieldCount; ++i) { const FFieldDesc& Field = Info->Fields[i]; WriteName(Field.Name, Field.NameSize); } LogScope.Commit(); } void FEventNode::OnConnect() { if (GEventListHead == nullptr) { return; } GEventListTail->Next = AtomicExchangeAcquire(&GNewEventList, GEventListHead); GEventListHead = GEventListTail = nullptr; } } // namespace Private } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 Field.cpp */ #if UE_TRACE_ENABLED namespace UE { namespace Trace { namespace Private { template static void Field_WriteAuxData(uint32 Index, int32 Size, CallbackType&& Callback) { static_assert( sizeof(Private::FWriteBuffer::Overflow) >= sizeof(FAuxHeader) + sizeof(uint8 /*AuxDataTerminal*/), "FWriteBuffer::Overflow is not large enough" ); if (Size == 0) { return; } FWriteBuffer* Buffer = Writer_GetBuffer(); auto* Header = (FAuxHeader*)(Buffer->Cursor); Header->Pack = Size << FAuxHeader::SizeShift; Header->Pack
|= Index << FAuxHeader::FieldShift; Header->Uid = uint8(EKnownEventUids::AuxData) << EKnownEventUids::_UidShift; Buffer->Cursor += sizeof(FAuxHeader); bool bCommit = ((uint8*)Header == Buffer->Committed); while (true) { if (Buffer->Cursor >= (uint8*)Buffer) { if (bCommit) { AtomicStoreRelease(&(uint8* volatile&)(Buffer->Committed), Buffer->Cursor); } Buffer = Writer_NextBuffer(0); Buffer->Partial = 1; bCommit = true; } int32 Remaining = int32((uint8*)Buffer - Buffer->Cursor); int32 SegmentSize = (Remaining < Size) ? Remaining : Size; Callback(Buffer->Cursor, SegmentSize); Buffer->Cursor += SegmentSize; Size -= SegmentSize; if (Size <= 0) { break; } } if (bCommit) { AtomicStoreRelease(&(uint8* volatile&)(Buffer->Committed), Buffer->Cursor); } } void Field_WriteAuxData(uint32 Index, const uint8* Data, int32 Size) { auto MemcpyLambda = [&Data] (uint8* Cursor, int32 NumBytes) { memcpy(Cursor, Data, NumBytes); Data += NumBytes; }; return Field_WriteAuxData(Index, Size, MemcpyLambda); } void Field_WriteStringAnsi(uint32 Index, const WIDECHAR* String, int32 Length) { int32 Size = Length; Size &= (FAuxHeader::SizeLimit - 1); auto WriteLambda = [&String] (uint8* Cursor, int32 NumBytes) { for (int32 i = 0; i < NumBytes; ++i) { *Cursor = uint8(*String & 0x7f); Cursor++; String++; } }; return Field_WriteAuxData(Index, Size, WriteLambda); } void Field_WriteStringAnsi(uint32 Index, const ANSICHAR* String, int32 Length) { int32 Size = Length * sizeof(String[0]); Size &= (FAuxHeader::SizeLimit - 1); // a very crude "clamp" return Field_WriteAuxData(Index, (const uint8*)String, Size); } void Field_WriteStringWide(uint32 Index, const WIDECHAR* String, int32 Length) { int32 Size = Length * sizeof(String[0]); Size &= (FAuxHeader::SizeLimit - 1); // (see above) return Field_WriteAuxData(Index, (const uint8*)String, Size); } } // namespace Private } // namespace Trace } // namespace UE #endif // UE_TRACE_ENABLED /* {{{1 Tail.cpp */ #if UE_TRACE_ENABLED #include #include #include 
namespace UE { namespace Trace { namespace Private { static_assert(ETransport::Active == ETransport::TidPacketSync, "Tail-tracing is transport aware"); uint32 GetEncodeMaxSize(uint32); int32 Encode(const void*, int32, void*, int32); void* Writer_MemoryAllocate(SIZE_T, uint32); void Writer_MemoryFree(void*, uint32); void Writer_SendData(uint32, uint8* __restrict, uint32); void Writer_SendDataRaw(const void*, uint32); class FPacketRing { public: struct FRange { const void* Data; uint32 Size; }; void Initialize(uint32 InSize); void Shutdown(); void Reset(); uint32 GetSize() const; bool IsActive() const; FRange GetBackPackets() const; FRange GetFrontPackets() const; template void IterateRanges(CallbackType&& Callback); template PacketType* Append(uint32 InSize); void BackUp(uint32 InSize); private: FTidPacketBase* AppendImpl(uint32 InSize); uint8* Data; uint32 Size; uint32 Cursor; uint32 Left; uint32 Right; }; static_assert(std::is_trivial(), "FPacketRing must be trivial"); void FPacketRing::Initialize(uint32 InSize) { Data = (uint8*)Writer_MemoryAllocate(InSize, 16); Size = InSize; Reset(); } void FPacketRing::Shutdown() { Writer_MemoryFree(Data, Size); Data = nullptr; } void FPacketRing::Reset() { Cursor = 0; Left = Right = Size; } uint32 FPacketRing::GetSize() const { return Size; } bool FPacketRing::IsActive() const { return Data != nullptr; } FPacketRing::FRange FPacketRing::GetBackPackets() const { return { Data + Left, Right - Left }; } FPacketRing::FRange FPacketRing::GetFrontPackets() const { return { Data, Cursor }; } template void FPacketRing::IterateRanges(CallbackType&& Callback) { FPacketRing::FRange Ranges[] = { GetBackPackets(), GetFrontPackets() }; for (const auto& Range : Ranges) { if (Range.Size == 0) { continue; } Callback(Range); } } template PacketType* FPacketRing::Append(uint32 InSize) { FTidPacketBase* Ptr = AppendImpl(InSize + sizeof(PacketType)); return static_cast(Ptr); } void FPacketRing::BackUp(uint32 InSize) { Cursor -= InSize; } 
// Carves out InSize bytes at the ring cursor, wrapping to the start and
// evicting the oldest packets from [Left,Right) until the reservation no
// longer overlaps them. Returns nullptr only if InSize can never fit.
FTidPacketBase* FPacketRing::AppendImpl(uint32 InSize)
{
	if (UNLIKELY(InSize > Size))
	{
		Reset();
		return nullptr;
	}

	uint32 NextCursor = Cursor + InSize;
	if (UNLIKELY(NextCursor > Size))
	{
		// Off the buffer's end; transfer [0,Cursor) to [Left,Right) and
		// restart the front range at offset zero.
		Left = 0;
		Right = Cursor;
		Cursor = 0;
		NextCursor = InSize;
	}

	// Stride whole packets off the back range until the overlap is resolved.
	while (true)
	{
		if (LIKELY(Left >= NextCursor))
		{
			break;
		}
		if (UNLIKELY(Left >= Right))
		{
			break;
		}
		const auto* TidPacket = (const FTidPacketBase*)(Data + Left);
		Left += TidPacket->PacketSize;
	}

	auto* TidPacket = (FTidPacketBase*)(Data + Cursor);
	TidPacket->PacketSize = uint16(InSize);
	Cursor = NextCursor;
	return TidPacket;
}

static FPacketRing GPacketRing; // = {};

// Stores a thread's data in the tail ring (encoded when large) and forwards
// the resulting packet to any connected consumer. Falls back to a plain send
// when the ring is inactive or the payload cannot fit.
void Writer_TailAppend(uint32 ThreadId, uint8* __restrict Data, uint32 Size, bool bPartial)
{
	if (!GPacketRing.IsActive())
	{
		return Writer_SendData(ThreadId, Data, Size);
	}

	if (uint32(Size + sizeof(FTidPacketEncoded)) > GPacketRing.GetSize())
	{
		GPacketRing.Reset();
		return Writer_SendData(ThreadId, Data, Size);
	}

	ThreadId &= FTidPacketBase::ThreadIdMask;
	ThreadId |= bPartial ? FTidPacketBase::PartialMarker : 0;

	// Small payloads are stored raw; larger ones go through Encode().
	if (Size <= 384)
	{
		// NOTE(review): Append's template argument appears lost in
		// transcription (expected Append<FTidPacket>).
		auto* Packet = GPacketRing.Append(Size);
		Packet->ThreadId = uint16(ThreadId);
		::memcpy(Packet->Data, Data, Size);
		Writer_SendDataRaw(Packet, Packet->PacketSize);
		return;
	}

	uint32 EncodeMaxSize = GetEncodeMaxSize(Size);
	// NOTE(review): expected Append<FTidPacketEncoded> here (see above).
	auto* Packet = GPacketRing.Append(EncodeMaxSize);
	Packet->ThreadId = uint16(ThreadId);
	Packet->ThreadId |= FTidPacketBase::EncodedMarker;
	Packet->DecodedSize = uint16(Size);
	uint32 EncodeSize = Encode(Data, Size, Packet->Data, EncodeMaxSize);

	// Return the over-reservation to the ring and shrink the packet to fit.
	uint32 BackUp = EncodeMaxSize - EncodeSize;
	GPacketRing.BackUp(BackUp);
	Packet->PacketSize -= uint16(BackUp);

	Writer_SendDataRaw(Packet, Packet->PacketSize);
}

// Flushes the buffered tail to a newly-attached consumer, oldest range first.
void Writer_TailOnConnect()
{
	if (!GPacketRing.IsActive())
	{
		return;
	}
	GPacketRing.IterateRanges([] (const FPacketRing::FRange& Range)
	{
		Writer_SendDataRaw(Range.Data, Range.Size);
	});
}

// Sets up tail tracing with BufferSize bytes, rounded up to 1KB with a 128KB
// floor. A size of zero or less leaves the tail ring disabled.
void Writer_InitializeTail(int32 BufferSize)
{
#if defined(STRESS_PACKET_RING)
	static void StressRingPacket();
	StressRingPacket();
#endif

	if (BufferSize <= 0)
	{
		return;
	}

	uint32 Rounding = (1 << 10) - 1;
	BufferSize = (BufferSize + Rounding) & ~Rounding;
	if (BufferSize < (128 << 10))
	{
		BufferSize = 128 << 10;
	}
	GPacketRing.Initialize(BufferSize);
}

void Writer_ShutdownTail()
{
	GPacketRing.Shutdown();
}

#if defined(STRESS_PACKET_RING)
// Debug-only exerciser that hammers a tiny ring with varied packet sizes.
static void StressRingPacket()
{
	FPacketRing Ring;
	Ring.Initialize(300);
	uint32 Bits = 0x0493'0493;
	for (int32 i = 0; i < 1024; ++i)
	{
		// NOTE(review): expected Append<FTidPacket> (template argument lost).
		FTidPacket* Packet = Ring.Append((Bits & 0x1f) + 6);
		Packet->ThreadId = i;
		if ((Bits & 0x15) == 0)
		{
			Packet->ThreadId |= FTidPacketBase::PartialMarker;
		}
		Ring.IterateRanges([] (const FPacketRing::FRange&) { /* nop */ });
		Bits = (Bits ^ 0xa93a'93a9) * 0x0493;
	}

	for (int32 i = 7; i < 448; i += 67)
	{
		if (auto* Packet = Ring.Append(i))
		{
			Packet->ThreadId = 0;
		}
	}
}
#endif // STRESS_PACKET_RING

} // namespace Private
} // namespace Trace
} // namespace UE

#endif // UE_TRACE_ENABLED

/*
FPacketRing ring-buffers packets. Internally the buffer is divided up into two
ranges; [0-Cursor) and [Left-Right) which are initially empty;

 0
 L
 C----------------------------------------------------------------------R

A packet consists of a size and a opaque blob of data. Reading the sizes allows
one to stride through the packets.

 L
 0[SZ]==============>[SZ]=============>[SZ]=======>C--------------------R

Eventually the next packet will not fit in the buffer because the next cursor
(N) is off the buffer's end;

 L
 0[SZ]==============>[SZ]=============>[SZ]=======>[SZ]==========>C-----R
                                                   [SZ]========>N

When this happens the 0-Cursor range is transferred to Left-Right and the
0-Cursor range is set such that it can contain the new packet being added.

 L[SZ]==============>[SZ]=============>[SZ]=======>[SZ]==========>R-----|
 0[SZ]========>C

The two ranges now overlap so packets are then removed from Left until there is
enough space for the new packet.

 0[SZ]========>C-----L[SZ]============>[SZ]=======>[SZ]==========>R-----|

The Left-Right range has the oldest packets.
Left will eventually advance to meet Right at which point the Left-Right range
becomes empty

The process above repeats as if the buffer was being filled for the first time.
*/

/* {{{1 TlsBuffer.cpp */

#if UE_TRACE_ENABLED

namespace UE {
namespace Trace {
namespace Private {

void Writer_TailAppend(uint32, uint8* __restrict, uint32, bool);
FWriteBuffer* Writer_AllocateBlockFromPool();
uint32 Writer_GetThreadId();
void Writer_FreeBlockListToPool(FWriteBuffer*, FWriteBuffer*);
extern uint64 GStartCycle;
extern FStatistics GTraceStatistics;

UE_TRACE_EVENT_BEGIN($Trace, ThreadTiming, NoSync)
	UE_TRACE_EVENT_FIELD(uint64, BaseTimestamp)
UE_TRACE_EVENT_END()

// Every thread starts on the GNullWriteBuffer sentinel; the first real write
// faults over to a pooled buffer in Writer_NextBufferInternal().
#define T_ALIGN alignas(PLATFORM_CACHE_LINE_SIZE)
static FWriteBuffer GNullWriteBuffer = { {}, 0, 0, nullptr, nullptr, (uint8*)&GNullWriteBuffer };
thread_local FWriteBuffer* GTlsWriteBuffer = &GNullWriteBuffer;
static FWriteBuffer* __restrict GActiveThreadList; // = nullptr;
T_ALIGN static FWriteBuffer* volatile GNewThreadList; // = nullptr;
#undef T_ALIGN

#if !IS_MONOLITHIC
TRACELOG_API FWriteBuffer* Writer_GetBuffer()
{
	return GTlsWriteBuffer;
}
#endif

// Allocates and wires in the calling thread's next write buffer. On a
// thread's first buffer this also registers the thread on GNewThreadList and
// logs its timing base; otherwise the retired buffer is chained behind the
// new one and its end-of-transmission offset is published.
static FWriteBuffer* Writer_NextBufferInternal()
{
	FWriteBuffer* NextBuffer = Writer_AllocateBlockFromPool();
	NextBuffer->Cursor = (uint8*)NextBuffer - NextBuffer->Size;
	NextBuffer->Committed = NextBuffer->Cursor;
	NextBuffer->Reaped = NextBuffer->Cursor;
	NextBuffer->EtxOffset = 0 - int32(sizeof(FWriteBuffer));
	NextBuffer->NextBuffer = nullptr;

	FWriteBuffer* CurrentBuffer = GTlsWriteBuffer;
	if (CurrentBuffer == &GNullWriteBuffer)
	{
		// First buffer for this thread.
		NextBuffer->ThreadId = uint16(Writer_GetThreadId());
		NextBuffer->PrevTimestamp = TimeGetTimestamp();
		NextBuffer->Partial = 0;

		GTlsWriteBuffer = NextBuffer;

		UE_TRACE_LOG($Trace, ThreadTiming, TraceLogChannel)
			<< ThreadTiming.BaseTimestamp(NextBuffer->PrevTimestamp - GStartCycle);

		// Lock-free push of this thread onto the new-threads list.
		for (;; PlatformYield())
		{
			NextBuffer->NextThread = AtomicLoadRelaxed(&GNewThreadList);
			if (AtomicCompareExchangeRelease(&GNewThreadList, NextBuffer, NextBuffer->NextThread))
			{
				break;
			}
		}
	}
	else
	{
		CurrentBuffer->NextBuffer = NextBuffer;
		NextBuffer->ThreadId = CurrentBuffer->ThreadId;
		NextBuffer->PrevTimestamp = CurrentBuffer->PrevTimestamp;
		NextBuffer->Partial = 0;

		GTlsWriteBuffer = NextBuffer;

		// Publish how far the retired buffer was written so the worker knows
		// when it has been fully drained.
		int32 EtxOffset = int32(PTRINT((uint8*)(CurrentBuffer) - CurrentBuffer->Cursor));
		AtomicStoreRelease(&(CurrentBuffer->EtxOffset), EtxOffset);
	}

	return NextBuffer;
}

// Retires the current TLS buffer (rewinding the failed Size reservation) and
// returns a fresh one with the reservation re-applied. Returns nullptr if the
// reservation can never fit in a buffer.
TRACELOG_API FWriteBuffer* Writer_NextBuffer(int32 Size)
{
	FWriteBuffer* CurrentBuffer = GTlsWriteBuffer;
	if (CurrentBuffer != &GNullWriteBuffer)
	{
		CurrentBuffer->Cursor -= Size;
	}

	FWriteBuffer* NextBuffer = Writer_NextBufferInternal();

	if (Size >= NextBuffer->Size)
	{
		return nullptr;
	}

	NextBuffer->Cursor += Size;
	return NextBuffer;
}

// Sends any newly-committed data from one buffer to the tail. Returns true
// while the buffer is still live (its thread hasn't ended it, or committed
// data remains past what was read); false once it can be retired.
static bool Writer_DrainBuffer(uint32 ThreadId, FWriteBuffer* Buffer)
{
	uint8* Committed = AtomicLoadRelaxed((uint8**)&Buffer->Committed);
	if (uint32 SizeToReap = uint32(Committed - Buffer->Reaped))
	{
#if TRACE_PRIVATE_STATISTICS
		GTraceStatistics.BytesTraced += SizeToReap;
#endif
		// Only the very first reap of a partial buffer carries the marker.
		bool bPartial = (Buffer->Partial == 1);
		bPartial &= UPTRINT(Buffer->Reaped + Buffer->Size) == UPTRINT(Buffer);
		Writer_TailAppend(ThreadId, Buffer->Reaped, SizeToReap, bPartial);
		Buffer->Reaped = Committed;
	}

	int32 EtxOffset = AtomicLoadAcquire(&Buffer->EtxOffset);
	return ((uint8*)Buffer - EtxOffset) > Committed;
}

// Worker-side pump: claims newly-registered threads, drains every thread's
// buffer chain, and returns exhausted buffers to the pool.
void Writer_DrainBuffers()
{
	struct FRetireList
	{
		FWriteBuffer* __restrict Head = nullptr;
		FWriteBuffer* __restrict Tail = nullptr;
		void Insert(FWriteBuffer* __restrict Buffer)
		{
			Buffer->NextBuffer = Head;
			Head = Buffer;
			Tail = (Tail != nullptr) ? Tail : Head;
		}
	};

	// Claim the new-threads list and reverse it into registration order.
	FWriteBuffer* __restrict NewThreadList = AtomicExchangeAcquire(&GNewThreadList, (FWriteBuffer*)nullptr);
	FWriteBuffer* __restrict NewThreadCursor = NewThreadList;
	NewThreadList = nullptr;
	while (NewThreadCursor != nullptr)
	{
		FWriteBuffer* __restrict NextThread = NewThreadCursor->NextThread;
		NewThreadCursor->NextThread = NewThreadList;
		NewThreadList = NewThreadCursor;
		NewThreadCursor = NextThread;
	}

	FRetireList RetireList;
	FWriteBuffer* __restrict ActiveThreadList = GActiveThreadList;
	GActiveThreadList = nullptr;

	for (FWriteBuffer* __restrict Buffer : { ActiveThreadList, NewThreadList })
	{
		for (FWriteBuffer* __restrict NextThread; Buffer != nullptr; Buffer = NextThread)
		{
			NextThread = Buffer->NextThread;
			uint32 ThreadId = Buffer->ThreadId;

			// Drain this thread's chain; stop at the first still-live buffer.
			for (FWriteBuffer* __restrict NextBuffer; Buffer != nullptr; Buffer = NextBuffer)
			{
				if (Writer_DrainBuffer(ThreadId, Buffer))
				{
					break;
				}
				NextBuffer = Buffer->NextBuffer;
				RetireList.Insert(Buffer);
			}

			// Threads with a live buffer go back onto the active list.
			if (Buffer != nullptr)
			{
				Buffer->NextThread = GActiveThreadList;
				GActiveThreadList = Buffer;
			}
		}
	}

	if (RetireList.Head != nullptr)
	{
		Writer_FreeBlockListToPool(RetireList.Head, RetireList.Tail);
	}
}

// Marks the calling thread's buffer as ended so the worker can retire it.
void Writer_EndThreadBuffer()
{
	if (GTlsWriteBuffer == &GNullWriteBuffer)
	{
		return;
	}
	int32 EtxOffset = int32(PTRINT((uint8*)GTlsWriteBuffer - GTlsWriteBuffer->Cursor));
	AtomicStoreRelaxed(&(GTlsWriteBuffer->EtxOffset), EtxOffset);
}

} // namespace Private
} // namespace Trace
} // namespace UE

#endif // UE_TRACE_ENABLED

/* {{{1 Trace.cpp */

#if UE_TRACE_ENABLED

namespace UE {
namespace Trace {

namespace Private {

void Writer_MemorySetHooks(AllocFunc, FreeFunc);
void Writer_Initialize(const FInitializeDesc&);
void Writer_WorkerCreate();
void Writer_Shutdown();
void Writer_Update();
bool Writer_SendTo(const ANSICHAR*, uint32);
bool Writer_WriteTo(const ANSICHAR*);
bool Writer_IsTracing();
bool Writer_Stop();
uint32 Writer_GetThreadId();
extern FStatistics GTraceStatistics;

} // namespace Private

// NOTE(review): the template parameter list appears lost in transcription
// (expected something like `template <int DestSize, typename SRC_TYPE>`).
template
static uint32
ToAnsiCheap(ANSICHAR (&Dest)[DestSize], const SRC_TYPE* Src) { const SRC_TYPE* Cursor = Src; for (ANSICHAR& Out : Dest) { Out = ANSICHAR(*Cursor++ & 0x7f); if (Out == '\0') { break; } } Dest[DestSize - 1] = '\0'; return uint32(UPTRINT(Cursor - Src)); }; void SetMemoryHooks(AllocFunc Alloc, FreeFunc Free) { Private::Writer_MemorySetHooks(Alloc, Free); } void Initialize(const FInitializeDesc& Desc) { Private::Writer_Initialize(Desc); FChannel::Initialize(); } void Shutdown() { Private::Writer_Shutdown(); } void Update() { Private::Writer_Update(); } void GetStatistics(FStatistics& Out) { Out = Private::GTraceStatistics; } bool SendTo(const TCHAR* InHost, uint32 Port) { char Host[256]; ToAnsiCheap(Host, InHost); return Private::Writer_SendTo(Host, Port); } bool WriteTo(const TCHAR* InPath) { char Path[512]; ToAnsiCheap(Path, InPath); return Private::Writer_WriteTo(Path); } bool IsTracing() { return Private::Writer_IsTracing(); } bool Stop() { return Private::Writer_Stop(); } bool IsChannel(const TCHAR* ChannelName) { ANSICHAR ChannelNameA[64]; ToAnsiCheap(ChannelNameA, ChannelName); return FChannel::FindChannel(ChannelNameA) != nullptr; } bool ToggleChannel(const TCHAR* ChannelName, bool bEnabled) { ANSICHAR ChannelNameA[64]; ToAnsiCheap(ChannelNameA, ChannelName); return FChannel::Toggle(ChannelNameA, bEnabled); } void EnumerateChannels(ChannelIterFunc IterFunc, void* User) { FChannel::EnumerateChannels(IterFunc, User); } void StartWorkerThread() { Private::Writer_WorkerCreate(); } UE_TRACE_CHANNEL_EXTERN(TraceLogChannel) UE_TRACE_EVENT_BEGIN($Trace, ThreadInfo, NoSync|Important) UE_TRACE_EVENT_FIELD(uint32, ThreadId) UE_TRACE_EVENT_FIELD(uint32, SystemId) UE_TRACE_EVENT_FIELD(int32, SortHint) UE_TRACE_EVENT_FIELD(AnsiString, Name) UE_TRACE_EVENT_END() UE_TRACE_EVENT_BEGIN($Trace, ThreadGroupBegin, NoSync|Important) UE_TRACE_EVENT_FIELD(AnsiString, Name) UE_TRACE_EVENT_END() UE_TRACE_EVENT_BEGIN($Trace, ThreadGroupEnd, NoSync|Important) UE_TRACE_EVENT_END() void 
ThreadRegister(const TCHAR* Name, uint32 SystemId, int32 SortHint) { ANSICHAR NameA[96]; uint32 ThreadId = Private::Writer_GetThreadId(); uint32 NameLen = ToAnsiCheap(NameA, Name); UE_TRACE_LOG($Trace, ThreadInfo, TraceLogChannel, NameLen * sizeof(ANSICHAR)) << ThreadInfo.ThreadId(ThreadId) << ThreadInfo.SystemId(SystemId) << ThreadInfo.SortHint(SortHint) << ThreadInfo.Name(NameA, NameLen); } void ThreadGroupBegin(const TCHAR* Name) { ANSICHAR NameA[96]; uint32 NameLen = ToAnsiCheap(NameA, Name); UE_TRACE_LOG($Trace, ThreadGroupBegin, TraceLogChannel, NameLen * sizeof(ANSICHAR)) << ThreadGroupBegin.Name(Name, NameLen); } void ThreadGroupEnd() { UE_TRACE_LOG($Trace, ThreadGroupEnd, TraceLogChannel); } } // namespace Trace } // namespace UE #else TRACELOG_API int TraceLogExportedSymbol = 0; #endif // UE_TRACE_ENABLED /* {{{1 Writer.cpp */ #if UE_TRACE_ENABLED #include #include #if PLATFORM_WINDOWS # define TRACE_PRIVATE_STOMP 0 // 1=overflow, 2=underflow # if TRACE_PRIVATE_STOMP # endif #else # define TRACE_PRIVATE_STOMP 0 #endif #ifndef TRACE_PRIVATE_BUFFER_SEND # define TRACE_PRIVATE_BUFFER_SEND 0 #endif namespace UE { namespace Trace { namespace Private { int32 Encode(const void*, int32, void*, int32); void Writer_SendData(uint32, uint8* __restrict, uint32); void Writer_InitializeTail(int32); void Writer_ShutdownTail(); void Writer_TailOnConnect(); void Writer_InitializeSharedBuffers(); void Writer_ShutdownSharedBuffers(); void Writer_UpdateSharedBuffers(); void Writer_InitializeCache(); void Writer_ShutdownCache(); void Writer_CacheOnConnect(); void Writer_InitializePool(); void Writer_ShutdownPool(); void Writer_DrainBuffers(); void Writer_EndThreadBuffer(); uint32 Writer_GetControlPort(); void Writer_UpdateControl(); void Writer_InitializeControl(); void Writer_ShutdownControl(); bool Writer_IsTracing(); UE_TRACE_EVENT_BEGIN($Trace, NewTrace, Important|NoSync) UE_TRACE_EVENT_FIELD(uint64, StartCycle) UE_TRACE_EVENT_FIELD(uint64, CycleFrequency) 
UE_TRACE_EVENT_FIELD(uint16, Endian) UE_TRACE_EVENT_FIELD(uint8, PointerSize) UE_TRACE_EVENT_END() static bool GInitialized; // = false; FStatistics GTraceStatistics; // = {}; uint64 GStartCycle; // = 0; TRACELOG_API uint32 volatile GLogSerial; // = 0; static uint32 GUpdateCounter; // = 0; struct FWriteTlsContext { ~FWriteTlsContext(); uint32 GetThreadId(); private: uint32 ThreadId = 0; }; FWriteTlsContext::~FWriteTlsContext() { if (GInitialized) { Writer_EndThreadBuffer(); } } uint32 FWriteTlsContext::GetThreadId() { if (ThreadId) { return ThreadId; } static uint32 volatile Counter; ThreadId = AtomicAddRelaxed(&Counter, 1u) + ETransportTid::Bias; return ThreadId; } thread_local FWriteTlsContext GTlsContext; uint32 Writer_GetThreadId() { return GTlsContext.GetThreadId(); } void* (*AllocHook)(SIZE_T, uint32); // = nullptr void (*FreeHook)(void*, SIZE_T); // = nullptr void Writer_MemorySetHooks(decltype(AllocHook) Alloc, decltype(FreeHook) Free) { AllocHook = Alloc; FreeHook = Free; } void* Writer_MemoryAllocate(SIZE_T Size, uint32 Alignment) { TWriteBufferRedirect<1 << 10> TraceData; void* Ret = nullptr; #if TRACE_PRIVATE_STOMP static uint8* Base; if (Base == nullptr) { Base = (uint8*)VirtualAlloc(0, 1ull << 40, MEM_RESERVE, PAGE_READWRITE); } static SIZE_T PageSize = 4096; Base += PageSize; uint8* NextBase = Base + ((PageSize - 1 + Size) & ~(PageSize - 1)); VirtualAlloc(Base, SIZE_T(NextBase - Base), MEM_COMMIT, PAGE_READWRITE); #if TRACE_PRIVATE_STOMP == 1 Ret = NextBase - Size; #elif TRACE_PRIVATE_STOMP == 2 Ret = Base; #endif Base = NextBase; #else // TRACE_PRIVATE_STOMP if (AllocHook != nullptr) { Ret = AllocHook(Size, Alignment); } else { #if defined(_MSC_VER) Ret = _aligned_malloc(Size, Alignment); #elif (defined(__ANDROID_API__) && __ANDROID_API__ < 28) || defined(__APPLE__) posix_memalign(&Ret, Alignment, Size); #else Ret = aligned_alloc(Alignment, Size); #endif } #endif // TRACE_PRIVATE_STOMP #if TRACE_PRIVATE_STATISTICS 
AtomicAddRelaxed(>raceStatistics.MemoryUsed, uint64(Size)); #endif return Ret; } void Writer_MemoryFree(void* Address, uint32 Size) { #if TRACE_PRIVATE_STOMP if (Address == nullptr) { return; } *(uint8*)Address = 0xfe; MEMORY_BASIC_INFORMATION MemInfo; VirtualQuery(Address, &MemInfo, sizeof(MemInfo)); DWORD Unused; VirtualProtect(MemInfo.BaseAddress, MemInfo.RegionSize, PAGE_READONLY, &Unused); #else // TRACE_PRIVATE_STOMP TWriteBufferRedirect<1 << 10> TraceData; if (FreeHook != nullptr) { FreeHook(Address, Size); } else { #if defined(_MSC_VER) _aligned_free(Address); #else free(Address); #endif } #endif // TRACE_PRIVATE_STOMP #if TRACE_PRIVATE_STATISTICS AtomicAddRelaxed(>raceStatistics.MemoryUsed, uint64(-int64(Size))); #endif } static UPTRINT GDataHandle; // = 0 UPTRINT GPendingDataHandle; // = 0 #if TRACE_PRIVATE_BUFFER_SEND static const SIZE_T GSendBufferSize = 1 << 20; // 1Mb uint8* GSendBuffer; // = nullptr; uint8* GSendBufferCursor; // = nullptr; static bool Writer_FlushSendBuffer() { if( GSendBufferCursor > GSendBuffer ) { if (!IoWrite(GDataHandle, GSendBuffer, GSendBufferCursor - GSendBuffer)) { IoClose(GDataHandle); GDataHandle = 0; return false; } GSendBufferCursor = GSendBuffer; } return true; } #else static bool Writer_FlushSendBuffer() { return true; } #endif static void Writer_SendDataImpl(const void* Data, uint32 Size) { #if TRACE_PRIVATE_STATISTICS GTraceStatistics.BytesSent += Size; #endif #if TRACE_PRIVATE_BUFFER_SEND if (GSendBufferCursor + Size > GSendBuffer + GSendBufferSize) { if (!Writer_FlushSendBuffer()) { return; } } if (Size > GSendBufferSize) { if (!IoWrite(GDataHandle, Data, Size)) { IoClose(GDataHandle); GDataHandle = 0; } } else { memcpy(GSendBufferCursor, Data, Size); GSendBufferCursor += Size; } #else if (!IoWrite(GDataHandle, Data, Size)) { IoClose(GDataHandle); GDataHandle = 0; } #endif } void Writer_SendDataRaw(const void* Data, uint32 Size) { if (!GDataHandle) { return; } Writer_SendDataImpl(Data, Size); } void 
// Sends one thread's payload as a tid packet; payloads over 384 bytes are run
// through Encode() first. Assumes Data has sizeof(FTidPacket) writable bytes
// of headroom in front of it.
Writer_SendData(uint32 ThreadId, uint8* __restrict Data, uint32 Size)
{
	static_assert(ETransport::Active == ETransport::TidPacketSync, "Active should be set to what the compiled code uses. It is used to track places that assume transport packet format");

	if (!GDataHandle)
	{
		return;
	}

	// Small payloads are framed in place and sent raw.
	if (Size <= 384)
	{
		Data -= sizeof(FTidPacket);
		Size += sizeof(FTidPacket);
		auto* Packet = (FTidPacket*)Data;
		Packet->ThreadId = uint16(ThreadId & FTidPacketBase::ThreadIdMask);
		Packet->PacketSize = uint16(Size);
		Writer_SendDataImpl(Data, Size);
		return;
	}

	TTidPacketEncoded<8192 + 64> Packet;

	Packet.ThreadId = FTidPacketBase::EncodedMarker;
	Packet.ThreadId |= uint16(ThreadId & FTidPacketBase::ThreadIdMask);
	Packet.DecodedSize = uint16(Size);
	Packet.PacketSize = uint16(Encode(Data, Packet.DecodedSize, Packet.Data, sizeof(Packet.Data)));
	Packet.PacketSize += sizeof(FTidPacketEncoded);

	Writer_SendDataImpl(&Packet, Packet.PacketSize);
}

// Drains newly-registered event descriptions into a redirect buffer and sends
// them on the Events tid, flushing in chunks as capacity is approached.
static void Writer_DescribeEvents()
{
	TWriteBufferRedirect<4096> TraceData;

	FEventNode::FIter Iter = FEventNode::ReadNew();
	while (const FEventNode* Event = Iter.GetNext())
	{
		Event->Describe();

		// Flush before the redirect buffer can overflow.
		if (TraceData.GetSize() >= (TraceData.GetCapacity() - 512))
		{
			Writer_SendData(ETransportTid::Events, TraceData.GetData(), TraceData.GetSize());
			TraceData.Reset();
		}
	}

	if (TraceData.GetSize())
	{
		Writer_SendData(ETransportTid::Events, TraceData.GetData(), TraceData.GetSize());
	}
}

// Announces channels registered since the last call.
static void Writer_AnnounceChannels()
{
	FChannel::Iter Iter = FChannel::ReadNew();
	while (const FChannel* Channel = Iter.GetNext())
	{
		Channel->Announce();
	}
}

static void Writer_DescribeAnnounce()
{
	if (!GDataHandle)
	{
		return;
	}

	Writer_AnnounceChannels();
	Writer_DescribeEvents();
}

// A small number of sync packets are emitted after each new connection
// (countdown armed in Writer_UpdateConnection()).
static int8 GSyncPacketCountdown; // = 0
static const int8 GNumSyncPackets = 3;

static void Writer_SendSync()
{
	if (GSyncPacketCountdown <= 0)
	{
		return;
	}

	FTidPacketBase SyncPacket = { sizeof(SyncPacket), ETransportTid::Sync };
	Writer_SendDataImpl(&SyncPacket, sizeof(SyncPacket));
	--GSyncPacketCountdown;
}

// Promotes a pending connection handle to the active one (sending the
// handshake and transport header), or processes a deferred close request.
static
bool Writer_UpdateConnection()
{
	if (!GPendingDataHandle)
	{
		return false;
	}

	// Values near ~0 request a close (see Writer_Stop()); CloseInertia extra
	// updates run first so recently-traced data still gets flushed.
	static const uint32 CloseInertia = 2;
	if (GPendingDataHandle >= (~0ull - CloseInertia))
	{
		--GPendingDataHandle;
		if (GPendingDataHandle == (~0ull - CloseInertia))
		{
			if (GDataHandle)
			{
				Writer_FlushSendBuffer();
				IoClose(GDataHandle);
			}
			GDataHandle = 0;
			GPendingDataHandle = 0;
		}
		return true;
	}

	// Reject a new connection if one is already active.
	if (GDataHandle)
	{
		IoClose(GPendingDataHandle);
		GPendingDataHandle = 0;
		return false;
	}

	GDataHandle = GPendingDataHandle;
	GPendingDataHandle = 0;

#if TRACE_PRIVATE_BUFFER_SEND
	if (!GSendBuffer)
	{
		// NOTE(review): static_cast's target type appears lost in
		// transcription (expected static_cast<uint8*>).
		GSendBuffer = static_cast(Writer_MemoryAllocate(GSendBufferSize, 16));
	}
	GSendBufferCursor = GSendBuffer;
#endif

	// Handshake: magic ('TRC2' little-endian) plus a small metadata block that
	// currently carries only the control port.
	struct FHandshake
	{
		uint32 Magic = '2' | ('C' << 8) | ('R' << 16) | ('T' << 24);
		uint16 MetadataSize = uint16(4); // = sizeof(MetadataField0 + ControlPort)
		uint16 MetadataField0 = uint16(sizeof(ControlPort) | (ControlPortFieldId << 8));
		uint16 ControlPort = uint16(Writer_GetControlPort());
		enum
		{
			Size = 10,
			ControlPortFieldId = 0,
		};
	};
	FHandshake Handshake;
	bool bOk = IoWrite(GDataHandle, &Handshake, FHandshake::Size);

	const struct {
		uint8 TransportVersion = ETransport::TidPacketSync;
		uint8 ProtocolVersion = EProtocol::Id;
	} TransportHeader;
	bOk &= IoWrite(GDataHandle, &TransportHeader, sizeof(TransportHeader));

	if (!bOk)
	{
		IoClose(GDataHandle);
		GDataHandle = 0;
		return false;
	}

	GTraceStatistics.BytesSent = 0;
	GTraceStatistics.BytesTraced = 0;

	// Replay the state a new consumer needs: event descriptions, the important
	// cache, and the tail of recent data.
	FEventNode::OnConnect();
	Writer_DescribeEvents();
	Writer_CacheOnConnect();
	Writer_TailOnConnect();

	GSyncPacketCountdown = GNumSyncPackets;
	return true;
}

static UPTRINT GWorkerThread;          // = 0;
static volatile bool GWorkerThreadQuit; // = false;
static volatile unsigned int GUpdateInProgress = 1; // Don't allow updates until initialized

// One pump of the writer; guarded so concurrent callers never interleave.
static void Writer_WorkerUpdate()
{
	if (!AtomicCompareExchangeAcquire(&GUpdateInProgress, 1u, 0u))
	{
		return;
	}

	Writer_UpdateControl();
	Writer_UpdateConnection();
	Writer_DescribeAnnounce();
	Writer_UpdateSharedBuffers();
	Writer_DrainBuffers();
	Writer_SendSync();

#if TRACE_PRIVATE_BUFFER_SEND
	const uint32 FlushSendBufferCadenceMask = 8-1; // Flush every 8 calls
	if( (++GUpdateCounter & FlushSendBufferCadenceMask) == 0)
	{
		Writer_FlushSendBuffer();
	}
#endif

	AtomicExchangeRelease(&GUpdateInProgress, 0u);
}

// Worker thread entry point: registers itself then pumps every 17ms until
// asked to quit.
static void Writer_WorkerThread()
{
	ThreadRegister(TEXT("Trace"), 0, INT_MAX);

	while (!GWorkerThreadQuit)
	{
		Writer_WorkerUpdate();
		const uint32 SleepMs = 17;
		ThreadSleep(SleepMs);
	}
}

void Writer_WorkerCreate()
{
	if (GWorkerThread)
	{
		return;
	}
	GWorkerThread = ThreadCreate("TraceWorker", Writer_WorkerThread);
}

// Stops and joins the worker, running one final update to flush.
static void Writer_WorkerJoin()
{
	if (!GWorkerThread)
	{
		return;
	}

	GWorkerThreadQuit = true;

	ThreadJoin(GWorkerThread);
	ThreadDestroy(GWorkerThread);

	Writer_WorkerUpdate();

	GWorkerThread = 0;
}

// One-time writer start-up; emits the NewTrace event carrying the clock
// baseline, endian marker and pointer size.
static void Writer_InternalInitializeImpl()
{
	if (GInitialized)
	{
		return;
	}
	GInitialized = true;

	GStartCycle = TimeGetTimestamp();

	Writer_InitializeSharedBuffers();
	Writer_InitializePool();
	Writer_InitializeControl();

	UE_TRACE_LOG($Trace, NewTrace, TraceLogChannel)
		<< NewTrace.StartCycle(GStartCycle)
		<< NewTrace.CycleFrequency(TimeGetFrequency())
		<< NewTrace.Endian(uint16(0x524d))
		<< NewTrace.PointerSize(uint8(sizeof(void*)));
}

// Full teardown: joins the worker, flushes and closes any open connection,
// then shuts down the subsystems in reverse dependency order.
static void Writer_InternalShutdown()
{
	if (!GInitialized)
	{
		return;
	}

	Writer_WorkerJoin();

	if (GDataHandle)
	{
		Writer_FlushSendBuffer();
		IoClose(GDataHandle);
		GDataHandle = 0;
	}

	Writer_ShutdownControl();
	Writer_ShutdownPool();
	Writer_ShutdownSharedBuffers();
	Writer_ShutdownCache();
	Writer_ShutdownTail();

#if TRACE_PRIVATE_BUFFER_SEND
	if (GSendBuffer)
	{
		Writer_MemoryFree(GSendBuffer, GSendBufferSize);
		GSendBuffer = nullptr;
		GSendBufferCursor = nullptr;
	}
#endif

	GInitialized = false;
}

// Lazily initializes the writer on first use; a function-local static makes
// the initialization run exactly once.
void Writer_InternalInitialize()
{
	using namespace Private;

	if (!GInitialized)
	{
		static struct FInitializer
		{
			FInitializer()
			{
				Writer_InternalInitializeImpl();
			}
			~FInitializer()
			{
				/* We'll not shut anything down here so we can hopefully capture
				 * any subsequent events. However, we will shutdown the worker
				 * thread and leave it for something else to call update() (mem
				 * tracing at time of writing). Windows will have already done
				 * this implicitly in ExitProcess() anyway. */
				Writer_WorkerJoin();
			}
		} Initializer;
	}
}

// Public-side initialization: configures the tail ring, the important-event
// cache, and optionally the worker thread, then enables updates.
void Writer_Initialize(const FInitializeDesc& Desc)
{
	Writer_InitializeTail(Desc.TailSizeBytes);

	if (Desc.bUseImportantCache)
	{
		Writer_InitializeCache();
	}

	if (Desc.bUseWorkerThread)
	{
		Writer_WorkerCreate();
	}

	// Allow Writer_WorkerUpdate() to run from now on.
	GUpdateInProgress = 0;
}

void Writer_Shutdown()
{
	Writer_InternalShutdown();
}

// Manual pump for hosts that opted out of the worker thread.
void Writer_Update()
{
	if (!GWorkerThread)
	{
		Writer_WorkerUpdate();
	}
}

// Starts connecting to a consumer at Host:Port (0 selects the default, 1981).
// The handle becomes active on the next update.
bool Writer_SendTo(const ANSICHAR* Host, uint32 Port)
{
	if (GPendingDataHandle || GDataHandle)
	{
		return false;
	}

	Writer_InternalInitialize();

	Port = Port ? Port : 1981;
	UPTRINT DataHandle = TcpSocketConnect(Host, uint16(Port));
	if (!DataHandle)
	{
		return false;
	}

	GPendingDataHandle = DataHandle;
	return true;
}

// Starts writing the trace to a file; the handle activates on the next update.
bool Writer_WriteTo(const ANSICHAR* Path)
{
	if (GPendingDataHandle || GDataHandle)
	{
		return false;
	}

	Writer_InternalInitialize();

	UPTRINT DataHandle = FileOpen(Path);
	if (!DataHandle)
	{
		return false;
	}

	GPendingDataHandle = DataHandle;
	return true;
}

bool Writer_IsTracing()
{
	return GDataHandle != 0 || GPendingDataHandle != 0;
}

// Requests a deferred stop; Writer_UpdateConnection() performs the close.
bool Writer_Stop()
{
	if (GPendingDataHandle || !GDataHandle)
	{
		return false;
	}

	GPendingDataHandle = ~UPTRINT(0);
	return true;
}

} // namespace Private
} // namespace Trace
} // namespace UE

#endif // UE_TRACE_ENABLED

/* {{{1 AndroidTrace.cpp */

#if UE_TRACE_ENABLED && PLATFORM_ANDROID

// NOTE(review): the include targets appear lost in transcription.
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace UE {
namespace Trace {
namespace Private {

// Spawns the trace worker thread via pthreads; returns 0 on failure. The
// thunk names the thread "Trace" and adapts pthread's void* signature.
UPTRINT ThreadCreate(const ANSICHAR* Name, void (*Entry)())
{
	void* (*PthreadThunk)(void*) = [] (void* Param) -> void * {
		typedef void (*EntryType)(void);
		pthread_setname_np(pthread_self(), "Trace");
		(EntryType(Param))();
		return nullptr;
	};

	pthread_t ThreadHandle;
	if (pthread_create(&ThreadHandle, nullptr, PthreadThunk,
// ---- POSIX trace backend (tail) --------------------------------------------
// NOTE(review): this span begins mid-way through ThreadCreate() (its opening
// lines precede this excerpt), and the extraction stripped the contents of
// every angle-bracket pair (#include <...>, static_cast<...>, ...); the token
// stream is preserved exactly as found.
reinterpret_cast(Entry)) != 0)
	{
		return 0;
	}
	return static_cast(ThreadHandle);
}

// Sleep the calling thread; usleep() takes microseconds.
void ThreadSleep(uint32 Milliseconds)
{
	usleep(Milliseconds * 1000U);
}

// Block until the thread identified by Handle exits.
void ThreadJoin(UPTRINT Handle)
{
	pthread_join(static_cast(Handle), nullptr);
}

// No-op: pthreads need no explicit destroy after a join.
void ThreadDestroy(UPTRINT Handle)
{
}

// Timestamps from TimeGetTimestamp() below are microseconds, so the
// frequency is a fixed 1MHz.
uint64 TimeGetFrequency()
{
	return 1000000ull;
}

// Monotonic clock sampled in microseconds (seconds*1e6 + ns/1e3).
uint64 TimeGetTimestamp()
{
	struct timespec TimeSpec;
	clock_gettime(CLOCK_MONOTONIC, &TimeSpec);
	return static_cast(static_cast(TimeSpec.tv_sec) * 1000000ULL + static_cast(TimeSpec.tv_nsec) / 1000ULL);
}

// Toggle O_NONBLOCK on a socket via fcntl(); returns false on failure.
static bool TcpSocketSetNonBlocking(int Socket, bool bNonBlocking)
{
	int Flags = fcntl(Socket, F_GETFL, 0);
	if (Flags == -1) { return false; }
	Flags = bNonBlocking ? (Flags|O_NONBLOCK) : (Flags & ~O_NONBLOCK);
	return fcntl(Socket, F_SETFL, Flags) >= 0;
}

// Connect a blocking TCP socket to Host:Port. Host must be a dotted quad
// (inet_addr; no name resolution here). Returns fd+1 so 0 can mean failure.
UPTRINT TcpSocketConnect(const ANSICHAR* Host, uint16 Port)
{
	int Socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (Socket < 0) { return 0; }
	sockaddr_in SockAddr;
	SockAddr.sin_family = AF_INET;
	SockAddr.sin_addr.s_addr = inet_addr(Host);
	SockAddr.sin_port = htons(Port);
	int Result = connect(Socket, (sockaddr*)&SockAddr, sizeof(SockAddr));
	if (Result < 0) { close(Socket); return 0; }
	if (!TcpSocketSetNonBlocking(Socket, false)) { close(Socket); return 0; }
	return UPTRINT(Socket + 1);
}

// Create a non-blocking listen socket on INADDR_ANY:Port (backlog 1).
// Returns fd+1, or 0 on failure.
UPTRINT TcpSocketListen(uint16 Port)
{
	int Socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (Socket < 0) { return 0; }
	sockaddr_in SockAddr;
	SockAddr.sin_family = AF_INET;
	SockAddr.sin_addr.s_addr = 0;
	SockAddr.sin_port = htons(Port);
	int Result = bind(Socket, reinterpret_cast(&SockAddr), sizeof(SockAddr));
	if (Result < 0) { close(Socket); return 0; }
	Result = listen(Socket, 1);
	if (Result < 0) { close(Socket); return 0; }
	if (!TcpSocketSetNonBlocking(Socket, true)) { close(Socket); return 0; }
	return UPTRINT(Socket + 1);
}

// Accept on a non-blocking listen socket. Returns 1 on success (Out = fd+1),
// 0 if the accept would block, -1 on error.
int32 TcpSocketAccept(UPTRINT Socket, UPTRINT& Out)
{
	int Inner = Socket - 1;
	Inner = accept(Inner, nullptr, nullptr);
	if (Inner < 0)
	{
		return (errno == EAGAIN || errno == EWOULDBLOCK) - 1; // 0 if would block else -1
	}
	if (!TcpSocketSetNonBlocking(Inner, false)) { close(Inner); return 0; }
	Out = UPTRINT(Inner + 1);
	return 1;
}

// Zero-timeout select() poll: true if the socket is readable.
// NOTE(review): a select() error (-1) also reads as "has data" here.
bool TcpSocketHasData(UPTRINT Socket)
{
	int Inner = Socket - 1;
	fd_set FdSet;
	FD_ZERO(&FdSet);
	FD_SET(Inner, &FdSet);
	timeval TimeVal = {};
	return (select(Inner + 1, &FdSet, nullptr, nullptr, &TimeVal) != 0);
}

// Write Size bytes to the +1-biased fd; true only on a complete write.
bool IoWrite(UPTRINT Handle, const void* Data, uint32 Size)
{
	int Inner = int(Handle) - 1;
	return write(Inner, Data, Size) == Size;
}

// Read up to Size bytes from the +1-biased fd; bytes read, or -1 on error.
int32 IoRead(UPTRINT Handle, void* Data, uint32 Size)
{
	int Inner = int(Handle) - 1;
	return read(Inner, Data, Size);
}

// Close the +1-biased fd.
void IoClose(UPTRINT Handle)
{
	int Inner = int(Handle) - 1;
	close(Inner);
}

// Create/truncate a world-readable output file; returns fd+1 or 0.
UPTRINT FileOpen(const ANSICHAR* Path)
{
	int Flags = O_CREAT|O_WRONLY|O_TRUNC;
	int Mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH;
	int Out = open(Path, Flags, Mode);
	if (Out < 0) { return 0; }
	return UPTRINT(Out + 1);
}

} // namespace Private
} // namespace Trace
} // namespace UE

#endif // UE_TRACE_ENABLED

/* {{{1 AppleTrace.cpp */

#if UE_TRACE_ENABLED && PLATFORM_APPLE

// NOTE(review): the header names were stripped by the extraction.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace UE {
namespace Trace {
namespace Private {

// Start a worker thread; Entry is smuggled through the pthread void* param.
// Returns the pthread_t as an opaque handle, or 0 on failure. Name is unused.
UPTRINT ThreadCreate(const ANSICHAR* Name, void (*Entry)())
{
	void* (*PthreadThunk)(void*) = [] (void* Param) -> void * {
		typedef void (*EntryType)(void);
		(EntryType(Param))();
		return nullptr;
	};
	pthread_t ThreadHandle;
	if (pthread_create(&ThreadHandle, nullptr, PthreadThunk, reinterpret_cast(Entry)) != 0)
	{
		return 0;
	}
	return reinterpret_cast(ThreadHandle);
}

// Sleep the calling thread; usleep() takes microseconds.
void ThreadSleep(uint32 Milliseconds)
{
	usleep(Milliseconds * 1000U);
}

// Block until the thread identified by Handle exits.
void ThreadJoin(UPTRINT Handle)
{
	pthread_join(reinterpret_cast(Handle), nullptr);
}

// No-op: pthreads need no explicit destroy after a join.
void ThreadDestroy(UPTRINT Handle)
{
}

// Ticks-per-second of mach_absolute_time(), derived from the mach timebase
// (1e9 * denom / numer).
uint64 TimeGetFrequency()
{
	mach_timebase_info_data_t Info;
	mach_timebase_info(&Info);
	return (uint64(1 * 1000 * 1000 * 1000) * uint64(Info.denom)) / uint64(Info.numer);
}

// Raw mach monotonic tick count; pair with TimeGetFrequency() above.
TRACELOG_API uint64 TimeGetTimestamp()
{
	return mach_absolute_time();
}

// Toggle O_NONBLOCK on a socket via fcntl(); returns false on failure.
static bool TcpSocketSetNonBlocking(int Socket, bool bNonBlocking)
{
	int Flags = fcntl(Socket, F_GETFL, 0);
	if (Flags == -1) { return false; }
	Flags = bNonBlocking ? (Flags|O_NONBLOCK) : (Flags & ~O_NONBLOCK);
	return fcntl(Socket, F_SETFL, Flags) >= 0;
}

// Connect a blocking TCP socket to Host:Port. On Mac the host may be a name
// (getaddrinfo); elsewhere it must be a dotted quad. Returns fd+1 or 0.
UPTRINT TcpSocketConnect(const ANSICHAR* Host, uint16 Port)
{
#if PLATFORM_MAC
	// We're only accepting named hosts on desktop platforms
	struct FAddrInfoPtr
	{
		// NOTE(review): Value is left uninitialized if getaddrinfo() fails,
		// so on that early-return path the destructor frees a garbage
		// pointer (the Unix variant null-checks; this one does not).
		~FAddrInfoPtr()	{ freeaddrinfo(Value); }
		addrinfo* operator -> ()	{ return Value; }
		addrinfo** operator & ()	{ return &Value; }
		addrinfo* Value;
	};
	FAddrInfoPtr Info;
	addrinfo Hints = {};
	Hints.ai_family = AF_INET;
	Hints.ai_socktype = SOCK_STREAM;
	Hints.ai_protocol = IPPROTO_TCP;
	if (getaddrinfo(Host, nullptr, &Hints, &Info))
	{
		return 0;
	}
	// NOTE(review): &Info resolves through the overloaded operator& to the
	// address of Info.Value and is never null - this guard is dead code;
	// Info.Value == nullptr was presumably intended.
	if (&Info == nullptr)
	{
		return 0;
	}
	auto* SockAddr = (sockaddr_in*)Info->ai_addr;
	SockAddr->sin_port = htons(Port);
	int SockAddrSize = int(Info->ai_addrlen);
#else
	sockaddr_in SockAddrIp;
	SockAddrIp.sin_family = AF_INET;
	SockAddrIp.sin_addr.s_addr = inet_addr(Host);
	SockAddrIp.sin_port = htons(Port);
	auto* SockAddr = &SockAddrIp;
	int SockAddrSize = sizeof(SockAddrIp);
#endif
	int Socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (Socket < 0) { return 0; }
	int Result = connect(Socket, (sockaddr*)SockAddr, SockAddrSize);
	if (Result < 0) { close(Socket); return 0; }
	if (!TcpSocketSetNonBlocking(Socket, false)) { close(Socket); return 0; }
	return UPTRINT(Socket + 1);
}

// Create a non-blocking listen socket on INADDR_ANY:Port (backlog 1).
// Returns fd+1, or 0 on failure.
UPTRINT TcpSocketListen(uint16 Port)
{
	int Socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (Socket < 0) { return 0; }
	sockaddr_in SockAddr;
	SockAddr.sin_family = AF_INET;
	SockAddr.sin_addr.s_addr = 0;
	SockAddr.sin_port = htons(Port);
	int Result = bind(Socket, reinterpret_cast(&SockAddr), sizeof(SockAddr));
	if (Result < 0) { close(Socket); return 0; }
	Result = listen(Socket, 1);
	if (Result < 0) { close(Socket); return 0; }
	if (!TcpSocketSetNonBlocking(Socket, true)) { close(Socket); return 0; }
	return UPTRINT(Socket + 1);
}
// Accept on a non-blocking listen socket. Returns 1 on success (Out = fd+1),
// 0 if the accept would block, -1 on error.
int32 TcpSocketAccept(UPTRINT Socket, UPTRINT& Out)
{
	int Inner = int(Socket - 1);
	Inner = accept(Inner, nullptr, nullptr);
	if (Inner < 0)
	{
		return (errno == EAGAIN || errno == EWOULDBLOCK) - 1; // 0 if would block else -1
	}
	if (!TcpSocketSetNonBlocking(Inner, false)) { close(Inner); return 0; }
	Out = UPTRINT(Inner + 1);
	return 1;
}

// Zero-timeout select() poll: true if the socket is readable.
// NOTE(review): result == -1 already satisfies result != 0, so the extra
// ETIMEDOUT clause is redundant; any select() error reads as "has data".
bool TcpSocketHasData(UPTRINT Socket)
{
	int Inner = int(Socket - 1);
	fd_set FdSet;
	FD_ZERO(&FdSet);
	FD_SET(Inner, &FdSet);
	timeval TimeVal = {};
	int result = select(Inner + 1, &FdSet, nullptr, nullptr, &TimeVal);
	return ((result != 0) || ((result == -1) && (errno == ETIMEDOUT)));
}

// Write Size bytes to the +1-biased fd; true only on a complete write.
bool IoWrite(UPTRINT Handle, const void* Data, uint32 Size)
{
	int Inner = int(Handle - 1);
	return (write(Inner, Data, Size) == Size);
}

// Read up to Size bytes from the +1-biased fd; bytes read, or -1 on error.
int32 IoRead(UPTRINT Handle, void* Data, uint32 Size)
{
	int Inner = int(Handle - 1);
	return read(Inner, Data, Size);
}

// Close the +1-biased fd.
void IoClose(UPTRINT Handle)
{
	int Inner = int(Handle - 1);
	close(Inner);
}

// Create/truncate the output file; O_SHLOCK (BSD/macOS) additionally takes a
// shared flock on it. Returns fd+1 or 0.
UPTRINT FileOpen(const ANSICHAR* Path)
{
	int Flags = O_CREAT|O_WRONLY|O_TRUNC|O_SHLOCK;
	int Mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH;
	int Out = open(Path, Flags, Mode);
	if (Out < 0) { return 0; }
	return UPTRINT(Out + 1);
}

} // namespace Private
} // namespace Trace
} // namespace UE

#endif // UE_TRACE_ENABLED

/* {{{1 HoloLensTrace.cpp */

#if UE_TRACE_ENABLED && PLATFORM_HOLOLENS

// NOTE(review): the header names were stripped by the extraction.
#	define _WINSOCK_DEPRECATED_NO_WARNINGS
#	include
#	include
#	pragma comment(lib, "ws2_32.lib")

#pragma warning(push)
#pragma warning(disable : 6031) // WSAStartup() return ignore - we're error tolerant

namespace UE {
namespace Trace {
namespace Private {

// Start a worker thread via CreateThread; Entry rides in the LPVOID param.
// Returns the thread HANDLE as an opaque value (null HANDLE on failure).
UPTRINT ThreadCreate(const ANSICHAR* Name, void (*Entry)())
{
	DWORD (WINAPI *WinApiThunk)(void*) = [] (void* Param) -> DWORD {
		typedef void (*EntryType)(void);
		(EntryType(Param))();
		return 0;
	};
	HANDLE Handle = CreateThread(nullptr, 0, WinApiThunk, (void*)Entry, 0, nullptr);
	return UPTRINT(Handle);
}

// Sleep the calling thread for Milliseconds.
void ThreadSleep(uint32 Milliseconds)
{
	Sleep(Milliseconds);
}

// Block until the thread handle signals (thread exit).
void ThreadJoin(UPTRINT Handle)
{
	WaitForSingleObject(HANDLE(Handle), INFINITE);
}

// Release the thread handle (join and destroy are separate on Windows).
void ThreadDestroy(UPTRINT Handle)
{
	CloseHandle(HANDLE(Handle));
}

// QueryPerformanceCounter ticks-per-second.
uint64 TimeGetFrequency()
{
	LARGE_INTEGER Value;
	QueryPerformanceFrequency(&Value);
	return Value.QuadPart;
}

// Raw QPC tick count; pair with TimeGetFrequency() above.
TRACELOG_API uint64 TimeGetTimestamp()
{
	LARGE_INTEGER Value;
	QueryPerformanceCounter(&Value);
	return Value.QuadPart;
}

// Winsock init; the return value is deliberately ignored (see the
// warning(disable : 6031) above). Safe to call repeatedly.
static void TcpSocketInitialize()
{
	WSADATA WsaData;
	WSAStartup(MAKEWORD(2, 2), &WsaData);
}

// Toggle FIONBIO on the socket; false if ioctlsocket() fails.
static bool TcpSocketSetNonBlocking(SOCKET Socket, bool bNonBlocking)
{
	unsigned long NonBlockingMode = !!bNonBlocking;
	return ioctlsocket(Socket, FIONBIO, &NonBlockingMode) != SOCKET_ERROR;
}

// Resolve Host with getaddrinfo and connect a blocking socket to Host:Port.
// Returns SOCKET+1 or 0 on failure.
UPTRINT TcpSocketConnect(const ANSICHAR* Host, uint16 Port)
{
	TcpSocketInitialize();
	struct FAddrInfoPtr
	{
		// NOTE(review): Value is uninitialized when getaddrinfo() fails, so
		// the destructor can call freeaddrinfo() on garbage on that path.
		~FAddrInfoPtr()	{ freeaddrinfo(Value); }
		addrinfo* operator -> ()	{ return Value; }
		addrinfo** operator & ()	{ return &Value; }
		addrinfo* Value;
	};
	FAddrInfoPtr Info;
	addrinfo Hints = {};
	Hints.ai_family = AF_INET;
	Hints.ai_socktype = SOCK_STREAM;
	Hints.ai_protocol = IPPROTO_TCP;
	if (getaddrinfo(Host, nullptr, &Hints, &Info))
	{
		return 0;
	}
	// NOTE(review): &Info is the address of Info.Value - never null; dead check.
	if (&Info == nullptr)
	{
		return 0;
	}
	auto* SockAddr = (sockaddr_in*)Info->ai_addr;
	SockAddr->sin_port = htons(Port);
	SOCKET Socket = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nullptr, 0, WSA_FLAG_NO_HANDLE_INHERIT);
	if (Socket == INVALID_SOCKET) { return 0; }
	int Result = connect(Socket, Info->ai_addr, int(Info->ai_addrlen));
	if (Result == SOCKET_ERROR) { closesocket(Socket); return 0; }
	if (!TcpSocketSetNonBlocking(Socket, 0)) { closesocket(Socket); return 0; }
	return UPTRINT(Socket) + 1;
}

// Non-blocking listen socket on INADDR_ANY:Port (backlog 1); SOCKET+1 or 0.
// NOTE(review): bind()/listen() signal failure with SOCKET_ERROR, not
// INVALID_SOCKET; the comparisons below only work because both expand to
// all-ones bit patterns.
UPTRINT TcpSocketListen(uint16 Port)
{
	TcpSocketInitialize();
	SOCKET Socket = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nullptr, 0, WSA_FLAG_NO_HANDLE_INHERIT);
	if (Socket == INVALID_SOCKET) { return 0; }
	sockaddr_in SockAddr;
	SockAddr.sin_family = AF_INET;
	SockAddr.sin_addr.s_addr = 0;
	SockAddr.sin_port = htons(Port);
	int Result = bind(Socket, (SOCKADDR*)&SockAddr, sizeof(SockAddr));
	if (Result ==
		INVALID_SOCKET) { closesocket(Socket); return 0; }
	Result = listen(Socket, 1);
	if (Result == INVALID_SOCKET) { closesocket(Socket); return 0; }
	if (!TcpSocketSetNonBlocking(Socket, 1)) { closesocket(Socket); return 0; }
	return UPTRINT(Socket) + 1;
}

// Accept: 1 on success (Out = SOCKET+1), 0 if would block, -1 on error.
int32 TcpSocketAccept(UPTRINT Socket, UPTRINT& Out)
{
	SOCKET Inner = Socket - 1;
	Inner = accept(Inner, nullptr, nullptr);
	if (Inner == INVALID_SOCKET)
	{
		return (WSAGetLastError() == WSAEWOULDBLOCK) - 1; // 0 if would block else -1
	}
	if (!TcpSocketSetNonBlocking(Inner, 0)) { closesocket(Inner); return 0; }
	Out = UPTRINT(Inner) + 1;
	return 1;
}

// Zero-timeout select() poll; Winsock fd_set is aggregate-initialized
// directly (count = 1, one socket). Errors also read as "has data".
bool TcpSocketHasData(UPTRINT Socket)
{
	SOCKET Inner = Socket - 1;
	fd_set FdSet = { 1, { Inner }, };
	TIMEVAL TimeVal = {};
	return (select(0, &FdSet, nullptr, nullptr, &TimeVal) != 0);
}

// Write Size bytes to the +1-biased HANDLE; true only on a complete write.
bool IoWrite(UPTRINT Handle, const void* Data, uint32 Size)
{
	HANDLE Inner = HANDLE(Handle - 1);
	DWORD BytesWritten = 0;
	if (!WriteFile(Inner, (const char*)Data, Size, &BytesWritten, nullptr))
	{
		return false;
	}
	return (BytesWritten == Size);
}

// Read up to Size bytes from the +1-biased HANDLE; bytes read or -1.
int32 IoRead(UPTRINT Handle, void* Data, uint32 Size)
{
	HANDLE Inner = HANDLE(Handle - 1);
	DWORD BytesRead = 0;
	if (!ReadFile(Inner, (char*)Data, Size, &BytesRead, nullptr))
	{
		return -1;
	}
	return BytesRead;
}

// Close the +1-biased HANDLE.
void IoClose(UPTRINT Handle)
{
	HANDLE Inner = HANDLE(Handle - 1);
	CloseHandle(Inner);
}

// NOTE(review): Path is an ANSICHAR* but is cast to LPCWSTR for CreateFile2;
// a narrow string reinterpreted as UTF-16 will not name the intended file -
// this needs a proper MultiByteToWideChar conversion. 'Flags' is also set
// but never passed (CreateFile2 takes extended parameters instead).
UPTRINT FileOpen(const ANSICHAR* Path)
{
	DWORD Access = GENERIC_WRITE;
	DWORD Share = FILE_SHARE_READ;
	DWORD Disposition = CREATE_ALWAYS;
	DWORD Flags = FILE_ATTRIBUTE_NORMAL;
	HANDLE Out = CreateFile2((LPCWSTR)Path, Access, Share, Disposition, nullptr);
	if (Out == INVALID_HANDLE_VALUE) { return 0; }
	return UPTRINT(Out) + 1;
}

} // namespace Private
} // namespace Trace
} // namespace UE

#pragma warning(pop)

#endif // UE_TRACE_ENABLED

/* {{{1 UnixTrace.cpp */

#if UE_TRACE_ENABLED && PLATFORM_UNIX

// NOTE(review): the header names were stripped by the extraction.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#if defined(_GNU_SOURCE)
#include
#endif // _GNU_SOURCE

namespace UE {
namespace Trace {
namespace Private {

// Start a worker thread; Entry is passed through the pthread void* param.
// Returns the pthread_t as an opaque handle, or 0 on failure. Name is unused.
UPTRINT ThreadCreate(const ANSICHAR* Name, void (*Entry)())
{
	void* (*PthreadThunk)(void*) = [] (void* Param) -> void * {
		typedef void (*EntryType)(void);
		(EntryType(Param))();
		return nullptr;
	};
	pthread_t ThreadHandle;
	if (pthread_create(&ThreadHandle, nullptr, PthreadThunk, reinterpret_cast(Entry)) != 0)
	{
		return 0;
	}
	return static_cast(ThreadHandle);
}

// Sleep the calling thread; usleep() takes microseconds.
void ThreadSleep(uint32 Milliseconds)
{
	usleep(Milliseconds * 1000U);
}

// Block until the thread identified by Handle exits.
void ThreadJoin(UPTRINT Handle)
{
	pthread_join(static_cast(Handle), nullptr);
}

// No-op: pthreads need no explicit destroy after a join.
void ThreadDestroy(UPTRINT Handle)
{
}

// 100ns resolution: fixed 10MHz, matching TimeGetTimestamp() below.
uint64 TimeGetFrequency()
{
	return 10000000ull;
}

// Monotonic clock in 100ns ticks (seconds*1e7 + ns/100).
TRACELOG_API uint64 TimeGetTimestamp()
{
	struct timespec TimeSpec;
	clock_gettime(CLOCK_MONOTONIC, &TimeSpec);
	return static_cast(static_cast(TimeSpec.tv_sec) * 10000000ULL + static_cast(TimeSpec.tv_nsec) / 100ULL);
}

// Toggle O_NONBLOCK on a socket via fcntl(); returns false on failure.
static bool TcpSocketSetNonBlocking(int Socket, bool bNonBlocking)
{
	int Flags = fcntl(Socket, F_GETFL, 0);
	if (Flags == -1) { return false; }
	Flags = bNonBlocking ?
		(Flags|O_NONBLOCK) : (Flags & ~O_NONBLOCK);
	return fcntl(Socket, F_SETFL, Flags) >= 0;
}

// Resolve Host with getaddrinfo and connect a blocking socket to Host:Port.
// Returns fd+1 or 0 on failure.
UPTRINT TcpSocketConnect(const ANSICHAR* Host, uint16 Port)
{
	struct FAddrInfoPtr
	{
		// Unlike the Apple/Windows variants, this destructor null-checks
		// Value before freeing it.
		~FAddrInfoPtr()	{ if (Value != nullptr) { freeaddrinfo(Value); } }
		addrinfo* operator -> ()	{ return Value; }
		addrinfo** operator & ()	{ return &Value; }
		addrinfo* Value;
	};
	FAddrInfoPtr Info;
	addrinfo Hints = {};
	Hints.ai_family = AF_INET;
	Hints.ai_socktype = SOCK_STREAM;
	Hints.ai_protocol = IPPROTO_TCP;
	if (getaddrinfo(Host, nullptr, &Hints, &Info))
	{
		// Defuse the destructor: getaddrinfo may leave Value unset on error.
		Info.Value = nullptr;
		return 0;
	}
	// NOTE(review): &Info is the address of Info.Value - never null; dead check.
	if (&Info == nullptr)
	{
		return 0;
	}
	auto* SockAddr = (sockaddr_in*)Info->ai_addr;
	SockAddr->sin_port = htons(Port);
	int Socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (Socket < 0) { return 0; }
	int Result = connect(Socket, Info->ai_addr, int(Info->ai_addrlen));
	if (Result < 0) { close(Socket); return 0; }
	if (!TcpSocketSetNonBlocking(Socket, false)) { close(Socket); return 0; }
	return UPTRINT(Socket + 1);
}

// Create a non-blocking listen socket on INADDR_ANY:Port (backlog 1).
// Returns fd+1, or 0 on failure.
UPTRINT TcpSocketListen(uint16 Port)
{
	int Socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (Socket < 0) { return 0; }
	sockaddr_in SockAddr;
	SockAddr.sin_family = AF_INET;
	SockAddr.sin_addr.s_addr = 0;
	SockAddr.sin_port = htons(Port);
	int Result = bind(Socket, reinterpret_cast(&SockAddr), sizeof(SockAddr));
	if (Result < 0) { close(Socket); return 0; }
	Result = listen(Socket, 1);
	if (Result < 0) { close(Socket); return 0; }
	if (!TcpSocketSetNonBlocking(Socket, true)) { close(Socket); return 0; }
	return UPTRINT(Socket + 1);
}

// Accept: 1 on success (Out = fd+1), 0 if would block, -1 on error.
int32 TcpSocketAccept(UPTRINT Socket, UPTRINT& Out)
{
	int Inner = Socket - 1;
	Inner = accept(Inner, nullptr, nullptr);
	if (Inner < 0)
	{
		return (errno == EAGAIN || errno == EWOULDBLOCK) - 1; // 0 if would block else -1
	}
	if (!TcpSocketSetNonBlocking(Inner, false)) { close(Inner); return 0; }
	Out = UPTRINT(Inner + 1);
	return 1;
}

// Zero-timeout select() poll: true if readable (errors also read as true).
bool TcpSocketHasData(UPTRINT Socket)
{
	int Inner = Socket - 1;
	fd_set FdSet;
	FD_ZERO(&FdSet);
	FD_SET(Inner, &FdSet);
	timeval TimeVal = {};
	return
		(select(Inner + 1, &FdSet, nullptr, nullptr, &TimeVal) != 0);
}

// Write Size bytes to the +1-biased fd; true only on a complete write.
bool IoWrite(UPTRINT Handle, const void* Data, uint32 Size)
{
	int Inner = int(Handle) - 1;
	return write(Inner, Data, Size) == Size;
}

// Read up to Size bytes from the +1-biased fd; bytes read, or -1 on error.
int32 IoRead(UPTRINT Handle, void* Data, uint32 Size)
{
	int Inner = int(Handle) - 1;
	return read(Inner, Data, Size);
}

// Close the +1-biased fd.
void IoClose(UPTRINT Handle)
{
	int Inner = int(Handle) - 1;
	close(Inner);
}

// Create/truncate a world-readable output file; returns fd+1 or 0.
UPTRINT FileOpen(const ANSICHAR* Path)
{
	int Flags = O_CREAT|O_WRONLY|O_TRUNC;
	int Mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH;
	int Out = open(Path, Flags, Mode);
	if (Out < 0) { return 0; }
	return UPTRINT(Out + 1);
}

} // namespace Private
} // namespace Trace
} // namespace UE

#endif // UE_TRACE_ENABLED

/* {{{1 WindowsTrace.cpp */

#if UE_TRACE_ENABLED && PLATFORM_WINDOWS

// NOTE(review): the header names were stripped by the extraction.
#	define _WINSOCK_DEPRECATED_NO_WARNINGS
#	include
#	include
#	pragma comment(lib, "ws2_32.lib")

#pragma warning(push)
#pragma warning(disable : 6031) // WSAStartup() return ignore - we're error tolerant

namespace UE {
namespace Trace {
namespace Private {

// Start a worker thread via CreateThread (the thread-name param is unused).
UPTRINT ThreadCreate(const ANSICHAR*, void (*Entry)())
{
	DWORD (WINAPI *WinApiThunk)(void*) = [] (void* Param) -> DWORD {
		typedef void (*EntryType)(void);
		(EntryType(Param))();
		return 0;
	};
	HANDLE Handle = CreateThread(nullptr, 0, WinApiThunk, (void*)Entry, 0, nullptr);
	return UPTRINT(Handle);
}

// Sleep the calling thread for Milliseconds.
void ThreadSleep(uint32 Milliseconds)
{
	Sleep(Milliseconds);
}

// Block until the thread handle signals (thread exit).
void ThreadJoin(UPTRINT Handle)
{
	WaitForSingleObject(HANDLE(Handle), INFINITE);
}

// Release the thread handle.
void ThreadDestroy(UPTRINT Handle)
{
	CloseHandle(HANDLE(Handle));
}

// QueryPerformanceCounter ticks-per-second.
uint64 TimeGetFrequency()
{
	LARGE_INTEGER Value;
	QueryPerformanceFrequency(&Value);
	return Value.QuadPart;
}

// Raw QPC tick count; pair with TimeGetFrequency() above.
TRACELOG_API uint64 TimeGetTimestamp()
{
	LARGE_INTEGER Value;
	QueryPerformanceCounter(&Value);
	return Value.QuadPart;
}

// Winsock init; return value deliberately ignored (warning 6031 disabled).
static void TcpSocketInitialize()
{
	WSADATA WsaData;
	WSAStartup(MAKEWORD(2, 2), &WsaData);
}

// Toggle FIONBIO on the socket; false if ioctlsocket() fails.
static bool TcpSocketSetNonBlocking(SOCKET Socket, bool bNonBlocking)
{
	unsigned long NonBlockingMode = !!bNonBlocking;
	return
ioctlsocket(Socket, FIONBIO, &NonBlockingMode) != SOCKET_ERROR;
}

// Resolve Host with getaddrinfo and connect a blocking socket to Host:Port.
// Returns SOCKET+1 or 0 on failure.
UPTRINT TcpSocketConnect(const ANSICHAR* Host, uint16 Port)
{
	TcpSocketInitialize();
	struct FAddrInfoPtr
	{
		// NOTE(review): Value is uninitialized when getaddrinfo() fails, so
		// the destructor can call freeaddrinfo() on garbage on that path.
		~FAddrInfoPtr()	{ freeaddrinfo(Value); }
		addrinfo* operator -> ()	{ return Value; }
		addrinfo** operator & ()	{ return &Value; }
		addrinfo* Value;
	};
	FAddrInfoPtr Info;
	addrinfo Hints = {};
	Hints.ai_family = AF_INET;
	Hints.ai_socktype = SOCK_STREAM;
	Hints.ai_protocol = IPPROTO_TCP;
	if (getaddrinfo(Host, nullptr, &Hints, &Info))
	{
		return 0;
	}
	// NOTE(review): &Info is the address of Info.Value - never null; dead check.
	if (&Info == nullptr)
	{
		return 0;
	}
	auto* SockAddr = (sockaddr_in*)Info->ai_addr;
	SockAddr->sin_port = htons(Port);
	SOCKET Socket = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nullptr, 0, WSA_FLAG_NO_HANDLE_INHERIT);
	if (Socket == INVALID_SOCKET) { return 0; }
	int Result = connect(Socket, Info->ai_addr, int(Info->ai_addrlen));
	if (Result == SOCKET_ERROR) { closesocket(Socket); return 0; }
	if (!TcpSocketSetNonBlocking(Socket, 0)) { closesocket(Socket); return 0; }
	return UPTRINT(Socket) + 1;
}

// Non-blocking listen socket on INADDR_ANY:Port (backlog 1); SOCKET+1 or 0.
// NOTE(review): bind()/listen() signal failure with SOCKET_ERROR, not
// INVALID_SOCKET; the comparisons only work because both are all-ones.
UPTRINT TcpSocketListen(uint16 Port)
{
	TcpSocketInitialize();
	SOCKET Socket = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nullptr, 0, WSA_FLAG_NO_HANDLE_INHERIT);
	if (Socket == INVALID_SOCKET) { return 0; }
	sockaddr_in SockAddr;
	SockAddr.sin_family = AF_INET;
	SockAddr.sin_addr.s_addr = 0;
	SockAddr.sin_port = htons(Port);
	int Result = bind(Socket, (SOCKADDR*)&SockAddr, sizeof(SockAddr));
	if (Result == INVALID_SOCKET) { closesocket(Socket); return 0; }
	Result = listen(Socket, 1);
	if (Result == INVALID_SOCKET) { closesocket(Socket); return 0; }
	if (!TcpSocketSetNonBlocking(Socket, 1)) { closesocket(Socket); return 0; }
	return UPTRINT(Socket) + 1;
}

// Accept: 1 on success (Out = SOCKET+1), 0 if would block, -1 on error.
int32 TcpSocketAccept(UPTRINT Socket, UPTRINT& Out)
{
	SOCKET Inner = Socket - 1;
	Inner = accept(Inner, nullptr, nullptr);
	if (Inner == INVALID_SOCKET)
	{
		return (WSAGetLastError() == WSAEWOULDBLOCK) - 1; // 0 if would block else -1
	}
	if (!TcpSocketSetNonBlocking(Inner, 0)) { closesocket(Inner); return 0; }
	Out = UPTRINT(Inner) +
		1;
	return 1;
}

// Zero-timeout select() poll; Winsock fd_set aggregate-initialized directly
// (count = 1, one socket). Errors also read as "has data".
bool TcpSocketHasData(UPTRINT Socket)
{
	SOCKET Inner = Socket - 1;
	fd_set FdSet = { 1, { Inner }, };
	TIMEVAL TimeVal = {};
	return (select(0, &FdSet, nullptr, nullptr, &TimeVal) != 0);
}

// Write Size bytes to the +1-biased HANDLE; true only on a complete write.
bool IoWrite(UPTRINT Handle, const void* Data, uint32 Size)
{
	HANDLE Inner = HANDLE(Handle - 1);
	DWORD BytesWritten = 0;
	if (!WriteFile(Inner, (const char*)Data, Size, &BytesWritten, nullptr))
	{
		return false;
	}
	return (BytesWritten == Size);
}

// Read up to Size bytes from the +1-biased HANDLE; bytes read or -1.
int32 IoRead(UPTRINT Handle, void* Data, uint32 Size)
{
	HANDLE Inner = HANDLE(Handle - 1);
	DWORD BytesRead = 0;
	if (!ReadFile(Inner, (char*)Data, Size, &BytesRead, nullptr))
	{
		return -1;
	}
	return BytesRead;
}

// Close the +1-biased HANDLE.
void IoClose(UPTRINT Handle)
{
	HANDLE Inner = HANDLE(Handle - 1);
	CloseHandle(Inner);
}

// Create/overwrite the output file (ANSI path); returns HANDLE+1 or 0.
UPTRINT FileOpen(const ANSICHAR* Path)
{
	DWORD Access = GENERIC_WRITE;
	DWORD Share = FILE_SHARE_READ;
	DWORD Disposition = CREATE_ALWAYS;
	DWORD Flags = FILE_ATTRIBUTE_NORMAL;
	HANDLE Out = CreateFileA(Path, Access, Share, nullptr, Disposition, Flags, nullptr);
	if (Out == INVALID_HANDLE_VALUE) { return 0; }
	return UPTRINT(Out) + 1;
}

} // namespace Private
} // namespace Trace
} // namespace UE

#pragma warning(pop)

#endif // UE_TRACE_ENABLED

/* {{{1 Cache.cpp */
// Cache of "important" events: everything routed through Writer_CacheData is
// kept (encoded) so it can be replayed to late-connecting consumers.

#if UE_TRACE_ENABLED

// NOTE(review): header name stripped by the extraction.
#include

namespace UE {
namespace Trace {
namespace Private {

// Provided elsewhere in the library.
uint32	GetEncodeMaxSize(uint32);
int32	Encode(const void*, int32, void*, int32);
void*	Writer_MemoryAllocate(SIZE_T, uint32);
void	Writer_MemoryFree(void*, uint32);
void	Writer_SendDataRaw(const void*, uint32);
void	Writer_SendData(uint32, uint8* __restrict, uint32);

// One cache page. 'Next' links committed pages; while a page is still the
// active one, the same union slot holds 'TailNext', the location to patch
// when the page is appended to the committed list.
struct alignas(16) FCacheBuffer
{
	union
	{
		FCacheBuffer*	Next;
		FCacheBuffer**	TailNext;
	};
	uint32	Size;
	uint32	Remaining;
	uint32	_Unused[3];
	uint32	Underflow;	// For packet header
	uint8	Data[];
};

static const uint32		GCacheBufferSize	= 64 << 10;
static const uint32		GCacheCollectorSize	= 1 << 10;
static FCacheBuffer*	GCacheCollector;	// = nullptr;
static FCacheBuffer*	GCacheActiveBuffer;	// = nullptr;
static FCacheBuffer*	GCacheHeadBuffer;
// = nullptr;
extern FStatistics GTraceStatistics;

// Allocate a cache page; Size is the usable payload size (Data[]).
static FCacheBuffer* Writer_CacheCreateBuffer(uint32 Size)
{
	void* Block = Writer_MemoryAllocate(sizeof(FCacheBuffer) + Size, alignof(FCacheBuffer));
	auto* Buffer = (FCacheBuffer*)Block;
	Buffer->Size = Size;
	Buffer->Remaining = Buffer->Size;
	Buffer->Next = nullptr;
	return Buffer;
}

// Compress the (full) collector page into the active cache page as one
// encoded tid-packet, growing the committed page list when the active page
// cannot hold the worst-case encoded size.
static void Writer_CacheCommit(const FCacheBuffer* Collector)
{
	uint32 InputSize = uint32(Collector->Size - Collector->Remaining);
	uint32 EncodeMaxSize = GetEncodeMaxSize(InputSize);
	if (EncodeMaxSize + sizeof(FTidPacketEncoded) > GCacheActiveBuffer->Remaining)
	{
#if TRACE_PRIVATE_STATISTICS
		GTraceStatistics.CacheWaste += GCacheActiveBuffer->Remaining;
#endif
		// Retire the active page onto the committed list and start a new one.
		*(GCacheActiveBuffer->TailNext) = GCacheActiveBuffer;
		GCacheActiveBuffer->TailNext = nullptr;
		FCacheBuffer* NewBuffer = Writer_CacheCreateBuffer(GCacheBufferSize);
		NewBuffer->TailNext = &(GCacheActiveBuffer->Next);
		GCacheActiveBuffer = NewBuffer;
	}
	uint32 Used = GCacheActiveBuffer->Size - GCacheActiveBuffer->Remaining;
	auto* Packet = (FTidPacketEncoded*)(GCacheActiveBuffer->Data + Used);
	uint32 OutputSize = Encode(Collector->Data, InputSize, Packet->Data, EncodeMaxSize);
	Packet->PacketSize = uint16(OutputSize + sizeof(FTidPacketEncoded));
	Packet->ThreadId = FTidPacketBase::EncodedMarker | uint16(ETransportTid::Importants);
	Packet->DecodedSize = uint16(InputSize);
	Used = sizeof(FTidPacketEncoded) + OutputSize;
	GCacheActiveBuffer->Remaining -= Used;
#if TRACE_PRIVATE_STATISTICS
	GTraceStatistics.CacheUsed += Used;
#endif
}

// Send Data immediately AND copy it into the collector page for later
// replay; commits (compresses) the collector each time it fills.
void Writer_CacheData(uint8* Data, uint32 Size)
{
	Writer_SendData(ETransportTid::Importants, Data, Size);
	if (GCacheCollector == nullptr)
	{
		return;
	}
	while (true)
	{
		uint32 StepSize = (Size < GCacheCollector->Remaining) ?
			Size : GCacheCollector->Remaining;
		uint32 Used = GCacheCollector->Size - GCacheCollector->Remaining;
		memcpy(GCacheCollector->Data + Used, Data, StepSize);
		GCacheCollector->Remaining -= StepSize;
		if (GCacheCollector->Remaining == 0)
		{
			Writer_CacheCommit(GCacheCollector);
			GCacheCollector->Remaining = GCacheCollector->Size;
		}
		Size -= StepSize;
		if (Size == 0)
		{
			break;
		}
		Data += StepSize;
	}
}

// Replay the whole cache to a newly-connected consumer: committed pages and
// the active page are already encoded (raw send); the collector's pending
// bytes are still plain and go through the normal send path.
void Writer_CacheOnConnect()
{
	if (GCacheCollector == nullptr)
	{
		return;
	}
	for (FCacheBuffer* Buffer = GCacheHeadBuffer; Buffer != nullptr; Buffer = Buffer->Next)
	{
		uint32 Used = Buffer->Size - Buffer->Remaining;
		Writer_SendDataRaw(Buffer->Data, Used);
	}
	if (uint32 Used = GCacheActiveBuffer->Size - GCacheActiveBuffer->Remaining)
	{
		Writer_SendDataRaw(GCacheActiveBuffer->Data, Used);
	}
	if (uint32 Used = GCacheCollector->Size - GCacheCollector->Remaining)
	{
		Writer_SendData(ETransportTid::Importants, GCacheCollector->Data, Used);
	}
}

// Allocate the collector and the first active page; the active page's
// TailNext points at the list head so the first commit links it in.
void Writer_InitializeCache()
{
	GCacheCollector = Writer_CacheCreateBuffer(GCacheCollectorSize);
	GCacheActiveBuffer = Writer_CacheCreateBuffer(GCacheBufferSize);
	GCacheActiveBuffer->TailNext = &GCacheHeadBuffer;
	static_assert(ETransport::Active == ETransport::TidPacketSync, "The important cache is transport aware");
}

// Free every committed page, the active page, and the collector.
void Writer_ShutdownCache()
{
	for (FCacheBuffer* Buffer = GCacheHeadBuffer; Buffer != nullptr;)
	{
		FCacheBuffer* Next = Buffer->Next;
		Writer_MemoryFree(Buffer, GCacheBufferSize);
		Buffer = Next;
	}
	Writer_MemoryFree(GCacheActiveBuffer, GCacheBufferSize);
	Writer_MemoryFree(GCacheCollector, GCacheCollectorSize);
}

} // namespace Private
} // namespace Trace
} // namespace UE

#endif // UE_TRACE_ENABLED

/* {{{1 SharedBuffer.cpp */
// Lock-free shared buffers: writer threads claim regions by atomically
// decrementing a packed cursor; a drain thread retires filled buffers.

#if UE_TRACE_ENABLED

namespace UE {
namespace Trace {
namespace Private {

// Provided elsewhere in the library.
void*	Writer_MemoryAllocate(SIZE_T, uint32);
void	Writer_MemoryFree(void*, uint32);
void	Writer_CacheData(uint8*, uint32);

// Sentinel "empty" buffer so GSharedBuffer is never null.
static FSharedBuffer	GNullSharedBuffer	= { 0, FSharedBuffer::RefInit };
FSharedBuffer* volatile	GSharedBuffer =
	&GNullSharedBuffer;
static FSharedBuffer*	GTailBuffer;	// = nullptr
static uint32			GTailPreSent;	// = 0
static const uint32		GBlockSize		= 1024; // Block size must be a power of two!
extern FStatistics		GTraceStatistics;

// Allocate a block and place the FSharedBuffer descriptor at its END; data
// grows downwards from the descriptor. SizeHint rounds the block size up to
// the next GBlockSize multiple when the default block is too small.
static FSharedBuffer* Writer_CreateSharedBuffer(uint32 SizeHint=0)
{
	const uint32 OverheadSize = sizeof(FSharedBuffer) + sizeof(uint32);
	uint32 BlockSize = GBlockSize;
	if (SizeHint + OverheadSize > GBlockSize)
	{
		BlockSize += SizeHint + OverheadSize - GBlockSize;
		BlockSize += GBlockSize - 1;
		BlockSize &= ~(GBlockSize - 1);
	}
	void* Block = Writer_MemoryAllocate(BlockSize, alignof(FSharedBuffer));
	auto* Buffer = (FSharedBuffer*)(UPTRINT(Block) + BlockSize) - 1;
	Buffer->Size = uint32(UPTRINT(Buffer) - UPTRINT(Block));
	Buffer->Size -= sizeof(uint32); // to precede event data with a small header when sending.
	Buffer->Cursor = (Buffer->Size << FSharedBuffer::CursorShift) | FSharedBuffer::RefInit;
	Buffer->Next = nullptr;
	Buffer->Final = 0;
	return Buffer;
}

// Called when a writer's cursor decrement underflowed (buffer full). The
// thread whose decrement crossed zero (RegionStart >= 0) owns creating and
// publishing the replacement; other threads spin until it is published.
// Repeats until the caller wins a region in some buffer.
FNextSharedBuffer Writer_NextSharedBuffer(FSharedBuffer* Buffer, int32 RegionStart, int32 NegSizeAndRef)
{
	FSharedBuffer* NextBuffer;
	while (true)
	{
		bool bBufferOwner = (RegionStart >= 0);
		if (LIKELY(bBufferOwner))
		{
			uint32 Size = -NegSizeAndRef >> FSharedBuffer::CursorShift;
			NextBuffer = Writer_CreateSharedBuffer(Size);
			Buffer->Next = NextBuffer;
			Buffer->Final = RegionStart >> FSharedBuffer::CursorShift;
			AtomicStoreRelease(&GSharedBuffer, NextBuffer);
		}
		else
		{
			for (;; PlatformYield())
			{
				NextBuffer = AtomicLoadAcquire(&GSharedBuffer);
				if (NextBuffer != Buffer)
				{
					break;
				}
			}
		}
		// Drop this thread's ref on the old buffer, then try to claim a
		// region in the new one.
		AtomicAddRelease(&(Buffer->Cursor), int32(FSharedBuffer::RefBit));
		RegionStart = AtomicAddRelaxed(&(NextBuffer->Cursor), NegSizeAndRef);
		if (LIKELY(RegionStart + NegSizeAndRef >= 0))
		{
			break;
		}
		Buffer = NextBuffer;
	}
	return { NextBuffer, RegionStart };
}

// Flush the tail buffer's unsent bytes into the cache and free its block.
static void Writer_RetireSharedBufferImpl()
{
	uint8* Data = (uint8*)GTailBuffer - GTailBuffer->Size + GTailPreSent;
	if (auto SendSize = UPTRINT(GTailBuffer) - UPTRINT(Data) -
		GTailBuffer->Final)
	{
#if TRACE_PRIVATE_STATISTICS
		GTraceStatistics.BytesTraced += SendSize;
#endif
		Writer_CacheData(Data, uint32(SendSize));
	}
	FSharedBuffer* Temp = GTailBuffer->Next;
	void* Block = (uint8*)GTailBuffer - GTailBuffer->Size - sizeof(uint32);
	Writer_MemoryFree(Block, GBlockSize);
	GTailBuffer = Temp;
	GTailPreSent = 0;
}

// Spin until no writer still holds a ref on the tail buffer, then retire it.
static void Writer_RetireSharedBuffer()
{
	for (;; PlatformYield())
	{
		int32 TailCursor = AtomicLoadAcquire(&(GTailBuffer->Cursor));
		if (LIKELY(((TailCursor + 1) & FSharedBuffer::RefInit) == 0))
		{
			break;
		}
	}
	Writer_RetireSharedBufferImpl();
}

// Drain-thread pump: retire filled buffers behind the head, then pre-send
// any newly-written bytes of the head buffer.
void Writer_UpdateSharedBuffers()
{
	FSharedBuffer* HeadBuffer = AtomicLoadAcquire(&GSharedBuffer);
	while (true)
	{
		if (GTailBuffer != HeadBuffer)
		{
			Writer_RetireSharedBuffer();
			continue;
		}
		int32 Cursor = AtomicLoadAcquire(&(HeadBuffer->Cursor));
		if ((Cursor + 1) & FSharedBuffer::RefInit)
		{
			// A writer is mid-copy; try again.
			continue;
		}
		Cursor = Cursor >> FSharedBuffer::CursorShift;
		if (Cursor < 0)
		{
			Writer_RetireSharedBufferImpl();
			break;
		}
		uint32 PreSentBias = HeadBuffer->Size - GTailPreSent;
		if (uint32 Sendable = PreSentBias - Cursor)
		{
			uint8* Data = (uint8*)(UPTRINT(HeadBuffer) - PreSentBias);
			Writer_CacheData(Data, Sendable);
			GTailPreSent += Sendable;
		}
		break;
	}
}

// Create the first buffer and publish it as both head and tail.
void Writer_InitializeSharedBuffers()
{
	FSharedBuffer* Buffer = Writer_CreateSharedBuffer();
	GTailBuffer = Buffer;
	GTailPreSent = 0;
	AtomicStoreRelease(&GSharedBuffer, Buffer);
}

// Intentionally empty; buffers are retired as they drain.
void Writer_ShutdownSharedBuffers()
{
}

} // namespace Private
} // namespace Trace
} // namespace UE

#endif // UE_TRACE_ENABLED

#endif // TRACE_IMPLEMENT

/* {{{1 standalone_epilogue.h */
// Copyright Epic Games, Inc. All Rights Reserved.
// {{{1 amalgamation-tail //////////////////////////////////////////////////////

#if PLATFORM_WINDOWS
#	pragma warning(pop)
#endif

#if TRACE_UE_COMPAT_LAYER
#if PLATFORM_WINDOWS
	// Restore the real wide-char TEXT()/TCHAR the compat layer stubbed out.
#	if defined(UNICODE) || defined(_UNICODE)
#		undef TEXT
#		undef TCHAR
#		define TEXT(x) L##x
#	endif
#endif
#endif // TRACE_UE_COMPAT_LAYER

// {{{1 library-setup //////////////////////////////////////////////////////////
// Short TRACE_* aliases for the UE_TRACE_* macro family.

// NOTE(review): header name stripped by the extraction.
#include

#define TRACE_EVENT_DEFINE			UE_TRACE_EVENT_DEFINE
#define TRACE_EVENT_BEGIN			UE_TRACE_EVENT_BEGIN
#define TRACE_EVENT_BEGIN_EXTERN	UE_TRACE_EVENT_BEGIN_EXTERN
#define TRACE_EVENT_FIELD			UE_TRACE_EVENT_FIELD
#define TRACE_EVENT_END				UE_TRACE_EVENT_END
#define TRACE_LOG					UE_TRACE_LOG
#define TRACE_LOG_SCOPED			UE_TRACE_LOG_SCOPED
#define TRACE_LOG_SCOPED_T			UE_TRACE_LOG_SCOPED_T
#define TRACE_CHANNEL				UE_TRACE_CHANNEL
#define TRACE_CHANNEL_EXTERN		UE_TRACE_CHANNEL_EXTERN
#define TRACE_CHANNEL_DEFINE		UE_TRACE_CHANNEL_DEFINE

namespace trace = UE::Trace;

// Token-pasting helpers; TRACE_PRIVATE_UNIQUE_VAR makes a per-line variable
// name ($-prefixed identifiers are a compiler extension).
#define TRACE_PRIVATE_CONCAT_(x, y)		x##y
#define TRACE_PRIVATE_CONCAT(x, y)		TRACE_PRIVATE_CONCAT_(x, y)
#define TRACE_PRIVATE_UNIQUE_VAR(name)	TRACE_PRIVATE_CONCAT($trace_##name, __LINE__)

// {{{1 session-header /////////////////////////////////////////////////////////

namespace UE {
namespace Trace {

enum class Build
{
	Unknown,
	Debug,
	DebugGame,
	Development,
	Shipping,
	Test
};

// Emits a Diagnostics.Session2 event describing the running application.
void DescribeSession(
	const std::string_view& AppName,
	Build Variant=Build::Unknown,
	const std::string_view& CommandLine="",
	const std::string_view& BuildVersion="unknown_ver");

} // namespace Trace
} // namespace UE

// {{{1 session-source /////////////////////////////////////////////////////////

#if TRACE_IMPLEMENT

namespace UE {
namespace Trace {
namespace Private {

TRACE_EVENT_BEGIN(Diagnostics, Session2, NoSync|Important)
	TRACE_EVENT_FIELD(uint8, ConfigurationType)
	TRACE_EVENT_FIELD(trace::AnsiString, AppName)
	TRACE_EVENT_FIELD(trace::AnsiString, BuildVersion)
	TRACE_EVENT_FIELD(trace::AnsiString, Platform)
	TRACE_EVENT_FIELD(trace::AnsiString,
		CommandLine)
TRACE_EVENT_END()

} // namespace Private

////////////////////////////////////////////////////////////////////////////////
// Logs one Session2 event; the platform string is fixed at compile time.
void DescribeSession(
	const std::string_view& AppName,
	Build Variant,
	const std::string_view& CommandLine,
	const std::string_view& BuildVersion)
{
	using namespace Private;
	using namespace std::literals;

	std::string_view Platform;
#if PLATFORM_WINDOWS
	Platform = "Windows"sv;
#elif PLATFORM_UNIX
	Platform = "Linux"sv;
#elif PLATFORM_MAC
	Platform = "Mac"sv;
#else
	Platform = "Unknown"sv;
#endif

	int DataSize = 0;
	DataSize += AppName.size();
	DataSize += BuildVersion.size();
	DataSize += Platform.size();
	DataSize += CommandLine.size();
	TRACE_LOG(Diagnostics, Session2, true, DataSize)
		<< Session2.AppName(AppName.data(), int(AppName.size()))
		<< Session2.BuildVersion(BuildVersion.data(), int(BuildVersion.size()))
		<< Session2.Platform(Platform.data(), int(Platform.size()))
		<< Session2.CommandLine(CommandLine.data(), int(CommandLine.size()))
		<< Session2.ConfigurationType(uint8(Variant));
}

} // namespace Trace
} // namespace UE

#endif // TRACE_IMPLEMENT

// {{{1 cpu-header /////////////////////////////////////////////////////////////

TRACE_CHANNEL_EXTERN(CpuChannel)

namespace UE {
namespace Trace {

enum CpuScopeFlags : int
{
	CpuFlush = 1 << 0,	// force the per-thread event batch to flush on Enter
};

// RAII CPU scope: Enter() stamps entry, the destructor stamps exit.
// _ScopeId == 0 means Enter() was never called (channel disabled).
struct TraceCpuScope
{
	~TraceCpuScope();
	void Enter(int ScopeId, int Flags=0);
	int _ScopeId = 0;
};

// Registers a named scope and returns its non-zero id.
int ScopeNew(const std::string_view& Name);

} // namespace Trace
} // namespace UE

// Declares a scope object for the rest of the block; the scope id for each
// site is looked up once and memoized in a function-local static.
#define TRACE_CPU_SCOPE(name, ...) \
	trace::TraceCpuScope TRACE_PRIVATE_UNIQUE_VAR(cpu_scope); \
	if (CpuChannel) { \
		using namespace std::literals; \
		static int TRACE_PRIVATE_UNIQUE_VAR(scope_id); \
		if (0 == TRACE_PRIVATE_UNIQUE_VAR(scope_id)) \
			TRACE_PRIVATE_UNIQUE_VAR(scope_id) = trace::ScopeNew(name##sv); \
		TRACE_PRIVATE_UNIQUE_VAR(cpu_scope).Enter(TRACE_PRIVATE_UNIQUE_VAR(scope_id), ##__VA_ARGS__); \
	} \
	do {} while (0)

// {{{1 cpu-source /////////////////////////////////////////////////////////////

#if TRACE_IMPLEMENT

TRACE_CHANNEL_DEFINE(CpuChannel)

namespace UE {
namespace Trace {
namespace Private {

TRACE_EVENT_BEGIN(CpuProfiler, EventSpec, NoSync|Important)
	TRACE_EVENT_FIELD(uint32, Id)
	TRACE_EVENT_FIELD(trace::AnsiString, Name)
TRACE_EVENT_END()

TRACE_EVENT_BEGIN(CpuProfiler, EventBatch, NoSync)
	TRACE_EVENT_FIELD(uint8[], Data)
TRACE_EVENT_END()

////////////////////////////////////////////////////////////////////////////////
// LEB128-style 7-bit encode of a 32-bit value (max 4 bytes / 28 payload
// bits). Always memcpys sizeof(value) bytes; returns the encoded length.
static int32_t encode32_7bit(int32_t value, void* __restrict out)
{
	// Calculate the number of bytes
	int32_t length = 1;
	length += (value >= (1 << 7));
	length += (value >= (1 << 14));
	length += (value >= (1 << 21));

	// Add a gap every eighth bit for the continuations
	int32_t ret = value;
	ret = (ret & 0x0000'3fff) | ((ret & 0x0fff'c000) << 2);
	ret = (ret & 0x007f'007f) | ((ret & 0x3f80'3f80) << 1);

	// Set the bits indicating another byte follows
	int32_t continuations = 0x0080'8080;
	continuations >>= (sizeof(value) - length) * 8;
	ret |= continuations;

	::memcpy(out, &ret, sizeof(value));
	return length;
}

////////////////////////////////////////////////////////////////////////////////
// 64-bit variant (max 8 bytes / 56 payload bits).
static int32_t encode64_7bit(int64_t value, void* __restrict out)
{
	// Calculate the output length
	uint32_t length = 1;
	length += (value >= (1ll << 7));
	length += (value >= (1ll << 14));
	length += (value >= (1ll << 21));
	length += (value >= (1ll << 28));
	length += (value >= (1ll << 35));
	length += (value >= (1ll << 42));
	length += (value >= (1ll << 49));

	// Add a gap every eighth bit for the
	// continuations
	int64_t ret = value;
	ret = (ret & 0x0000'0000'0fff'ffffull) | ((ret & 0x00ff'ffff'f000'0000ull) << 4);
	ret = (ret & 0x0000'3fff'0000'3fffull) | ((ret & 0x0fff'c000'0fff'c000ull) << 2);
	ret = (ret & 0x007f'007f'007f'007full) | ((ret & 0x3f80'3f80'3f80'3f80ull) << 1);

	// Set the bits indicating another byte follows
	int64_t continuations = 0x0080'8080'8080'8080ull;
	continuations >>= (sizeof(value) - length) * 8;
	ret |= continuations;

	::memcpy(out, &ret, sizeof(value));
	return length;
}

////////////////////////////////////////////////////////////////////////////////
// Per-thread batch of enter/leave records, flushed as EventBatch events.
// Timestamps are delta-encoded; the low bit tags enter (1) vs leave (0).
class ThreadBuffer
{
public:
	static void	Enter(uint64_t Timestamp, uint32_t ScopeId, int Flags);
	static void	Leave(uint64_t Timestamp);

private:
				~ThreadBuffer();
	void		Flush(bool Force);
	void		EnterImpl(uint64_t Timestamp, uint32_t ScopeId, int Flags);
	void		LeaveImpl(uint64_t Timestamp);
	enum
	{
		BufferSize = 256,
		Overflow = 16,	// headroom so one record never overruns Buffer
		EnterLsb = 1,
		LeaveLsb = 0,
	};
	uint64_t	PrevTimestamp = 0;
	uint8_t*	Cursor = Buffer;
	uint8_t		Buffer[BufferSize];
	static thread_local ThreadBuffer TlsInstance;
};

thread_local ThreadBuffer ThreadBuffer::TlsInstance;

////////////////////////////////////////////////////////////////////////////////
inline void ThreadBuffer::Enter(uint64_t Timestamp, uint32_t ScopeId, int Flags)
{
	TlsInstance.EnterImpl(Timestamp, ScopeId, Flags);
}

////////////////////////////////////////////////////////////////////////////////
inline void ThreadBuffer::Leave(uint64_t Timestamp)
{
	TlsInstance.LeaveImpl(Timestamp);
}

////////////////////////////////////////////////////////////////////////////////
// Thread exit: force out whatever is still buffered.
ThreadBuffer::~ThreadBuffer()
{
	Flush(true);
}

////////////////////////////////////////////////////////////////////////////////
// Emits the buffered records once the overflow headroom is reached (or
// unconditionally when Force), then resets the delta base and cursor.
void ThreadBuffer::Flush(bool Force)
{
	using namespace Private;

	if (!Force && (Cursor <= (Buffer + BufferSize - Overflow)))
		return;

	TRACE_LOG(CpuProfiler, EventBatch, true)
		<< EventBatch.Data(Buffer, uint32(ptrdiff_t(Cursor - Buffer)));

	PrevTimestamp = 0;
	Cursor = Buffer;
}
//////////////////////////////////////////////////////////////////////////////// void ThreadBuffer::EnterImpl(uint64_t Timestamp, uint32_t ScopeId, int Flags) { Timestamp -= PrevTimestamp; PrevTimestamp += Timestamp; Cursor += encode64_7bit((Timestamp) << 1 | EnterLsb, Cursor); Cursor += encode32_7bit(ScopeId, Cursor); bool ShouldFlush = (Flags & CpuScopeFlags::CpuFlush); Flush(ShouldFlush); } //////////////////////////////////////////////////////////////////////////////// void ThreadBuffer::LeaveImpl(uint64_t Timestamp) { Timestamp -= PrevTimestamp; PrevTimestamp += Timestamp; Cursor += encode64_7bit((Timestamp << 1) | LeaveLsb, Cursor); Flush(false); } } // namespace Private //////////////////////////////////////////////////////////////////////////////// int ScopeNew(const std::string_view& Name) { using namespace Private; static int volatile NextSpecId = 1; int SpecId = AtomicAddRelaxed(&NextSpecId, 1); uint32 NameSize = uint32(Name.size()); TRACE_LOG(CpuProfiler, EventSpec, true, NameSize) << EventSpec.Id(uint32(SpecId)) << EventSpec.Name(Name.data(), NameSize); return SpecId; } //////////////////////////////////////////////////////////////////////////////// TraceCpuScope::~TraceCpuScope() { using namespace Private; if (!_ScopeId) return; uint64 Timestamp = TimeGetTimestamp(); ThreadBuffer::Leave(Timestamp); } //////////////////////////////////////////////////////////////////////////////// void TraceCpuScope::Enter(int ScopeId, int Flags) { using namespace Private; _ScopeId = ScopeId; uint64 Timestamp = TimeGetTimestamp(); ThreadBuffer::Enter(Timestamp, ScopeId, Flags); } } // namespace Trace } // namespace UE #endif // TRACE_IMPLEMENT // {{{1 log-header ///////////////////////////////////////////////////////////// TRACE_CHANNEL_EXTERN(LogChannel) namespace UE { namespace Trace { namespace Private { void LogMessageImpl(int Id, const void* ParamBuffer, int ParamSize); int LogMessageNew(const std::string_view& Format, const std::string_view& File, int Line); 
template void LogMessage(int Id, ARGS&&... Args) { LogMessageImpl(Id, nullptr, 0); } } // namespace Private } // namespace Trace } // namespace UE #define TRACE_LOG_MESSAGE(format, ...) \ if (LogChannel) { \ using namespace std::literals; \ static int message_id; \ if (message_id == 0) \ message_id = trace::Private::LogMessageNew( \ format##sv, \ TRACE_PRIVATE_CONCAT(__FILE__, sv), \ __LINE__); \ trace::Private::LogMessage(message_id, ##__VA_ARGS__); \ } \ do {} while (0) // {{{1 log-source ///////////////////////////////////////////////////////////// #if TRACE_IMPLEMENT TRACE_CHANNEL_DEFINE(LogChannel) namespace UE { namespace Trace { namespace Private { #if 0 TRACE_EVENT_BEGIN(Logging, LogCategory, NoSync|Important) TRACE_EVENT_FIELD(const void*, CategoryPointer) TRACE_EVENT_FIELD(uint8, DefaultVerbosity) TRACE_EVENT_FIELD(UE::Trace::AnsiString, Name) TRACE_EVENT_END() #endif TRACE_EVENT_BEGIN(Logging, LogMessageSpec, NoSync|Important) TRACE_EVENT_FIELD(uint32, LogPoint) //TRACE_EVENT_FIELD(uint16, CategoryPointer) TRACE_EVENT_FIELD(uint16, Line) TRACE_EVENT_FIELD(UE::Trace::AnsiString, FileName) TRACE_EVENT_FIELD(UE::Trace::AnsiString, FormatString) TRACE_EVENT_END() TRACE_EVENT_BEGIN(Logging, LogMessage, NoSync) TRACE_EVENT_FIELD(uint32, LogPoint) TRACE_EVENT_FIELD(uint64, Cycle) TRACE_EVENT_FIELD(uint8[], FormatArgs) TRACE_EVENT_END() //////////////////////////////////////////////////////////////////////////////// void LogMessageImpl(int Id, const void* ParamBuffer, int ParamSize) { (void)ParamBuffer; (void)ParamSize; uint64 Timestamp = TimeGetTimestamp(); TRACE_LOG(Logging, LogMessage, true) << LogMessage.LogPoint(Id) << LogMessage.Cycle(Timestamp); } //////////////////////////////////////////////////////////////////////////////// int LogMessageNew( const std::string_view& Format, const std::string_view& File, int Line) { static int volatile NextId = 1; int Id = AtomicAddRelaxed(&NextId, 1); int DataSize = 0; DataSize += Format.size(); DataSize += 
File.size(); TRACE_LOG(Logging, LogMessageSpec, true, DataSize) << LogMessageSpec.LogPoint(Id) << LogMessageSpec.Line(uint16(Line)) << LogMessageSpec.FileName(File.data(), int(File.size())) << LogMessageSpec.FormatString(Format.data(), int(Format.size())); return Id; } } // namespace Private } // namespace Trace } // namespace UE #endif // TRACE_IMPLEMENT /* vim: set noet foldlevel=1 foldmethod=marker : */