Home

Resume

Blog

Teikitu


/* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
/*  »Project«   Teikitu Gaming System (TgS) (∂)
    »File«      TgS (CLANG-X86) Common - Base - API - Platform [Atomic].i_inc
    »Author«    Andrew Aye (EMail: mailto:andrew.aye@gmail.com, Web: http://www.andrewaye.com)
    »Version«   4.51 / »GUID« A9981407-3EC9-42AF-8B6F-8BE6DD919615                                                                                                        */
/*   -------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Copyright: © 2002-2017, Andrew Aye.  All Rights Reserved.
    This software is free for non-commercial use.  Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
    following conditions are met:
    Redistribution of source code must retain this copyright notice, this list of conditions and the following disclaimers.
    Redistribution in binary form must reproduce this copyright notice, this list of conditions and the following disclaimers in the documentation and other materials
    provided with the distribution.
    The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission.
    The intellectual property rights of the algorithms used reside with Andrew Aye.
    You may not use this software, in whole or in part, in support of any commercial product without the express written consent of the author.
    There is no warranty or other guarantee of fitness of this software for any purpose. It is provided solely "as is".                                                   */
/* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
/* == Common ============================================================================================================================================================ */

/* -.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-. */
/*  Public Functions                                                                                                                                                      */
/* -.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-. */

/* ---- Atomic ---------------------------------------------------------------------------------------------------------------------------------------------------------- */


/* ---- F(tgAM, _WRITE_FENCE) ------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Store fence — all stores issued before this call become globally visible before any store issued after it (x86 SFENCE intrinsic).                                      */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgVOID F(tgAM, _WRITE_FENCE)( TgVOID )
{
    _mm_sfence();
}


/* ---- F(tgAM, _READ_FENCE) -------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Load fence — all loads issued before this call complete before any load issued after it (x86 LFENCE intrinsic).                                                        */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgVOID F(tgAM, _READ_FENCE)( TgVOID )
{
    _mm_lfence();
}


/* ---- F(tgAM, _FULL_FENCE) -------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Full fence — all loads and stores issued before this call complete before any memory operation issued after it (x86 MFENCE intrinsic).                                 */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgVOID F(tgAM, _FULL_FENCE)( TgVOID )
{
    _mm_mfence();
}


/* ---- Atomic Pointer Functions ---------------------------------------------------------------------------------------------------------------------------------------- */

/* ---- F(tgAMPT, _READ) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Sequentially-consistent atomic load of a pointer-sized value.                                                                                                         */
/*  NOTE(review): the access is performed through a TgUINTPTR alias of the target — assumes TgATOMIC_PVOID and TgUINTPTR have identical size/representation; confirm in   */
/*  the platform type header.                                                                                                                                             */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE P_TgVOID F(tgAMPT, _READ)( volatile TgATOMIC_PVOID *piTarget )
{
    return ((P_TgVOID)__atomic_load_n((volatile TgUINTPTR*)piTarget, __ATOMIC_SEQ_CST));
}


/* ---- F(tgAMPT, _WRITE) ----------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Sequentially-consistent atomic store of a pointer-sized value.                                                                                                        */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgVOID F(tgAMPT, _WRITE)( volatile TgATOMIC_PVOID *piTarget, PC_TgVOID pVal )
{
    __atomic_store_n((volatile TgUINTPTR*)piTarget, (TgUINTPTR)pVal, __ATOMIC_SEQ_CST);
}


/* ---- F(tgAMPT, _XCHG) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Atomically store pVal into the target; returns the pointer value held prior to the exchange.                                                                          */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE P_TgVOID F(tgAMPT, _XCHG)( volatile TgATOMIC_PVOID *piTarget, PC_TgVOID pVal )
{
    return ((P_TgVOID)__atomic_exchange_n( (volatile TgUINTPTR*)piTarget, (TgUINTPTR)pVal, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAMPT, _XCMP) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Compare-exchange: store pVal only if the target currently equals pCmp. Returns the value observed in the target before the operation (== pCmp on success).            */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE P_TgVOID F(tgAMPT, _XCMP)( volatile TgATOMIC_PVOID *piTarget, PC_TgVOID pVal, PC_TgVOID pCmp )
{
    TgUINTPTR uiCmp = (TgUINTPTR)pCmp;
    /* On failure the builtin writes the observed value back into uiCmp, so uiCmp always ends up holding the target's prior contents. */
    __atomic_compare_exchange_n( (volatile TgUINTPTR*)piTarget, &uiCmp, (TgUINTPTR)pVal, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST );
    return ((P_TgVOID)uiCmp);
}


#if defined(TgCOMPILE_64BIT_POINTER) && TgCOMPILE_64BIT_POINTER

/* ---- F(tgAMXX, _READ) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  64-bit-pointer build: TgUINTXX is 64 bits wide, so forward to the signed 64-bit primitive and cast the result back.                                                   */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _READ)( volatile TgATOMIC_UINTXX *piTarget )
{
    return ((TgUINTXX)F(tgAM64, _READ)( (volatile TgATOMIC_SINT64*)piTarget ));
}


/* ---- F(tgAMXX, _WRITE) ----------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Sequentially-consistent store, forwarded to the signed 64-bit primitive.                                                                                              */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgVOID F(tgAMXX, _WRITE)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    F(tgAM64, _WRITE)( (volatile TgATOMIC_SINT64*)piTarget, (C_TgSINT64)uiVal );
}


/* ---- F(tgAMXX, _XADD) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Atomically add uiVal to the target; returns the value held prior to the addition (fetch-and-add semantics of tgAM64_XADD).                                            */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _XADD)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    /* Route through the F() name-decoration macro like every other forwarder in this file (was a direct call to tgAM64_XADD, which bypasses the decoration scheme). */
    return ((TgUINTXX)F(tgAM64, _XADD)( (volatile TgATOMIC_SINT64*)piTarget, (C_TgSINT64)uiVal ));
}


/* ---- F(tgAMXX, _XSUB) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Atomically subtract uiVal from the target; return semantics are those of F(tgAM64, _XSUB).                                                                            */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _XSUB)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    return ((TgUINTXX)F(tgAM64, _XSUB)( (volatile TgATOMIC_SINT64*)piTarget, (C_TgSINT64)uiVal ));
}


/* ---- F(tgAMXX, _AND) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic bitwise AND with uiVal, forwarded to the signed 64-bit primitive.                                                                                              */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _AND)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    return ((TgUINTXX)F(tgAM64, _AND)( (volatile TgATOMIC_SINT64*)piTarget, (C_TgSINT64)uiVal ));
}


/* ---- F(tgAMXX, _OR) -------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic bitwise OR with uiVal, forwarded to the signed 64-bit primitive.                                                                                               */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _OR)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    return ((TgUINTXX)F(tgAM64, _OR)( (volatile TgATOMIC_SINT64*)piTarget, (C_TgSINT64)uiVal ));
}


/* ---- F(tgAMXX, _XOR) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic bitwise XOR with uiVal, forwarded to the signed 64-bit primitive.                                                                                              */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _XOR)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    return ((TgUINTXX)F(tgAM64, _XOR)( (volatile TgATOMIC_SINT64*)piTarget, (C_TgSINT64)uiVal ));
}


/* ---- F(tgAMXX, _INC) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic increment by one, forwarded to the signed 64-bit primitive.                                                                                                    */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _INC)( volatile TgATOMIC_UINTXX *piTarget )
{
    return ((TgUINTXX)F(tgAM64, _INC)( (volatile TgATOMIC_SINT64*)piTarget ));
}


/* ---- F(tgAMXX, _DEC) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic decrement by one, forwarded to the signed 64-bit primitive.                                                                                                    */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _DEC)( volatile TgATOMIC_UINTXX *piTarget )
{
    return ((TgUINTXX)F(tgAM64, _DEC)( (volatile TgATOMIC_SINT64*)piTarget ));
}


/* ---- F(tgAMXX, _XCHG) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Atomic exchange: store uiVal and return the value previously held.                                                                                                    */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _XCHG)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    return ((TgUINTXX)F(tgAM64, _XCHG)( (volatile TgATOMIC_SINT64*)piTarget, (C_TgSINT64)uiVal ));
}


/* ---- F(tgAMXX, _XCMP) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Compare-exchange: store uiVal only if the target equals uiCmp. Returns the value observed in the target prior to the operation (== uiCmp on success).                 */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _XCMP)( volatile TgATOMIC_UINTXX *puiTarget, C_TgUINTXX uiVal, C_TgUINTXX uiCmp )
{
    /* Cast the result to the function's return type TgUINTXX (was an inconsistent TgSIZE cast). */
    return ((TgUINTXX)F(tgAM64, _XCMP)( (volatile TgATOMIC_SINT64*)puiTarget, (C_TgSINT64)uiVal, (C_TgSINT64)uiCmp ));
}


/* ---- F(tgAMXX, _XCMP2) ----------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Double-width (128-bit) compare-exchange over a pair of adjacent 64-bit words.                                                                                         */
/*  puiCmpResult[0] supplies the expected low word and puiCmpResult[1] the expected high word; uiLow/uiHigh form the replacement value.                                    */
/*  Returns TgTRUE when the 128-bit value matched and the store took place, TgFALSE otherwise.                                                                            */
/*  NOTE(review): the target is re-typed to unsigned __int128 through a union — the x86 CMPXCHG16B instruction requires 16-byte alignment of the target; confirm callers  */
/*  guarantee that alignment. Also note that, despite the "Result" in the parameter name, the observed value is NOT written back to puiCmpResult on failure — verify      */
/*  against callers.                                                                                                                                                      */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgBOOL F(tgAMXX, _XCMP2)( volatile TgATOMIC_UINTXX *puiTarget, C_TgUINTXX uiHigh, C_TgUINTXX uiLow, PC_TgUINTXX puiCmpResult )
{
    /* Assemble the 128-bit comparand and replacement from their 64-bit halves. */
    unsigned __int128 uiCmp = (((unsigned __int128)puiCmpResult[1]) << 64) | puiCmpResult[0];
    unsigned __int128 uiVal = (((unsigned __int128)uiHigh) << 64) | uiLow;
    union
    {
        volatile TgATOMIC_UINTXX *pui64;
        volatile unsigned __int128 *pui128;
    } sTarget;
    sTarget.pui64 = puiTarget;
    return (0 != __atomic_compare_exchange_n( sTarget.pui128, &uiCmp, uiVal, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ? TgTRUE : TgFALSE);
}


#elif defined(TgCOMPILE_32BIT_POINTER) && TgCOMPILE_32BIT_POINTER

/* ---- F(tgAMXX, _READ) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  32-bit-pointer build: TgUINTXX is 32 bits wide, so forward to the signed 32-bit primitive and cast the result back.                                                   */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _READ)( volatile TgATOMIC_UINTXX *piTarget )
{
    /* Use the sequentially-consistent atomic load like the 64-bit branch does (was a plain volatile dereference, which carries no ordering guarantee). */
    return ((TgUINTXX)F(tgAM32, _READ)( (volatile TgATOMIC_SINT32*)piTarget ));
}


/* ---- F(tgAMXX, _WRITE) ----------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Sequentially-consistent store, forwarded to the signed 32-bit primitive.                                                                                              */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgVOID F(tgAMXX, _WRITE)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    /* Fixed: forwarded to _READ with a 64-bit pointer cast and returned a void expression; the 32-bit branch must call the 32-bit _WRITE primitive. */
    F(tgAM32, _WRITE)( (volatile TgATOMIC_SINT32*)piTarget, (C_TgSINT32)uiVal );
}


/* ---- F(tgAMXX, _XADD) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Atomically add uiVal to the target; return semantics are those of F(tgAM32, _XADD) (fetch-and-add).                                                                   */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _XADD)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    return ((TgUINTXX)F(tgAM32, _XADD)( (volatile TgATOMIC_SINT32*)piTarget, (C_TgSINT32)uiVal ));
}


/* ---- F(tgAMXX, _XSUB) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Atomically subtract uiVal from the target, forwarded to the signed 32-bit primitive.                                                                                  */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _XSUB)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    return ((TgUINTXX)F(tgAM32, _XSUB)( (volatile TgATOMIC_SINT32*)piTarget, (C_TgSINT32)uiVal ));
}


/* ---- F(tgAMXX, _AND) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic bitwise AND with uiVal, forwarded to the signed 32-bit primitive.                                                                                              */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _AND)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    return ((TgUINTXX)F(tgAM32, _AND)( (volatile TgATOMIC_SINT32*)piTarget, (C_TgSINT32)uiVal ));
}


/* ---- F(tgAMXX, _OR) -------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic bitwise OR with uiVal, forwarded to the signed 32-bit primitive.                                                                                               */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _OR)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    return ((TgUINTXX)F(tgAM32, _OR)( (volatile TgATOMIC_SINT32*)piTarget, (C_TgSINT32)uiVal ));
}


/* ---- F(tgAMXX, _XOR) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic bitwise XOR with uiVal, forwarded to the signed 32-bit primitive.                                                                                              */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _XOR)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    return ((TgUINTXX)F(tgAM32, _XOR)( (volatile TgATOMIC_SINT32*)piTarget, (C_TgSINT32)uiVal ));
}


/* ---- F(tgAMXX, _INC) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic increment by one, forwarded to the signed 32-bit primitive.                                                                                                    */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _INC)( volatile TgATOMIC_UINTXX *piTarget )
{
    return ((TgUINTXX)F(tgAM32, _INC)( (volatile TgATOMIC_SINT32*)piTarget ));
}


/* ---- F(tgAMXX, _DEC) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic decrement by one, forwarded to the signed 32-bit primitive.                                                                                                    */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _DEC)( volatile TgATOMIC_UINTXX *piTarget )
{
    return ((TgUINTXX)F(tgAM32, _DEC)( (volatile TgATOMIC_SINT32*)piTarget ));
}


/* ---- F(tgAMXX, _XCHG) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Atomic exchange: store uiVal and return the value previously held.                                                                                                    */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _XCHG)( volatile TgATOMIC_UINTXX *piTarget, C_TgUINTXX uiVal )
{
    /* Added the (volatile TgATOMIC_SINT32*) target cast that every other forwarder in this branch performs; the unsigned pointer was being passed to a signed primitive. */
    return ((TgUINTXX)F(tgAM32, _XCHG)( (volatile TgATOMIC_SINT32*)piTarget, (C_TgSINT32)uiVal ));
}


/* ---- F(tgAMXX, _XCMP) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Compare-exchange: store uiVal only if the target equals uiCmp. Returns the value observed in the target prior to the operation (== uiCmp on success).                 */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgUINTXX F(tgAMXX, _XCMP)( volatile TgATOMIC_UINTXX *puiTarget, C_TgUINTXX uiVal, C_TgUINTXX uiCmp )
{
    /* Cast the result to the function's return type TgUINTXX (was an inconsistent TgSIZE cast). */
    return ((TgUINTXX)F(tgAM32, _XCMP)( (volatile TgATOMIC_SINT32*)puiTarget, (C_TgSINT32)uiVal, (C_TgSINT32)uiCmp ));
}


/* ---- F(tgAMXX, _XCMP2) ----------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Double-width compare-exchange for the 32-bit-pointer build. Deliberately unimplemented: the #error below halts compilation if this branch is ever instantiated,       */
/*  flagging that a 64-bit CAS implementation still needs to be written for this configuration.                                                                           */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgBOOL F( tgAMXX, _XCMP2 )(volatile TgATOMIC_UINTXX *puiTarget, C_TgUINTXX uiHigh, C_TgUINTXX uiLow, PC_TgUINTXX puiCmpResult)
{
#error
}


#else
    #error Unsupported pointer size
#endif




/* ---- Atomic 16bit Functions ------------------------------------------------------------------------------------------------------------------------------------------ */
#if TgCOMPILE_16BIT_ATOMIC

/* ---- F(tgAM16, _READ) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Sequentially-consistent atomic load of a 16-bit value.                                                                                                                */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT16 F(tgAM16, _READ)( volatile TgATOMIC_SINT16 *piTarget )
{
    /* Use the __atomic builtin like tgAM32/tgAM64 _READ (was a plain volatile dereference, which carries no ordering guarantee). */
    return (__atomic_load_n( piTarget, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM16, _WRITE) ----------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomically store iVal into the target; returns the 16-bit value held prior to the store (exchange semantics, per the TgSINT16 return type).                           */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT16 F(tgAM16, _WRITE)( volatile TgATOMIC_SINT16 *piTarget, C_TgSINT16 iVal )
{
    /* Fixed: the body referenced an undeclared identifier pVal; the parameter is iVal. */
    return (__atomic_exchange_n( piTarget, iVal, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM16, _AND) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomically AND iVal into the target; returns the resulting (new) value, mirroring F(tgAM32, _AND).                                                                    */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT16 F(tgAM16, _AND)( volatile TgATOMIC_SINT16 *piTarget, C_TgSINT16 iVal )
{
    /* Was tgInterlockedAnd16 — a Win32-style wrapper that does not belong in this CLANG translation unit; use the __atomic builtin like every other primitive here. */
    return (__atomic_and_fetch( piTarget, iVal, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM16, _OR) -------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomically OR iVal into the target; returns the resulting (new) value, mirroring F(tgAM32, _OR).                                                                      */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT16 F(tgAM16, _OR)( volatile TgATOMIC_SINT16 *piTarget, C_TgSINT16 iVal )
{
    /* Was tgInterlockedOr16 — a Win32-style wrapper that does not belong in this CLANG translation unit; use the __atomic builtin like every other primitive here. */
    return (__atomic_or_fetch( piTarget, iVal, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM16, _XOR) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomically XOR iVal into the target; returns the resulting (new) value, mirroring F(tgAM32, _XOR).                                                                    */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT16 F(tgAM16, _XOR)( volatile TgATOMIC_SINT16 *piTarget, C_TgSINT16 iVal )
{
    /* Was tgInterlockedXor16 — a Win32-style wrapper that does not belong in this CLANG translation unit; use the __atomic builtin like every other primitive here. */
    return (__atomic_xor_fetch( piTarget, iVal, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM16, _INC) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomically increment the target by one; returns the incremented (new) value, mirroring F(tgAM32, _INC).                                                               */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT16 F(tgAM16, _INC)( volatile TgATOMIC_SINT16 *piTarget )
{
    /* Was tgInterlockedIncrement16 — a Win32-style wrapper that does not belong in this CLANG translation unit; use the __atomic builtin like every other primitive here. */
    return (__atomic_add_fetch( piTarget, 1, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM16, _DEC) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomically decrement the target by one; returns the decremented (new) value, mirroring F(tgAM32, _DEC).                                                               */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT16 F(tgAM16, _DEC)( volatile TgATOMIC_SINT16 *piTarget )
{
    /* Was tgInterlockedDecrement16 — a Win32-style wrapper that does not belong in this CLANG translation unit; use the __atomic builtin like every other primitive here. */
    return (__atomic_sub_fetch( piTarget, 1, __ATOMIC_SEQ_CST ));
}

/*# TgCOMPILE_16BIT_ATOMIC */
#endif




/* ---- Atomic 32bit Functions ------------------------------------------------------------------------------------------------------------------------------------------ */

/* ---- F(tgAM32, _READ) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Sequentially-consistent atomic load of a 32-bit value.                                                                                                                */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT32 F(tgAM32, _READ)( volatile TgATOMIC_SINT32 *piTarget )
{
    return (__atomic_load_n(piTarget, __ATOMIC_SEQ_CST));
}


/* ---- F(tgAM32, _WRITE) ----------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Sequentially-consistent atomic store of a 32-bit value.                                                                                                               */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgVOID F(tgAM32, _WRITE)( volatile TgATOMIC_SINT32 *piTarget, C_TgSINT32 iVal )
{
    __atomic_store_n( piTarget, iVal, __ATOMIC_SEQ_CST );
}


/* ---- F(tgAM32, _XADD) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Atomic fetch-and-add: adds iVal and returns the value held PRIOR to the addition.                                                                                     */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT32 F(tgAM32, _XADD)( volatile TgATOMIC_SINT32 *piTarget, C_TgSINT32 iVal )
{
    return (__atomic_fetch_add( piTarget, iVal, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM32, _XSUB) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Atomic fetch-and-subtract: subtracts iVal and returns the value held PRIOR to the subtraction.                                                                        */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT32 F(tgAM32, _XSUB)( volatile TgATOMIC_SINT32 *piTarget, C_TgSINT32 iVal )
{
    return (__atomic_fetch_sub( piTarget, iVal, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM32, _AND) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic AND; note the op_fetch form — unlike _XADD/_XSUB, this returns the NEW value after the operation.                                                              */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT32 F(tgAM32, _AND)( volatile TgATOMIC_SINT32 *piTarget, C_TgSINT32 iVal )
{
    return (__atomic_and_fetch( piTarget, iVal, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM32, _OR) -------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic OR; returns the NEW value after the operation.                                                                                                                 */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT32 F(tgAM32, _OR)( volatile TgATOMIC_SINT32 *piTarget, C_TgSINT32 iVal )
{
    return (__atomic_or_fetch( piTarget, iVal, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM32, _XOR) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic XOR; returns the NEW value after the operation.                                                                                                                */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT32 F(tgAM32, _XOR)( volatile TgATOMIC_SINT32 *piTarget, C_TgSINT32 iVal )
{
    return (__atomic_xor_fetch( piTarget, iVal, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM32, _INC) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic increment; returns the NEW (incremented) value.                                                                                                                */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT32 F(tgAM32, _INC)( volatile TgATOMIC_SINT32 *piTarget )
{
    return (__atomic_add_fetch( piTarget, 1, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM32, _DEC) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/*  Atomic decrement; returns the NEW (decremented) value.                                                                                                                */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT32 F(tgAM32, _DEC)( volatile TgATOMIC_SINT32 *piTarget )
{
    return (__atomic_sub_fetch( piTarget, 1, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM32, _XCHG) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/*  Atomic exchange: stores iVal and returns the value previously held.                                                                                                   */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT32 F(tgAM32, _XCHG)( volatile TgATOMIC_SINT32 *piTarget, C_TgSINT32 iVal )
{
    return (__atomic_exchange_n( piTarget, iVal, __ATOMIC_SEQ_CST ));
}


/* ---- F(tgAM32, _XCMP) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/* R: The function returns the initial value of the target.                                                                                                               */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT32 F(tgAM32, _XCMP)( volatile TgATOMIC_SINT32 *piTarget, C_TgSINT32 iVal, C_TgSINT32 iCmp )
{
    /* The builtin writes the observed value back into iCmpP on failure, so iCmpP always holds the target's prior contents (== iCmp when the exchange succeeded). */
    TgSINT32 iCmpP = iCmp;
    __atomic_compare_exchange_n( piTarget, &iCmpP, iVal, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST );
    return (iCmpP);
}


/* ---- F(tgAM64, _XCMP) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/* R: The function returns the initial value of the target.                                                                                                               */
/*  NOTE(review): defined here, outside the TgCOMPILE_64BIT_ATOMIC guard below — presumably so a 64-bit CAS is available in every configuration; confirm intent.          */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT64 F(tgAM64, _XCMP)( volatile TgATOMIC_SINT64 *piTarget, C_TgSINT64 iVal, C_TgSINT64 iCmp )
{
    /* The builtin writes the observed value back into iCmpP on failure, so iCmpP always holds the target's prior contents (== iCmp when the exchange succeeded). */
    TgSINT64 iCmpP = iCmp;
    __atomic_compare_exchange_n( piTarget, &iCmpP, iVal, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST );
    return (iCmpP);
}




/* ---- Atomic 64bit Functions ------------------------------------------------------------------------------------------------------------------------------------------ */
#if TgCOMPILE_64BIT_ATOMIC

/* ---- F(tgAM64, _READ) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT64 F(tgAM64, _READ)( volatile TgATOMIC_SINT64 *piTarget )
{
    /* Sequentially-consistent atomic load of the 64-bit target. */
    TgSINT64 iValue;

    iValue = __atomic_load_n( piTarget, __ATOMIC_SEQ_CST );
    return (iValue);
}


/* ---- F(tgAM64, _WRITE) ----------------------------------------------------------------------------------------------------------------------------------------------- */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgVOID F(tgAM64, _WRITE)( volatile TgATOMIC_SINT64 *piTarget, C_TgSINT64 iVal )
{
    /* Sequentially-consistent atomic store of iVal into the 64-bit target. */
    TgSINT64 iNewValue = iVal;

    __atomic_store( piTarget, &iNewValue, __ATOMIC_SEQ_CST );
}


/* ---- F(tgAM64, _XADD) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT64 F(tgAM64, _XADD)( volatile TgATOMIC_SINT64 *piTarget, C_TgSINT64 iVal )
{
    /* Atomic add: target += iVal. */
    TgSINT64 iPrior;

    iPrior = __atomic_fetch_add( piTarget, iVal, __ATOMIC_SEQ_CST );

    /* R: The value the target held before the addition (fetch-then-add semantics). */
    return (iPrior);
}


/* ---- F(tgAM64, _XSUB) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT64 F(tgAM64, _XSUB)( volatile TgATOMIC_SINT64 *piTarget, C_TgSINT64 iVal )
{
    /* Atomic subtract: target -= iVal. */
    TgSINT64 iPrior;

    iPrior = __atomic_fetch_sub( piTarget, iVal, __ATOMIC_SEQ_CST );

    /* R: The value the target held before the subtraction (fetch-then-sub semantics). */
    return (iPrior);
}


/* ---- F(tgAM64, _AND) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT64 F(tgAM64, _AND)( volatile TgATOMIC_SINT64 *piTarget, C_TgSINT64 iVal )
{
    /* Atomic bitwise AND: target &= iVal. */
    TgSINT64 iResult;

    iResult = __atomic_and_fetch( piTarget, iVal, __ATOMIC_SEQ_CST );

    /* R: The post-operation value of the target (op-then-fetch semantics). */
    return (iResult);
}


/* ---- F(tgAM64, _OR) -------------------------------------------------------------------------------------------------------------------------------------------------- */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT64 F(tgAM64, _OR)( volatile TgATOMIC_SINT64 *piTarget, C_TgSINT64 iVal )
{
    /* Atomic bitwise OR: target |= iVal. */
    TgSINT64 iResult;

    iResult = __atomic_or_fetch( piTarget, iVal, __ATOMIC_SEQ_CST );

    /* R: The post-operation value of the target (op-then-fetch semantics). */
    return (iResult);
}


/* ---- F(tgAM64, _XOR) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT64 F(tgAM64, _XOR)( volatile TgATOMIC_SINT64 *piTarget, C_TgSINT64 iVal )
{
    /* Atomic bitwise XOR: target ^= iVal. */
    TgSINT64 iResult;

    iResult = __atomic_xor_fetch( piTarget, iVal, __ATOMIC_SEQ_CST );

    /* R: The post-operation value of the target (op-then-fetch semantics). */
    return (iResult);
}


/* ---- F(tgAM64, _INC) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT64 F(tgAM64, _INC)( volatile TgATOMIC_SINT64 *piTarget )
{
    /* Atomic pre-increment: ++target. */
    TgSINT64 iResult;

    iResult = __atomic_add_fetch( piTarget, 1, __ATOMIC_SEQ_CST );

    /* R: The incremented (post-operation) value of the target. */
    return (iResult);
}


/* ---- F(tgAM64, _DEC) ------------------------------------------------------------------------------------------------------------------------------------------------- */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT64 F(tgAM64, _DEC)( volatile TgATOMIC_SINT64 *piTarget )
{
    /* Atomic pre-decrement: --target. */
    TgSINT64 iResult;

    iResult = __atomic_sub_fetch( piTarget, 1, __ATOMIC_SEQ_CST );

    /* R: The decremented (post-operation) value of the target. */
    return (iResult);
}


/* ---- F(tgAM64, _XCHG) ------------------------------------------------------------------------------------------------------------------------------------------------ */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
TgINLINE TgSINT64 F(tgAM64, _XCHG)( volatile TgATOMIC_SINT64 *piTarget, C_TgSINT64 iVal )
{
    /* Atomically replace the 64-bit target with iVal (sequentially consistent). */
    TgSINT64 iPrevious;

    iPrevious = __atomic_exchange_n( piTarget, iVal, __ATOMIC_SEQ_CST );

    /* R: The value the target held immediately before the exchange. */
    return (iPrevious);
}

/* ---- F(tgAM64, _XCMP2) ----------------------------------------------------------------------------------------------------------------------------------------------- */
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- */
CLANG_WARN_DISABLE_PUSH(sign-conversion)
TgINLINE TgBOOL F(tgAM64, _XCMP2)( volatile TgATOMIC_SINT64 *piTarget, C_TgSINT64 iHigh, C_TgSINT64 iLow, P_TgSINT64 piCmpResult )
{
    /* 128-bit strong compare-and-swap: store {iHigh:iLow} into the target only when it
       currently equals {piCmpResult[1]:piCmpResult[0]}.
       BUG FIX: the previous packing used `| piCmpResult[0]` / `| iLow` directly; a signed
       64-bit value is sign-extended when converted to (unsigned) __int128, so a negative low
       half would OR 1-bits into the high 64 bits and corrupt the packed operand.  The low
       half must be masked to 64 bits after conversion. */
    unsigned __int128 uiCmp = (((unsigned __int128)piCmpResult[1]) << 64)
                            | (((unsigned __int128)piCmpResult[0]) & 0xFFFFFFFFFFFFFFFFu);
    unsigned __int128 uiVal = (((unsigned __int128)iHigh) << 64)
                            | (((unsigned __int128)iLow) & 0xFFFFFFFFFFFFFFFFu);
    union
    {
        volatile TgATOMIC_SINT64 *pui64;
        volatile unsigned __int128 *pui128;
    } sTarget;
    TgBOOL bResult;

    sTarget.pui64 = piTarget;

    /* cmpxchg16b requires both the target and the comparand buffer to be 16-byte aligned. */
    TgERROR( 0 == ((TgUINTPTR)piTarget) % 16 );
    TgERROR( 0 == ((TgUINTPTR)piCmpResult) % 16 );

    bResult = 0 != __atomic_compare_exchange_n( sTarget.pui128, &uiCmp, uiVal, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ? TgTRUE : TgFALSE;

    /* Mirror the MSVC _InterlockedCompareExchange128 contract (the platform API this wraps
       elsewhere): piCmpResult receives the value observed at the target.  On success this is
       a value-preserving no-op; on failure the caller can retry without a separate reload.
       NOTE(review): confirm against the other platform implementations of _XCMP2. */
    piCmpResult[0] = (TgSINT64)(uiCmp & 0xFFFFFFFFFFFFFFFFu);
    piCmpResult[1] = (TgSINT64)(uiCmp >> 64);

    /* R: TgTRUE when the exchange was performed, TgFALSE otherwise. */
    return (bResult);
}
CLANG_WARN_DISABLE_POP(sign-conversion)


/*# TgCOMPILE_64BIT_ATOMIC */
#endif