tutorial: added CI
@@ -80,13 +80,12 @@
 * you are going to specify if you don't like the defaults.
**************************************************************************** */

/* Define this to be the number of bits an `int` takes up on your system. The
 * default, if not auto-detected, is 32 bits.
 *
 * Example:
 */
/* #define UNITY_INT_WIDTH 16 */
#define UNITY_INT_WIDTH 32

/* Define this to be the number of bits a `long` takes up on your system. The
 * default, if not autodetected, is 32 bits. This is used to figure out what
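The width options in this hunk (UNITY_INT_WIDTH here, UNITY_LONG_WIDTH and UNITY_POINTER_WIDTH in the hunks below) only help if they actually match the target. A minimal sketch of a compile-time check that could sit right after the define, assuming a C11 compiler; it fails the build, rather than the test run, if the configured value is wrong:

#include <limits.h>   /* CHAR_BIT */

/* Assumes UNITY_INT_WIDTH is already defined above in this header. */
_Static_assert(sizeof(int) * CHAR_BIT == UNITY_INT_WIDTH,
               "UNITY_INT_WIDTH does not match this target's int");

The same pattern works for long and pointer widths by swapping in sizeof(long) or sizeof(void *).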
@@ -97,7 +96,6 @@
 * Example:
 */
/* #define UNITY_LONG_WIDTH 16 */
#define UNITY_LONG_WIDTH 32

/* Define this to be the number of bits a pointer takes up on your system. The
 * default, if not autodetected, is 32-bits. If you're getting ugly compiler
@@ -106,7 +104,6 @@
 * Example:
 */
/* #define UNITY_POINTER_WIDTH 64 */
#define UNITY_POINTER_WIDTH 32

/* Unity will automatically include 64-bit support if it auto-detects it, or if
 * your `int`, `long`, or pointer widths are greater than 32-bits. Define this
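Unity uses UNITY_POINTER_WIDTH when it compares and prints addresses, for example in its pointer assertion. A small sketch of such a test; the buffer name is made up for illustration:

#include "unity.h"

static int buffer[4];

void test_pointer_assertions(void)
{
    int *p = &buffer[0];
    TEST_ASSERT_EQUAL_PTR(buffer, p);   /* same address, so this passes */
}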
@@ -119,7 +116,6 @@
#undef UNITY_INCLUDE_64
#endif


/* *************************** FLOATING POINT TYPES ****************************
 * In the embedded world, it's not uncommon for targets to have no support for
 * floating point operations at all or to have support that is limited to only
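When 64-bit support ends up enabled (auto-detected, or because one of the widths above is set past 32), the 64-bit assertion variants such as TEST_ASSERT_EQUAL_INT64 become usable. A minimal sketch with arbitrary values:

#include <stdint.h>
#include "unity.h"

void test_64bit_assertions(void)
{
    int64_t big = INT64_C(1) << 40;         /* needs more than 32 bits to hold */
    TEST_ASSERT_EQUAL_INT64(INT64_C(1) << 40, big);
    TEST_ASSERT_EQUAL_HEX64(UINT64_C(0x123456789ABCDEF0),
                            UINT64_C(0x123456789ABCDEF0));
}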
@@ -130,11 +126,11 @@
 * the only option.
**************************************************************************** */

/* By default, Unity guesses that you will want single precision floating point
 * support, but not double precision. It's easy to change either of these using
 * the include and exclude options here. You may include neither, just float,
 * or both, as suits your needs.
 */
/* #define UNITY_EXCLUDE_FLOAT */
/* #define UNITY_INCLUDE_DOUBLE */
/* #define UNITY_EXCLUDE_DOUBLE */
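Under the defaults described here, single precision assertions work out of the box while the double variants need UNITY_INCLUDE_DOUBLE. A sketch that guards each assertion on the corresponding option, so it builds under any of the combinations:

#include "unity.h"

void test_float_assertions(void)
{
#ifndef UNITY_EXCLUDE_FLOAT
    TEST_ASSERT_EQUAL_FLOAT(0.5f, 1.0f / 2.0f);
#endif
#ifdef UNITY_INCLUDE_DOUBLE
    TEST_ASSERT_EQUAL_DOUBLE(0.5, 1.0 / 2.0);
#endif
}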
@@ -190,7 +186,6 @@
/* #define UNITY_FLOAT_PRECISION 0.001f */
/* #define UNITY_DOUBLE_PRECISION 0.001f */


/* *************************** TOOLSET CUSTOMIZATION ***************************
 * In addition to the options listed above, there are a number of other options
 * which will come in handy to customize Unity's behavior for your specific
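As far as I understand it, UNITY_FLOAT_PRECISION and UNITY_DOUBLE_PRECISION set the tolerance that the plain equality assertions apply implicitly; when a single test needs a different tolerance, the explicit _WITHIN form avoids touching the global setting. A sketch with a hypothetical reading:

#include "unity.h"

void test_explicit_tolerance(void)
{
    float measured = 3.1410f;                             /* hypothetical sensor reading */
    TEST_ASSERT_FLOAT_WITHIN(0.001f, 3.1416f, measured);  /* off by ~0.0006, inside the window */
}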
@@ -1,10 +1,7 @@
-/**
- * \file test_dummy.c
- */
-
-#include "unity.h"
+/** \file test_dummy.c */
 
 #include "dummy/dummy.h"
+#include "unity.h"
 
 void setUp(void)
 {
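Since the hunk above cuts off inside setUp, here is what a complete, runnable version of this test file could look like; dummy_return_zero is a hypothetical stand-in for whatever dummy/dummy.h really declares:

/** \file test_dummy.c */

#include "dummy/dummy.h"
#include "unity.h"

void setUp(void)
{
    /* Runs before every test; nothing to prepare here. */
}

void tearDown(void)
{
    /* Runs after every test; nothing to clean up here. */
}

/* dummy_return_zero() is hypothetical; substitute the real API from dummy.h. */
static void test_dummy_returns_zero(void)
{
    TEST_ASSERT_EQUAL_INT(0, dummy_return_zero());
}

int main(void)
{
    UNITY_BEGIN();
    RUN_TEST(test_dummy_returns_zero);
    return UNITY_END();
}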