/*
 * top_26_xc.h
 *
 *  Created on: 16 March 2021
 *      Author: teig
 */

#ifndef TOP_26_XC_H_
#define TOP_26_XC_H_

#if (TOPOLOGY == TOP_26_TESTING_SYNCH_PHASE)
    #if (TOP_WARNINGS==1) // From "makefile"
        #warning TOP_STR "Top_26"
    #endif
#else
    #error Not Top_26
#endif

/*
  _                    _
 | |                  | |
 | |     _____   _____| |_   _
 | |    / _ \ \ / / _ \ | | | |
 | |___| (_) \ V /  __/ | |_| |
 |______\___/ \_/ \___|_|\__, |
                           __/ |
                          |___/
*/

//-------------------------------------------------------------------------------------------------
// TOP_20_HAS_2_TILE_004_T0_2N_BY3SUB_T1_2N_BY3SUB_RUNS_FAIR
// FAIR and CS_SYNCHRONIZE, but will not scale to more than 2*2
// Not fair, does not synchronise, also see blog note
// https://www.teigfam.net/oyvind/home/technology/217-my-xc-combined-combinable-notes/#all_combined

typedef int chan_of_t; // ... chans don't have types. It's up to the user to ensure that sent and received are equal

//-------------------------------------------------------------------------------------------------
// TOP_21_HAS_1_TILE_004_T0_4N_RUNS_FAIR
// but does not scale: TOP_23_HAS_2_TILE_004_T0_8N_T1_8N_CHANENDS_OVERRUN

typedef interface conn_to_server_if_t {
    void to_server (const temp_degC_t);
} conn_to_server_if_t;

typedef interface conn_to_client_if_t {
    void to_client (const temp_degC_t);
} conn_to_client_if_t;

/*
   ____
  / __ \
 | |  | |_ __   ___ _ __   ___ __ _ ___  ___  ___
 | |  | | '_ \ / _ \ '_ \ / __/ _` / __|/ _ \/ __|
 | |__| | |_) |  __/ | | | (_| (_| |\__ \  __/\__ \
  \____/| .__/ \___|_| |_|\___\__,_|___/\___||___/
        | |
        |_|
*/

//-------------------------------------------------------------------------------------------------
// TOP_01 .. TOP_15
// Not fair, does not synchronise, also see blog note
// But very [[combinable]] [[combine]] friendly - if they don't stop
// https://www.teigfam.net/oyvind/home/technology/217-my-xc-combined-combinable-notes/#all_combined

typedef interface conn_if_t_ {
    {unsigned, temp_degC_t} do_io_server (const temp_degC_t);
} conn_if_t_;
//
//-------------------------------------------------------------------------------------------------
// TOP_03A_HAS_2_TILE_016_NODE_T0_2N_AS_CC_6CN_T1_8CN_STOPS
// Theoretically very nice, allows for asynchronicity and being in synch and fair, but probably
// is not made for multi-clients. See https://www.xcore.com/viewtopic.php?f=26&t=8090

typedef interface a_conn_if_t {
    [[guarded]]            void do_io_server (const unsigned value_to);
    [[notification]] slave void all_clients_seen (void);
    [[clears_notification]] {unsigned} get_result (void);
} a_conn_if_t;

//-------------------------------------------------------------------------------------------------
// TOP_03G_HAS_2_TILE_016_NODE_T0_2N_AS_CC_6CN_T1_8CN_NO_BUILD
// This [[guarded]] protocol would have solved the whole thing, but the compiler will not build the config

typedef interface g_conn_if_t {
    [[guarded]] {unsigned} do_io_server (const unsigned value_to);
} g_conn_if_t;
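// For reference, a minimal sketch of how a client side might drive the notification variant a_conn_if_t
// above. It is NOT used in this topology (see the compiler problem discussed below), and all names here
// other than the interface calls themselves are made up for the sketch:
//
//     void some_client_task (client a_conn_if_t i_conn) {   // hypothetical task
//         unsigned my_value = 0;
//         while (1) {
//             i_conn.do_io_server (my_value);               // [[guarded]] by the server
//             select {
//                 case i_conn.all_clients_seen () : {       // raised by the server as [[notification]]
//                     unsigned result = i_conn.get_result (); // [[clears_notification]]
//                     my_value = result;
//                 } break;
//             }
//         }
//     }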
/*
   _______ ____  _____    ___    __
  |__   __/ __ \|  __ \  |__ \  / /
     | | | |  | | |__) |    ) |/ /_
     | | | |  | |  ___/    / /| '_ \
     | | | |__| | |       / /_| (_) |
     |_|  \____/|_|      |____| \___/
*/

//-------------------------------------------------------------------------------------------------
// This is a CLIENT DRIVEN interface only; it therefore introduces polling (with POLL_ALL_SYNCHED),
// in order to avoid _sliding_ of the tasks, which are required to run in harmony, in step or
// synchronized, and _fair_ (and by this ensure that no data is sent and then later
// overwritten by a newer data set).
//
// This polling solution should not have been necessary. There seems to be some kind of problem with
// xTIMEcomposer 14.4.1 (and earlier) where use of [[guarded]], [[notification]] and [[clears_notification]]
// is stopped by some internal compiler error when the OK compiled [[combinable]] code (!) is placed on cores,
// as [[combine]] par. That error is the "xcc1: internal compiler error:
// Failed in /jenkins/RELEASE_14_4/sb/tools_xcc1_c_llvm/FrontEnd/Lowering/lower_combined_pars.cpp, line 183"
//
// Polling typically is a "non-CSP" solution. With the synchronous and event-driven system, where each
// conditional choice (as select/case) may be tagged with a boolean "guard" (if evaluated to true the
// case switch "is on"), polling is _never_ necessary.
//
// However, the XC/XCore case is probably "the best" architecture in which to implement polling. Task context
// switch time either does not exist or is low (in the combinable case?), and communication is also very fast with
// no operating system overhead. And in this particular case all the nodes are supposed to run in step
// anyhow; all of them have to wait for the slowest. And the slowest is the root (0 0), which actually may
// introduce an active waiting, by ROOT_DELAY_UNTIL_NEXT_ROUND_US. So the price isn't terribly high. But using one of
// the mentioned mechanisms will be retried when xTIMEcomposer is updated. I have reported this to XMOS on three
// occasions (since 2018).
//
// SET_GET_TEMP and POLL_ALL_SYNCHED are now in capital letters, to try to make the code more readable.
// These are often visually and cognitively "hidden" in a long line.

typedef interface conn_if_t {
    // It is theoretically NOT POSSIBLE to include all_clients_seen_t
    // as a return parameter here. See this discussed in the Server_node_task
    temp_degC_t SET_GET_TEMP (const temp_degC_t temp_degC);
    // FAIR and SYNCHRONISED and COMBINABLE! Will sometimes imply more than one polling per round trip:
    all_clients_seen_t POLL_ALL_SYNCHED (void);
} conn_if_t;

// Typical usage:
//
// -------------------------------------------------------------------------------------------------
// TYPICAL conn_if_t USAGE PATTERN. Legend: @69 means cycle_cnt 69. Search for "quoted" text
// Here the client on i_conn_0 is very eager
//
// i_conn_0.SET_GET_TEMP @69 ->
//     "i_conn_0.POLL_ALL_SYNCHED @69" -> when this client's servers all synched on their clients:
//         i_conn_0.SET_GET_TEMP @70 (ok collecting _next_ data set) ->
//             i_conn_0.POLL_ALL_SYNCHED @70 "(69!=70->rejected, poll again)"     LATER all at @70
// i_conn_1.SET_GET_TEMP @69 ->          are not as eager, but in due course:     ###
//     "i_conn_1.POLL_ALL_SYNCHED @69" ->                                         ###
//         i_conn_1.SET_GET_TEMP @70 (ok) ->                                      ###
//             i_conn_1.POLL_ALL_SYNCHED @70 ok:                                  THEN all at @70
// i_conn_2.SET_GET_TEMP @69 ->          are not as eager, but in due course:     ###
//     "i_conn_2.POLL_ALL_SYNCHED @69" ->                                         ###
//         i_conn_2.SET_GET_TEMP @70 (ok) ->                                      ###
//             i_conn_2.POLL_ALL_SYNCHED @70 ok:                                  THEN all at @70

typedef enum client_state_e {
    CS_SEND_DO_IO_SERVER,
    CS_SYNCHRONIZE
} client_state_e;
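// A minimal sketch (in a comment, not compiled) of how a single client connection is meant to drive
// conn_if_t: first SET_GET_TEMP with this round's value, then POLL_ALL_SYNCHED repeatedly until the
// server reports that all of its clients have been seen. Client_node_task below does this for
// NUM_CONNS_PER_NODE connections and with timer pacing; the sketch omits both, and the local names
// (i_conn, my_temp, servers_temp, in_step) are placeholders:
//
//     temp_degC_t        my_temp = ...;                         // this node's value for cycle N
//     all_clients_seen_t in_step = false;
//
//     temp_degC_t servers_temp = i_conn.SET_GET_TEMP (my_temp); // swap values for cycle N
//     while (not in_step) {
//         in_step = i_conn.POLL_ALL_SYNCHED ();                 // false until the server has seen all its clients
//     }
//     // Only now is it safe to calculate the value for cycle N+1 and start over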
// ------------------------------------------------------------------------------------------------------------------
// Client_node_task
//
// This first sends data to all three (NUM_CONNS_PER_NODE) servers (but also gets their data back), then polls all
// three until all three servers have been visited by all three clients. This makes this architecture "fair" and
// "synchronized". The root (0 0) is the main synchronizer; together with the synchronising phase on all nodes it
// makes the whole system tick in lock-step at ROOT_WAIT_FOR_NEXT_ROUND_US intervals.
// Observe that each client is connected to three servers and each server is connected to three clients.
// This is statically connected.

[[combinable]]
void Client_node_task (
    const unsigned      iof_row,
    const unsigned      iof_col,
    client conn_if_t    i_conn_0, // Three static connections to three Server_node_task, but observe that ..
    client conn_if_t    i_conn_1, // .. in the IS_2_BY_2 node situation, the two connections may be with ..
    client conn_if_t    i_conn_2, // .. the same server. This is ok, there is no special case for it
    out buffered port:4 ?outP4_leds_)
{
    node_context_t context;

    const params_t params     = init_params ();
    temp_degC_t    temp_degC  = init_temp (iof_row, iof_col, params);
    unsigned       iof_client = 0;

    all_clients_seen_t all_clients_seen [NUM_CONNS_PER_NODE] = {false, false, false}; // Also meaning all_in_synch

    timer      tmr;
    time32_t   time_ticks;
    const bool root = is_root (iof_row, iof_col);

    client_state_e client_state = CS_SEND_DO_IO_SERVER;

    unsigned leds = BOARD_LEDS_INIT;

    if (not isnull (outP4_leds_)) {
        outP4_leds_ <: leds;
    }

    init_wt      (iof_row, iof_col, context.wt, params);
    init_context (iof_row, iof_col, context);

    if (root) {
        PRINT_BANNER_ROOT (context, CLIENT_STR, "Client_node_task root", temp_degC, TOP_STR);
    } else {
        PRINT_BANNER (context, CLIENT_STR, "Client_node_task", temp_degC);
    }

    context.cycle_cnt++; // To get data set to become like TOP_20's

    tmr :> time_ticks;

    while (1) { // Client_node_task
        select {
            case tmr when timerafter (time_ticks) :> void : {

                if (client_state == CS_SEND_DO_IO_SERVER) {

                    print_tx (context, CLIENT_STR, iof_client, temp_degC);

                    switch (iof_client) { // First this..
                        case 0: {
                            PRINT_PROGESS (context, CLIENT_STR, 0, 0);
                            context.temps_degC[0] = i_conn_0.SET_GET_TEMP (temp_degC);
                        } break;
                        case 1: {
                            PRINT_PROGESS (context, CLIENT_STR, 0, 1);
                            context.temps_degC[1] = i_conn_1.SET_GET_TEMP (temp_degC);
                        } break;
                        case 2: {
                            PRINT_PROGESS (context, CLIENT_STR, 0, 2);
                            context.temps_degC[2] = i_conn_2.SET_GET_TEMP (temp_degC);
                        } break;
                        // other: let the system crash
                    }

                    print_rx (context, CLIENT_STR, iof_client, context.temps_degC[iof_client]);

                    iof_client++; // ..then this

                    if (iof_client == NUM_CONNS_PER_NODE) {
                        iof_client = 0;
                        temp_degC  = calculate_new_temp_from_flow (context, temp_degC);

                        #if (DO_PRINT_RANGE==0)
                            if (root) {
                                print_values_with_time_mod (context, CLIENT_STR, tmr, temp_degC, DO_PRINT_VALUES_EVERY_CYCLE_CNT);
                            } else {
                                print_values_mod (context, CLIENT_STR, temp_degC, DO_PRINT_VALUES_EVERY_CYCLE_CNT);
                            }
                        #elif (DO_PRINT_RANGE==1)
                            if ((context.cycle_cnt > DO_PRINT_RANGE_CYCLE_CNT_LOW) and
                                (context.cycle_cnt < DO_PRINT_RANGE_CYCLE_CNT_HIGH)) {
                                if (root) {
                                    PRINT_VALUES_WITH_TIME (context, CLIENT_STR, tmr, temp_degC);
                                } else {
                                    PRINT_VALUES (context, CLIENT_STR, temp_degC);
                                }
                            }
                        #endif

                        context.cycle_cnt++; // at client NUM_CONNS_PER_NODE seen

                        if (not isnull (outP4_leds_)) { // Swap LED
                            leds xor_eq BOARD_LED_MASK_GREEN_ONLY; // J1.7 CH1 XCORE-200-EXPLORER
                            outP4_leds_ <: leds;
                        }

                        client_state = CS_SYNCHRONIZE;

                        tmr :> time_ticks;
                        time_ticks += CLIENT_DELAY_UNTIL_NEXT_SYNCH_POLL_US * XS1_TIMER_MHZ;
                    } else { // doing next SET_GET_TEMP
                        tmr :> time_ticks;
                        time_ticks += CLIENT_DELAY_UNTIL_NEXT_THIRD_US * XS1_TIMER_MHZ;
                        // Allowing skew here, opposite of TOP_20_HAS_2_TILE_004_T0_2N_BY3SUB_T1_2N_BY3SUB_RUNS_FAIR,
                        // since the below tmr_synch would add unknown synch time anyhow
                        // TODO: unless we had that synch be encompassed by the outer tmr
                    }

                } else if (client_state == CS_SYNCHRONIZE) {
                    // With the 2*2 nodes case, THIS client may have two connections to the same server.
                    // However, we handle the general case

                    PRINT_NL_PROGESS (context, CLIENT_STR, 1, 0);
                    if (not all_clients_seen[0]) {
                        all_clients_seen[0] = i_conn_0.POLL_ALL_SYNCHED ();
                        print_synxh (context, CLIENT_STR, all_clients_seen[0], all_clients_seen[1], all_clients_seen[2]);
                    } else {}

                    PRINT_PROGESS (context, CLIENT_STR, 1, 1);
                    if (not all_clients_seen[1]) {
                        all_clients_seen[1] = i_conn_1.POLL_ALL_SYNCHED ();
                        print_synxh (context, CLIENT_STR, all_clients_seen[0], all_clients_seen[1], all_clients_seen[2]);
                    } else {}

                    PRINT_PROGESS (context, CLIENT_STR, 1, 2);
                    if (not all_clients_seen[2]) {
                        all_clients_seen[2] = i_conn_2.POLL_ALL_SYNCHED ();
                        print_synxh (context, CLIENT_STR, all_clients_seen[0], all_clients_seen[1], all_clients_seen[2]);
                    } else {}

                    PRINT_PROGESS (context, CLIENT_STR, 1, 200);

                    if (all_clients_seen[0] and all_clients_seen[1] and all_clients_seen[2]) {
                        PRINT_PROGESS (context, CLIENT_STR, 1, 99);

                        // All three servers report all-synched: clear the flags and start the next round
                        for (unsigned ix = 0; ix < NUM_CONNS_PER_NODE; ix++) {
                            all_clients_seen[ix] = false;
                        }
                        client_state = CS_SEND_DO_IO_SERVER;

                        tmr :> time_ticks;
                        time_ticks += (ROOT_WAIT_FOR_NEXT_ROUND_US * (unsigned) root); // Delay for root, not for the other clients
                    } else {
                        tmr :> time_ticks;
                        time_ticks += (CLIENT_DELAY_UNTIL_NEXT_SYNCH_POLL_US * XS1_TIMER_MHZ);
                    }
                }
            } break;
        }
    }
} // Client_node_task

// ---------------------------
// Server_node_task functions
// ---------------------------

typedef struct synch_t {
    unsigned           from_client_set_get_cnt;
    unsigned           from_client_poll_all_synched_cnt;
    all_clients_seen_t all_clients_seen;
    key_t              key; // Follows context.cycle_cnt
    unsigned           eager_client_reject_cnt;
    key_t              client_tagged_key [NUM_CONNS_PER_NODE]; // will be initialised when used (static connections)
} synch_t;

// ------------------------------------------------------------------------------------------------------------------
// Handle_server_set_get
//
// Is called on each SET_GET_TEMP over the client connections.
// Is called exactly NUM_CONNS_PER_NODE times before a synchronisation phase takes over, at which moment a new
// temperature is also calculated by calculate_new_temp_from_flow

temp_degC_t Handle_server_set_get (
    node_context_t    &context,       // io
    synch_t           &synch,         // io
    const temp_degC_t &temp_degC_old) // i
{
    temp_degC_t temp_degC_return;

    PRINT_PROGESS (context, SERVER_STR, 0, 0);

    synch.from_client_set_get_cnt++;

    if (synch.from_client_set_get_cnt == NUM_CONNS_PER_NODE) {
        synch.from_client_set_get_cnt = 0;
        synch.all_clients_seen        = true;
        // PRINT_PROGESS (context,SERVER_STR,0,100);

        temp_degC_return = calculate_new_temp_from_flow (context, temp_degC_old);

        #if (DO_PRINT_RANGE==0)
            print_values_mod (context, SERVER_STR, temp_degC_return, DO_PRINT_VALUES_EVERY_CYCLE_CNT);
        #elif (DO_PRINT_RANGE==1)
            if ((context.cycle_cnt > DO_PRINT_RANGE_CYCLE_CNT_LOW) and
                (context.cycle_cnt < DO_PRINT_RANGE_CYCLE_CNT_HIGH)) {
                print_values (context, SERVER_STR, temp_degC_return);
            }
        #endif

        context.cycle_cnt++; // at server NUM_CONNS_PER_NODE seen
    } else {
        temp_degC_return = temp_degC_old;
    }

    return temp_degC_return;
} // Handle_server_set_get

// ------------------------------------------------------------------------------------------------------------------
// Handle_server_poll_all_clients_seen
//
// Is called on each POLL_ALL_SYNCHED over the client connections.
// Is called exactly NUM_CONNS_PER_NODE times, after which the synchronisation phase is over.
// Clients will not be finished with synchronisation at the same time.
all_clients_seen_t Handle_server_poll_all_clients_seen (
    const unsigned iof_client, // i
    node_context_t &context,   // io
    synch_t        &synch)     // io
{
    all_clients_seen_t all_clients_seen_return;

    if (synch.client_tagged_key[iof_client] != synch.key) {
        // "@70 (69!=70->rejected, poll again)"
        //
        // An eager client is getting one POLL_ALL_SYNCHED step ahead, which it is not allowed to. This client has (now,
        // "long ago") already got the POLL_ALL_SYNCHED approved, and after that sent a new SET_GET_TEMP call (which
        // is ok), but going ahead with another POLL_ALL_SYNCHED is not allowed. This client will retry and by definition
        // succeed in going on when the other clients of the "long ago" POLL_ALL_SYNCHED have finished. One or
        // two of them have still not finished that POLL_ALL_SYNCHED, and will come around to doing it here, and
        // then "open" the synch for this eager client - because they would then be at the same cycle_cnt.
        all_clients_seen_return = false;
        synch.eager_client_reject_cnt++;
        print_fix_synch (context, synch.client_tagged_key[iof_client], synch.key, iof_client, synch.eager_client_reject_cnt);
    } else if (synch.all_clients_seen) {
        all_clients_seen_return = true;
        synch.from_client_poll_all_synched_cnt++;
        if (synch.from_client_poll_all_synched_cnt == NUM_CONNS_PER_NODE) {
            // "i_conn_0.POLL_ALL_SYNCHED @69"
            // "i_conn_1.POLL_ALL_SYNCHED @69"
            // "i_conn_2.POLL_ALL_SYNCHED @69"
            synch.from_client_poll_all_synched_cnt = 0;
            synch.all_clients_seen                 = false;
            synch.key                              = context.cycle_cnt; // Get "in step" again
        }
    } else {
        all_clients_seen_return = false;
    }

    return all_clients_seen_return;
} // Handle_server_poll_all_clients_seen
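// A worked example of the key mechanism above, using the same numbers as the "TYPICAL conn_if_t USAGE
// PATTERN" legend (illustrative only; the exact interleaving depends on timing):
//
//   The server starts a round with synch.key == 69 and synch.all_clients_seen == false.
//   1. Each of the three clients calls SET_GET_TEMP: client_tagged_key[i] is set to 69 (== cycle_cnt),
//      and on the third call all_clients_seen becomes true and cycle_cnt steps to 70.
//   2. The eager client polls: client_tagged_key[0] == 69 == key and all_clients_seen is true, so it gets
//      true back, sends its next SET_GET_TEMP, and client_tagged_key[0] becomes 70.
//   3. If it polls again now: client_tagged_key[0] == 70 != key (still 69), so it is rejected and must
//      poll again later (eager_client_reject_cnt counts these).
//   4. When the two other clients have also polled true, from_client_poll_all_synched_cnt reaches
//      NUM_CONNS_PER_NODE and key is set to cycle_cnt (70). The eager client's polls now pass the key
//      check for round 70, but it still waits for all_clients_seen to become true for that round.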
// ------------------------------------------------------------------------------------------------------------------
// Server_node_task
//
// This first receives data from all three (NUM_CONNS_PER_NODE) clients (but also returns its data back), then engages
// in a synchronising phase until all three clients have synchronised. However, one of the connected clients may be
// "eager" because it has synchronised with all of its servers; that client may come on with a new data set (and get
// the present value returned back). However, it will be stopped in its attempt should that client also try to finish
// with its three servers even before this server's clients have finished the initial synchronisation. This way they
// still end up running in lock-step.

[[combinable]]
void Server_node_task (
    const unsigned   iof_row,
    const unsigned   iof_col,
    server conn_if_t i_conn_0, // Three static connections to three Client_node_task, but observe that ..
    server conn_if_t i_conn_1, // .. in the IS_2_BY_2 node situation, the two connections may come from ..
    server conn_if_t i_conn_2) // .. the same client. This is ok, there is no special case for it
{
    node_context_t context;

    const params_t params    = init_params ();
    temp_degC_t    temp_degC = init_temp (iof_row, iof_col, params);

    synch_t synch; // server_state_e: none such, since this server does not know its "state"

    init_wt      (iof_row, iof_col, context.wt, params);
    init_context (iof_row, iof_col, context);

    PRINT_BANNER (context, SERVER_STR, "Server_node_task", temp_degC);

    context.cycle_cnt++;           // (*) First this..
    synch.key = context.cycle_cnt; //     ..then this

    synch.from_client_set_get_cnt          = 0;
    synch.from_client_poll_all_synched_cnt = 0;
    synch.all_clients_seen                 = false;
    synch.eager_client_reject_cnt          = 0;

    // (*) To get data set to become like TOP_20's

    while (1) { // Server_node_task
        // [[ordered]] not allowed in a combinable task
        select {
            //--------------------------------------------------------------------------------------------------
            // It is theoretically NOT POSSIBLE to include all_clients_seen_t as a return parameter here since
            // an eager client or two may advance to the next step of cycle_cnt.
            // ------------------------------------------------------------------------------------------------
            case i_conn_0.SET_GET_TEMP (const temp_degC_t temp_degC_in) -> temp_degC_t temp_degC_return : {
                temp_degC_return           = temp_degC;         // always return old value
                synch.client_tagged_key[0] = context.cycle_cnt; // not from synch.key
                // context.temps_degC[0] = temp_degC_in;
                print_rx (context, SERVER_STR, 0, temp_degC_in);
                temp_degC = Handle_server_set_get (context, synch, temp_degC);
                print_tx (context, SERVER_STR, 0, temp_degC_return);
            } break;

            case i_conn_1.SET_GET_TEMP (const temp_degC_t temp_degC_in) -> temp_degC_t temp_degC_return : {
                temp_degC_return           = temp_degC;         // always return old value
                synch.client_tagged_key[1] = context.cycle_cnt; // not from synch.key
                // context.temps_degC[1] = temp_degC_in;
                print_rx (context, SERVER_STR, 1, temp_degC_in);
                temp_degC = Handle_server_set_get (context, synch, temp_degC);
                print_tx (context, SERVER_STR, 1, temp_degC_return);
            } break;

            case i_conn_2.SET_GET_TEMP (const temp_degC_t temp_degC_in) -> temp_degC_t temp_degC_return : {
                temp_degC_return           = temp_degC;         // always return old value
                synch.client_tagged_key[2] = context.cycle_cnt; // not from synch.key
                // context.temps_degC[2] = temp_degC_in;
                print_rx (context, SERVER_STR, 2, temp_degC_in);
                temp_degC = Handle_server_set_get (context, synch, temp_degC);
                print_tx (context, SERVER_STR, 2, temp_degC_return);
            } break;

            // ------------------------------------------------------------------------------------------------
            // Observe that these clients don't know of each other.
            // The clients poll here because we have found no way to make a "notification type" interface work
            // for three (NUM_CONNS_PER_NODE) clients using each server
            // ------------------------------------------------------------------------------------------------
            case i_conn_0.POLL_ALL_SYNCHED () -> all_clients_seen_t all_clients_seen_return : {
                all_clients_seen_return = Handle_server_poll_all_clients_seen (0, context, synch);
                PRINT_PROGESS (context, SERVER_STR, 0, all_clients_seen_return);
                print_synch (context, SERVER_STR, 0, synch.from_client_poll_all_synched_cnt,
                             all_clients_seen_return, synch.key, synch.client_tagged_key[0]);
            } break;

            case i_conn_1.POLL_ALL_SYNCHED () -> all_clients_seen_t all_clients_seen_return : {
                all_clients_seen_return = Handle_server_poll_all_clients_seen (1, context, synch);
                PRINT_PROGESS (context, SERVER_STR, 1, all_clients_seen_return);
                print_synch (context, SERVER_STR, 1, synch.from_client_poll_all_synched_cnt,
                             all_clients_seen_return, synch.key, synch.client_tagged_key[1]);
            } break;

            case i_conn_2.POLL_ALL_SYNCHED () -> all_clients_seen_t all_clients_seen_return : {
                all_clients_seen_return = Handle_server_poll_all_clients_seen (2, context, synch);
                PRINT_PROGESS (context, SERVER_STR, 2, all_clients_seen_return);
                print_synch (context, SERVER_STR, 2, synch.from_client_poll_all_synched_cnt,
                             all_clients_seen_return, synch.key, synch.client_tagged_key[2]);
            } break;
        }
    }
} // Server_node_task

#if (TOP_26_CONFIG == IS_2_BY_2)

int main (void) {

    conn_if_t conn [NUM_ROWS][NUM_PAIRS_PER_ROW][NUM_CONNS_PER_NODE];
    //             [ROW]     [PAIR]              [CONN] = [R][P][C]
    //             [ 2 ]     [ 1 ]               [ 3 ]

    #define VER_NS_X [1][0][1] // NS North-South top-bottom
    #define HOR_1_EW [1][0][2] // EW East-West belt side-to-side
    #define HOR_1_A  [1][0][0] // [A,B,C] Between horizontal columns
    #define VER_01_X [0][0][1] // [X,Y,Z] Between vertical rows
    #define HOR_0_EW [0][0][2] // EW East-West belt side-to-side
    #define HOR_0_A  [0][0][0] // [A,B,C] Between horizontal columns

    par {
        on tile[0]: par {
            Client_node_task (0,0, conn HOR_0_EW, conn VER_NS_X, conn HOR_0_A, outP4_leds);
            Server_node_task (0,1, conn HOR_0_A,  conn VER_01_X, conn HOR_0_EW);
        }
        on tile[1]: par {
            Client_node_task (1,0, conn HOR_1_EW, conn VER_01_X, conn HOR_1_A, null);
            Server_node_task (1,1, conn HOR_1_A,  conn VER_NS_X, conn HOR_1_EW);
        }
    }
    return 0;
} // main
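// To make the wiring easier to read at a glance: the #define names above are plain index triplets into
// the conn array, so the first Client_node_task call above expands as written out by hand here (the
// preprocessor does exactly this):
//
//     Client_node_task (0,0, conn HOR_0_EW,   conn VER_NS_X,   conn HOR_0_A,    outP4_leds);
//     // is
//     Client_node_task (0,0, conn[0][0][2],   conn[1][0][1],   conn[0][0][0],   outP4_leds);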
#elif (TOP_26_CONFIG == IS_4_BY_4)

int main (void) {

    conn_if_t conn [NUM_ROWS][NUM_PAIRS_PER_ROW][NUM_CONNS_PER_NODE];
    //             [ROW]     [PAIR]              [CONN] = [R][P][C]
    //             [ 4 ]     [ 2 ]               [ 3 ]

    #define VER_NS_Y [3][1][1] // NS North-South top-bottom
    #define VER_NS_X [3][0][1] // NS North-South top-bottom
    #define HOR_3_EW [3][1][2] // EW East-West belt side-to-side
    #define HOR_3_C  [3][1][0] // [A,B,C] Between horizontal columns
    #define HOR_3_B  [3][0][2] // [A,B,C] Between horizontal columns
    #define HOR_3_A  [3][0][0] // [A,B,C] Between horizontal columns
    #define VER_23_Y [2][1][1] // [X,Y,Z] Between vertical rows
    #define VER_23_X [2][0][1] // [X,Y,Z] Between vertical rows
    #define HOR_2_EW [2][1][2] // EW East-West belt side-to-side
    #define HOR_2_C  [2][1][0] // [A,B,C] Between horizontal columns
    #define HOR_2_B  [2][0][2] // [A,B,C] Between horizontal columns
    #define HOR_2_A  [2][0][0] // [A,B,C] Between horizontal columns
    #define VER_12_Y [1][1][1] // [X,Y,Z] Between vertical rows
    #define VER_12_X [1][0][1] // [X,Y,Z] Between vertical rows
    #define HOR_1_EW [1][1][2] // EW East-West belt side-to-side
    #define HOR_1_C  [1][1][0] // [A,B,C] Between horizontal columns
    #define HOR_1_B  [1][0][2] // [A,B,C] Between horizontal columns
    #define HOR_1_A  [1][0][0] // [A,B,C] Between horizontal columns
    #define VER_01_Y [0][1][1] // [X,Y,Z] Between vertical rows
    #define VER_01_X [0][0][1] // [X,Y,Z] Between vertical rows
    #define HOR_0_EW [0][1][2] // EW East-West belt side-to-side
    #define HOR_0_C  [0][1][0] // [A,B,C] Between horizontal columns
    #define HOR_0_B  [0][0][2] // [A,B,C] Between horizontal columns
    #define HOR_0_A  [0][0][0] // [A,B,C] Between horizontal columns

    #if (TOP_26_AS_PARTLY_COMBINED==0)
        par {
            on tile[0]: par {
                Client_node_task (0,0, conn HOR_0_EW, conn VER_NS_X, conn HOR_0_A, outP4_leds);
                Server_node_task (0,1, conn HOR_0_A,  conn VER_01_X, conn HOR_0_B);
                Client_node_task (0,2, conn HOR_0_B,  conn VER_NS_Y, conn HOR_0_C, null);
                Server_node_task (0,3, conn HOR_0_C,  conn VER_01_Y, conn HOR_0_EW);
                Client_node_task (1,0, conn HOR_1_EW, conn VER_01_X, conn HOR_1_A, null);
                Server_node_task (1,1, conn HOR_1_A,  conn VER_12_X, conn HOR_1_B);
                Client_node_task (1,2, conn HOR_1_B,  conn VER_01_Y, conn HOR_1_C, null);
                Server_node_task (1,3, conn HOR_1_C,  conn VER_12_Y, conn HOR_1_EW);
            }
            on tile[1]: par {
                Client_node_task (2,0, conn HOR_2_EW, conn VER_12_X, conn HOR_2_A, null);
                Server_node_task (2,1, conn HOR_2_A,  conn VER_23_X, conn HOR_2_B);
                Client_node_task (2,2, conn HOR_2_B,  conn VER_12_Y, conn HOR_2_C, null);
                Server_node_task (2,3, conn HOR_2_C,  conn VER_23_Y, conn HOR_2_EW);
                Client_node_task (3,0, conn HOR_3_EW, conn VER_23_X, conn HOR_3_A, null);
                Server_node_task (3,1, conn HOR_3_A,  conn VER_NS_X, conn HOR_3_B);
                Client_node_task (3,2, conn HOR_3_B,  conn VER_23_Y, conn HOR_3_C, null);
                Server_node_task (3,3, conn HOR_3_C,  conn VER_NS_Y, conn HOR_3_EW);
            }
        }
    #elif (TOP_26_AS_PARTLY_COMBINED==1)
        par {
            on tile[0]: par {
                Client_node_task (0,0, conn HOR_0_EW, conn VER_NS_X, conn HOR_0_A, outP4_leds);
                Server_node_task (0,1, conn HOR_0_A,  conn VER_01_X, conn HOR_0_B);
            }
            on tile[0]: [[combine]] par {
                Client_node_task (0,2, conn HOR_0_B,  conn VER_NS_Y, conn HOR_0_C, null);
                Server_node_task (0,3, conn HOR_0_C,  conn VER_01_Y, conn HOR_0_EW);
                Client_node_task (1,0, conn HOR_1_EW, conn VER_01_X, conn HOR_1_A, null);
                Server_node_task (1,1, conn HOR_1_A,  conn VER_12_X, conn HOR_1_B);
                Client_node_task (1,2, conn HOR_1_B,  conn VER_01_Y, conn HOR_1_C, null);
                Server_node_task (1,3, conn HOR_1_C,  conn VER_12_Y, conn HOR_1_EW);
                Client_node_task (2,0, conn HOR_2_EW, conn VER_12_X, conn HOR_2_A, null);
                Server_node_task (2,1, conn HOR_2_A,  conn VER_23_X, conn HOR_2_B);
                Client_node_task (2,2, conn HOR_2_B,  conn VER_12_Y, conn HOR_2_C, null);
                Server_node_task (2,3, conn HOR_2_C,  conn VER_23_Y, conn HOR_2_EW);
                Client_node_task (3,0, conn HOR_3_EW, conn VER_23_X, conn HOR_3_A, null);
                Server_node_task (3,1, conn HOR_3_A,  conn VER_NS_X, conn HOR_3_B);
                Client_node_task (3,2, conn HOR_3_B,  conn VER_23_Y, conn HOR_3_C, null);
                Server_node_task (3,3, conn HOR_3_C,  conn VER_NS_Y, conn HOR_3_EW);
            }
        }
    #endif

    return 0;
} // main

#endif

#endif /* TOP_26_XC_H_ */