/*
 * top_03_asynch_xc.h
 *
 *  Created on: 25. feb. 2021
 *      Author: teig
 */

#ifndef TOP_03_ASYNCH_XC_H_
#define TOP_03_ASYNCH_XC_H_

#if (TOPOLOGY != TOP_03A_HAS_2_TILE_016_NODE_T0_2N_AS_CC_6CN_T1_8CN_STOPS) // in _xc_test_combinable.xc
    #error
#endif

// THE PROBLEM IS (I think) that we probably need some barrier, since [[notification]] and [[clears_notification]]
// seem to be meant for a single client-server relationship only, not for three clients "inside" one server, as here.
// See https://www.xcore.com/viewtopic.php?f=26&t=8090 "[[notification]] and [[clears_notification]] only for single client usage?"
// BASICALLY THIS PROBABLY IS A DEAD END
// Aside: I have some barrier code in https://www.teigfam.net/oyvind/home/technology/215-my-xc-code-downloads-page/ #209.alive

#if (ASYNCH_USE_CLEARS_NOTIFICATION == 1)
    #define CLEARS_NOTIFICATION [[clears_notification]]
#else
    #define CLEARS_NOTIFICATION
#endif

#if (ASYNCH_GUARDED_DO_IO_SERVER == 0)

    typedef interface a_conn_if_t {
        void do_io_server (const unsigned value_to, const pos_t pos, const unsigned call_cnt);
        [[notification]] slave void all_clients_seen (void);
        CLEARS_NOTIFICATION {pos_t, unsigned} get_result (void);
    } a_conn_if_t;

    typedef float temp_degC_t;

    typedef interface a_con_if_t {
        void SET_TEMP (const temp_degC_t temp_degC_in);
        [[notification]] slave void ALL_CLIENTS_SEEN (void);
        [[clears_notification]] temp_degC_t GET_TEMP (void);
    } a_con_if_t;

    typedef interface x_conn_if_t {
        void do_io_server (const unsigned value_to, const pos_t pos, const unsigned call_cnt);
        [[notification]] slave void all_clients_seen (void);
        {pos_t, unsigned} get_result (void);
    } x_conn_if_t;

#elif (ASYNCH_GUARDED_DO_IO_SERVER == 1)

    typedef interface a_conn_if_t {
        [[guarded]] void do_io_server (const unsigned value_to, const pos_t pos, const unsigned call_cnt);
        [[notification]] slave void all_clients_seen (void);
        CLEARS_NOTIFICATION {pos_t, unsigned} get_result (void);
    } a_conn_if_t;

    // This has to do with [[guarded]]:
    // XMOS ticket #158323# on 2021 02 26
    // XMOS ticket #32474#  on 2020 02 11
    // XMOS ticket #31286#  on 2018 06 26
    //     xcc1: internal compiler error
    //     Failed in /jenkins/RELEASE_14_4/sb/tools_xcc1_c_llvm/FrontEnd/Lowering/lower_combined_pars.cpp, line 183
    //     info->stateObj
    // I reported this to XMOS some years ago, but it still exists in 14.4.1

#endif

typedef struct server_returns_t {
    pos_t    pos;
    unsigned value;
} server_returns_t;
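// For contrast with the three-clients-per-server usage above, here is a minimal sketch
// (not compiled in; the names single_conn_if_t and result_ready are hypothetical) of the
// single client-server pattern that [[notification]] and [[clears_notification]] are meant for:
#if 0
typedef interface single_conn_if_t {
    void                             do_io_server (const unsigned value_to);
    [[notification]] slave void      result_ready (void);     // Raised by the server when it has a result
    [[clears_notification]] unsigned get_result   (void);     // Clears the notification again
} single_conn_if_t;

// Client side, typically inside its own select:
//     i_conn.do_io_server (value);
//     ...
//     case i_conn.result_ready() : {
//         value = i_conn.get_result();
//     } break;
#endif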
// TASK Server_node_asynch
//
[[combinable]]
void Server_node_asynch (
        const unsigned     iof_row,
        const unsigned     iof_col,
        server a_conn_if_t i_conns[NUM_CONNS_PER_NODE], // all_conns[iof_row][iof_pair]
        const unsigned     iof_pair)
{
    server_context_t context;
    server_returns_t ret; // Same return value to all clients

    bool     guard_do_io_server        = true;  // Start by allowing client calls
    bool     guard_do_all_clients_seen = false;
    unsigned async_get_result_cnt      = 0;

    init_value (iof_row, iof_col, context.vals);

    debug_print_extra ("(%u %u):S[%u] = %u.%u.d%u\n",
        iof_row, iof_col, context.vals.value, iof_row, iof_pair, NUM_CONNS_PER_NODE);

    // un-const ok, since I don't change them anyhow:
    context.my_pos.iof_row      = iof_row;
    context.my_pos.iof_col      = iof_col;
    context.cycle_cnt           = 0;
    context.num_neighbours_seen = 0;

    context.time_tmr :> context.time_now_ticks;
    context.time_previous_ticks = context.time_now_ticks;

    while (1) {
        select {
            case guard_do_all_clients_seen => context.time_tmr when timerafter (context.time_now_ticks) :> void : {
                guard_do_all_clients_seen = false;
                debug_print_extra (" B(%u %u):s all_clients_seen send [%u]\n", iof_row, iof_col, async_get_result_cnt);
                switch (async_get_result_cnt) {
                    case 0: { i_conns[0].all_clients_seen(); } break;
                    case 1: { i_conns[1].all_clients_seen(); } break;
                    case 2: { i_conns[2].all_clients_seen(); } break;
                    // case NUM_CONNS_PER_NODE: { guard_do_io_server = true; } break;
                }
                debug_print_extra (" B(%u %u):s all_clients_seen done [%u]\n", iof_row, iof_col, async_get_result_cnt);
            } break;

            #if (ASYNCH_GUARDED_DO_IO_SERVER == 0)
            case i_conns[const unsigned iof_connection].do_io_server (
            #elif (ASYNCH_GUARDED_DO_IO_SERVER == 1)
            case guard_do_io_server => i_conns[const unsigned iof_connection].do_io_server (
            #endif
                    const unsigned value_to, const pos_t client_pos, const unsigned call_cnt) : {

                server_returns_t ret_tmp;

                context.iof_client = context.num_neighbours_seen;
                context.iof_connections[context.iof_client] = iof_connection;

                debug_print_extra (" A(%u %u):s do_io_server conn %u (%u %u)\n",
                    iof_row, iof_col, iof_connection, client_pos.iof_row, client_pos.iof_col);

                {ret_tmp.pos, ret_tmp.value} = Handle_server (value_to, client_pos, call_cnt, context);

                debug_print_special (" s1 (%u %u) num %u with (%u %u) value %u async_get_result_cnt %u\n",
                    iof_row, iof_col, context.num_neighbours_seen, client_pos.iof_row, client_pos.iof_col,
                    ret_tmp.value, async_get_result_cnt);

                if (context.num_neighbours_seen == 0) { // 0 set in Handle_server when NUM_CONNS_PER_NODE received
                    guard_do_io_server        = false;  // No more do_io_server for a while
                    guard_do_all_clients_seen = true;   // Start first all_clients_seen..
                    context.time_tmr :> context.time_now_ticks; // ..now
                    async_get_result_cnt = 0;
                    ret = ret_tmp; // Just the calculated value is to be returned
                } else {}
            } break;

            case i_conns[const unsigned iof_connection].get_result (void) -> {pos_t pos_return, unsigned value_from} : {
                pos_return = ret.pos;
                value_from = ret.value;
                async_get_result_cnt++;
                debug_print_extra (" C(%u %u):s get_result conn %u next [%u]\n",
                    iof_row, iof_col, iof_connection, async_get_result_cnt);
                debug_print_special (" C(%u %u) pos (%u %u) value %u async_get_result_cnt %u\n",
                    iof_row, iof_col, ret.pos.iof_row, ret.pos.iof_col, ret.value, async_get_result_cnt);
                guard_do_all_clients_seen = true; // Maybe more all_clients_seen..
                context.time_tmr :> context.time_now_ticks; // ..now
            } break;
        }
    }
} // Server_node_asynch
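// Round structure implemented by Server_node_asynch above:
//   1. do_io_server is taken from each client in turn; Handle_server counts them and wraps
//      context.num_neighbours_seen back to 0 once NUM_CONNS_PER_NODE clients have been seen.
//   2. The guarded timerafter case then raises all_clients_seen towards one client at a time,
//      indexed by async_get_result_cnt.
//   3. Each get_result call re-arms the timerafter case, so the next client is only notified
//      after the previous one has collected its result. The commented-out NUM_CONNS_PER_NODE
//      case above is where guard_do_io_server would be re-enabled for the next round in the
//      [[guarded]] variant.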
[[combinable]]
void Client_node_asynch (
        const unsigned     iof_row,
        const unsigned     iof_col,
        client a_conn_if_t i_conn_0, // Hard coded: not possible to use a NUM_CONNS_PER_NODE-dimensioned array here
        const unsigned     iof_row_0, const unsigned iof_pair_0, const unsigned iof_link_0,
        client a_conn_if_t i_conn_1,
        const unsigned     iof_row_1, const unsigned iof_pair_1, const unsigned iof_link_1,
        client a_conn_if_t i_conn_2,
        const unsigned     iof_row_2, const unsigned iof_pair_2, const unsigned iof_link_2)
{
    client_context_t context;

    unsigned async_get_result_cnt = 0;
    bool     allow_do_io_server   = true;

    init_value (iof_row, iof_col, context.vals);

    debug_print_extra ("(%u %u):C[%u] = %u.%u.%u - %u.%u.%u - %u.%u.%u\n",
        iof_row, iof_col, context.vals.value,
        iof_row_0, iof_pair_0, iof_link_0,
        iof_row_1, iof_pair_1, iof_link_1,
        iof_row_2, iof_pair_2, iof_link_2);

    // un-const ok, since I don't change them anyhow:
    context.my_pos.iof_row = iof_row;
    context.my_pos.iof_col = iof_col;
    context.cycle_cnt      = 0;
    context.iof_server     = 0;
    context.call_cnt       = 0;

    context.time_tmr :> context.timeout_ticks; // AFTER now = immediate
    context.time_previous_ticks = context.timeout_ticks;

    while (1) {
        select {
            case allow_do_io_server => context.time_tmr when timerafter (context.timeout_ticks) :> void : {
                // Handle_client_asynch moved in here to make it simpler to understand
                bool all_servers_handled = false;
                {
                    debug_print_extra ("A(%u %u):c do_io_server call %u\n", iof_row, iof_col, context.iof_server);
                    // DEBUG_PRINT_HANDLE_CLIENT_1 (context);

                    switch (context.iof_server) { // Hard coded with NUM_CONNS_PER_NODE==3
                        case 0 : { i_conn_0.do_io_server (context.vals.value, context.my_pos, context.call_cnt); } break;
                        case 1 : { i_conn_1.do_io_server (context.vals.value, context.my_pos, context.call_cnt); } break;
                        case 2 : { i_conn_2.do_io_server (context.vals.value, context.my_pos, context.call_cnt); } break;
                        // default: break; // Not possible, crash
                    }

                    debug_print_extra ("A(%u %u):c do_io_server done %u\n", iof_row, iof_col, context.iof_server);

                    context.call_cnts[context.iof_server] = context.call_cnt;
                    DEBUG_PRINT_HANDLE_CLIENT_2 (context);

                    context.iof_server++;
                    context.call_cnt = (context.call_cnt + 1) % CALL_CNT_MAX;
                    context.time_tmr :> context.timeout_ticks; // AFTER now = immediate (i.e. as often as possible),
                                                               // or ROOT_WAIT_FOR_NEXT_ROUND_US will get added later
                    all_servers_handled = (context.iof_server == NUM_CONNS_PER_NODE);

                    if (all_servers_handled) {
                        const unsigned old_value = context.vals.value;
                        time32_t       time_used_ticks;
                        bool           overflow;

                        {overflow, context.vals.value} = Calculate (context.vals); // In Handle_client
                        context.iof_server = 0;

                        context.time_tmr :> context.timeout_ticks;
                        time_used_ticks = context.timeout_ticks - context.time_previous_ticks;

                        if (time_used_ticks > 0) {
                            const unsigned time_sec = (unsigned) time_used_ticks / (unsigned) XS1_TIMER_HZ;
                        } else {
                            fail ("time_used_ticks <= 0"); // Zero time, or longer than some 21 seconds (2^31 * 10 ns)
                        }

                        context.time_previous_ticks = context.timeout_ticks;
                        DEBUG_PRINT_HANDLE_CLIENT_3 (context, old_value, overflow);
                        context.cycle_cnt++;
                    } else {}

                    context.timeout_ticks += ROOT_WAIT_FOR_NEXT_ROUND_US; // delay_tics used in Server_node. No skew here
                }

                if (all_servers_handled) {
                    async_get_result_cnt = 0;
                    allow_do_io_server   = false;
                } else {}
            } break;
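            // The three cases below are identical except for the client interface end and its
            // index. They cannot be collapsed into one case over an array, since the ends
            // arrive as three separate parameters (see the i_conn_0 comment above). Each case
            // collects its server's result and re-enables the do_io_server rounds once all
            // NUM_CONNS_PER_NODE results are in.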
            case i_conn_0.all_clients_seen() : {
                debug_print_extra ("B(%u %u):c all_clients_seen received 0, send get_result\n", iof_row, iof_col);
                {context.server_pos[0], context.vals.values[0]} = i_conn_0.get_result ();
                debug_print_extra ("C(%u %u):c get_result done 0\n", iof_row, iof_col);
                async_get_result_cnt++;
                if (async_get_result_cnt == NUM_CONNS_PER_NODE) {
                    allow_do_io_server = true;
                    context.time_tmr :> context.timeout_ticks; // AFTER now = immediate
                    context.timeout_ticks += ROOT_WAIT_FOR_NEXT_ROUND_US;
                } else {}
            } break;

            case i_conn_1.all_clients_seen() : {
                debug_print_extra ("B(%u %u):c all_clients_seen received 1, send get_result\n", iof_row, iof_col);
                {context.server_pos[1], context.vals.values[1]} = i_conn_1.get_result ();
                debug_print_extra ("C(%u %u):c get_result done 1\n", iof_row, iof_col);
                async_get_result_cnt++;
                if (async_get_result_cnt == NUM_CONNS_PER_NODE) {
                    allow_do_io_server = true;
                    context.time_tmr :> context.timeout_ticks; // AFTER now = immediate
                    context.timeout_ticks += ROOT_WAIT_FOR_NEXT_ROUND_US;
                } else {}
            } break;

            case i_conn_2.all_clients_seen() : {
                debug_print_extra ("B(%u %u):c all_clients_seen received 2, send get_result\n", iof_row, iof_col);
                {context.server_pos[2], context.vals.values[2]} = i_conn_2.get_result ();
                debug_print_extra ("C(%u %u):c get_result done 2\n", iof_row, iof_col);
                async_get_result_cnt++;
                if (async_get_result_cnt == NUM_CONNS_PER_NODE) {
                    allow_do_io_server = true;
                    context.time_tmr :> context.timeout_ticks; // AFTER now = immediate
                    context.timeout_ticks += ROOT_WAIT_FOR_NEXT_ROUND_US;
                } else {}
            } break;
        }
    }
} // Client_node_asynch

#endif /* TOP_03_ASYNCH_XC_H_ */
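// A minimal wiring sketch (hypothetical, not part of this file; the array all_conns and the
// NUM_ROWS/NUM_PAIRS dimensions and index names are assumed here) of how one server and its
// three clients might be placed on a par, based on the parameter lists above:
//
//     a_conn_if_t all_conns[NUM_ROWS][NUM_PAIRS][NUM_CONNS_PER_NODE];
//     par {
//         Server_node_asynch (row, col, all_conns[row][pair], pair);
//         Client_node_asynch (c_row, c_col,
//             all_conns[row_0][pair_0][link_0], row_0, pair_0, link_0,
//             all_conns[row_1][pair_1][link_1], row_1, pair_1, link_1,
//             all_conns[row_2][pair_2][link_2], row_2, pair_2, link_2);
//     }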