You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

qspn.c 34KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311
  1. /* This file is part of Netsukuku
  2. * (c) Copyright 2005 Andrea Lo Pumo aka AlpT <alpt@freaknet.org>
  3. *
  4. * This source code is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as published
  6. * by the Free Software Foundation; either version 2 of the License,
  7. * or (at your option) any later version.
  8. *
  9. * This source code is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  12. * Please refer to the GNU Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Public License along with
  15. * this source code; if not, write to:
  16. * Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  17. *
  18. * --
  19. * qspn.c:
  20. *
  21. * Here there is the code that implements the Quantum Shortest Path Netsukuku
  22. * meta-algorithm, the heart of Netsukuku.
  23. */
  24. #include "includes.h"
  25. #include "endianness.h"
  26. #include "bmap.h"
  27. #include "route.h"
  28. #include "request.h"
  29. #include "pkts.h"
  30. #include "tracer.h"
  31. #include "qspn.h"
  32. #include "igs.h"
  33. #include "netsukuku.h"
  34. #include "common.h"
  35. void qspn_set_map_vars(u_char level, map_node **map, map_node **root_node,
  36. int *root_node_pos, map_gnode **gmap)
  37. {
  38. if(!level) {
  39. if(map)
  40. *map=me.int_map;
  41. if(root_node)
  42. *root_node=me.cur_node;
  43. if(root_node_pos)
  44. *root_node_pos=pos_from_node(me.cur_node, me.int_map);
  45. } else {
  46. if(map)
  47. *map=(map_node *)me.ext_map[_EL(level)];
  48. if(gmap)
  49. *gmap=me.ext_map[_EL(level)];
  50. if(root_node)
  51. *root_node=&me.cur_quadg.gnode[_EL(level)]->g;
  52. if(root_node_pos)
  53. *root_node_pos=me.cur_quadg.gid[level];
  54. }
  55. }
  56. /*
  57. * qspn_time_reset: Reset the qspn time of all the levels that go from
  58. * `start_level' to `end_level'. The total number of effective levels is
  59. * specified in `levels'.
  60. */
  61. void qspn_time_reset(int start_level, int end_level, int levels)
  62. {
  63. struct timeval cur_t;
  64. int i;
  65. if(end_level <= start_level)
  66. end_level = start_level+1;
  67. /*
  68. * We fake the cur_qspn_time, so qspn_round_left thinks that a
  69. * qspn_round was already sent
  70. */
  71. gettimeofday(&cur_t, 0);
  72. cur_t.tv_sec-=QSPN_WAIT_ROUND_LVL(levels)*2;
  73. for(i=start_level; i < end_level; i++)
  74. memcpy(&me.cur_qspn_time[i], &cur_t, sizeof(struct timeval));
  75. }
/*
 * qspn_reset_counters: resets the qspn time of all the `levels' levels
 * and sets the current and old gnode counters back to 1 (ourself).
 */
void qspn_reset_counters(u_char levels)
{
	/* Reset the qspn counters */
	qspn_time_reset(0, levels, levels);
	qspn_reset_gcount(qspn_gnode_count, GCOUNT_LEVELS, 1);
	qspn_reset_gcount(qspn_old_gcount, GCOUNT_LEVELS, 1);
}
/*
 * qspn_reset: zeroes the per-level qspn buffers and resets all the qspn
 * counters. The buffers must have been already allocated (by qspn_init())
 * for `levels' levels.
 */
void qspn_reset(u_char levels)
{
	setzero(qspn_b, sizeof(struct qspn_buffer *)*levels);
	setzero(qspn_send_mutex, sizeof(int)*levels);
	setzero(me.cur_qspn_id, sizeof(int)*levels);
	qspn_reset_counters(levels);
}
/*
 * qspn_init: registers the qspn/tracer request handlers in the
 * pkt_op_table and allocates the per-level qspn buffers for `levels'
 * levels, finally resetting them with qspn_reset().
 */
void qspn_init(u_char levels)
{
	/* register the qspn/tracer's ops in the pkt_op_table */
	add_pkt_op(TRACER_PKT, SKT_TCP, ntk_tcp_port, tracer_pkt_recv);
	add_pkt_op(TRACER_PKT_CONNECT, SKT_TCP, ntk_tcp_port, tracer_pkt_recv);
	add_pkt_op(QSPN_CLOSE, SKT_TCP, ntk_tcp_port, qspn_close);
	add_pkt_op(QSPN_OPEN, SKT_TCP, ntk_tcp_port, qspn_open);
	/*
	 * Alloc the qspn stuff
	 */
	qspn_b=xmalloc(sizeof(struct qspn_buffer *)*levels);
	qspn_send_mutex=xmalloc(sizeof(int)*levels);
	me.cur_qspn_id=xmalloc(sizeof(int)*levels);
	me.cur_qspn_time=xmalloc(sizeof(struct timeval)*levels);
	qspn_reset(levels);
}
  106. void qspn_free(void)
  107. {
  108. if(qspn_b)
  109. xfree(qspn_b);
  110. if(qspn_send_mutex)
  111. xfree(qspn_send_mutex);
  112. if(me.cur_qspn_id)
  113. xfree(me.cur_qspn_id);
  114. if(me.cur_qspn_time)
  115. xfree(me.cur_qspn_time);
  116. }
  117. void qspn_b_clean(u_char level)
  118. {
  119. struct qspn_buffer *qb=qspn_b[level];
  120. list_for(qb) {
  121. if(!qb->replies)
  122. continue;
  123. if(qb->replier)
  124. xfree(qb->replier);
  125. if(qb->flags)
  126. xfree(qb->flags);
  127. qb->replies=0;
  128. qb->replier=0;
  129. qb->flags=0;
  130. }
  131. }
  132. /*
  133. * qspn_b_add: It adds a new element in the qspn_b 'qb' buffer and returns its
  134. * position.
  135. */
  136. int qspn_b_add(struct qspn_buffer *qb, u_char replier, u_short flags)
  137. {
  138. qb->replies++;
  139. qb->replier=xrealloc(qb->replier, sizeof(u_char)*qb->replies);
  140. qb->flags=xrealloc(qb->flags, sizeof(u_short)*qb->replies);
  141. qb->replier[qb->replies-1]=replier;
  142. qb->flags[qb->replies-1]=flags;
  143. return qb->replies-1;
  144. }
  145. struct
  146. qspn_buffer *qspn_b_find_rnode(struct qspn_buffer *qb, map_node *rnode)
  147. {
  148. list_for(qb)
  149. if(qb->rnode == rnode)
  150. return qb;
  151. return 0;
  152. }
  153. int qspn_b_find_reply(struct qspn_buffer *qb, int sub_id)
  154. {
  155. int i;
  156. if(!qb)
  157. return -1;
  158. for(i=0; i<qb->replies; i++)
  159. if(qb->replier[i] == sub_id)
  160. return i;
  161. return -1;
  162. }
  163. /*
  164. * qspn_b_del_dead_rnodes: deletes all the `qspn_buffer' structs present in
  165. * the `*qb' llist which point to a rnode which doesn't exist anymore
  166. * The number of structs removed is returned.
  167. */
  168. int qspn_b_del_dead_rnodes(struct qspn_buffer **qb, map_node *root_node)
  169. {
  170. struct qspn_buffer *q=*qb, *next;
  171. int i=0;
  172. list_safe_for(q, next)
  173. if(rnode_find(root_node, q->rnode) < 0) {
  174. *qb=list_del(*qb, q);
  175. i++;
  176. }
  177. return i;
  178. }
  179. /*
  180. * qspn_b_del_all_dead_rnodes: It uses qspn_b_del_dead_rnodes() for each
  181. * element of the qspn_b global array
  182. */
  183. void qspn_b_del_all_dead_rnodes(void)
  184. {
  185. int level, tot_levels=FAMILY_LVLS;
  186. map_node *root_node;
  187. for(level=0; level<tot_levels; level++) {
  188. qspn_set_map_vars(level, 0, &root_node, 0, 0);
  189. qspn_b_del_dead_rnodes(&qspn_b[level], root_node);
  190. }
  191. }
  192. /*
  193. * qspn_round_left: It returns the milliseconds left before the QSPN_WAIT_ROUND
  194. * expires. If the round is expired it returns 0.
  195. */
  196. int qspn_round_left(u_char level)
  197. {
  198. struct timeval cur_t, t;
  199. int wait_round, cur_elapsed, diff;
  200. gettimeofday(&cur_t, 0);
  201. timersub(&cur_t, &me.cur_qspn_time[level], &t);
  202. if(t.tv_sec >= 1) {
  203. /*
  204. * There are still seconds left, so, let's not consider the
  205. * millisec.
  206. */
  207. wait_round = QSPN_WAIT_ROUND_LVL(level);
  208. cur_elapsed = t.tv_usec;
  209. diff = wait_round - cur_elapsed;
  210. /*
  211. * We need to return diff in millisec, be sure to not overflow
  212. * the int
  213. */
  214. if(diff > (INT_MAX/1000))
  215. diff=(INT_MAX/1000)*1000;
  216. else
  217. diff*=1000;
  218. } else {
  219. wait_round = QSPN_WAIT_ROUND_MS_LVL(level);
  220. cur_elapsed = MILLISEC(t);
  221. diff = wait_round - cur_elapsed;
  222. }
  223. return cur_elapsed >= wait_round ? 0 : diff;
  224. }
/*
 * update_qspn_time: It updates me.cur_qspn_time[level], i.e. the time
 * when the current qspn_round of `level' started.
 * If `new_qspn_time' is non zero, it is taken as the number of
 * milliseconds elapsed since the round started, and the start time is
 * set to (now - `new_qspn_time').
 * If it is zero, the stored start time is only normalised: when more
 * than a whole QSPN_WAIT_ROUND has already passed, the start time is
 * moved forward to the beginning of the latest round.
 * Oh, sorry this code doesn't show consideration for the relativity time shit.
 * So you can't move at a velocity near the light's speed. I'm sorry.
 */
void update_qspn_time(u_char level, u_int new_qspn_time)
{
	struct timeval cur_t, t;
	int ret;

	gettimeofday(&cur_t, 0);

	if(new_qspn_time) {
		/* round start = now - `new_qspn_time' ms */
		MILLISEC_TO_TV(new_qspn_time, t);
		timersub(&cur_t, &t, &me.cur_qspn_time[level]);
	} else
		/* `t' = time elapsed since the stored round start */
		timersub(&cur_t, &me.cur_qspn_time[level], &t);

	/* `ret' < 0 means the stored round has already expired */
	ret=QSPN_WAIT_ROUND_MS_LVL(level) - MILLISEC(t);

	if(ret < 0 && abs(ret) > QSPN_WAIT_ROUND_MS_LVL(level)) {
		ret*=-1;
		/*
		 * We round `ret' to take off the time of the passed round,
		 * then we can store in `ret' the number of ms passed since the
		 * latest round.
		 */
		ret=ret-(QSPN_WAIT_ROUND_MS_LVL(level)*(ret/QSPN_WAIT_ROUND_MS_LVL(level)));
		MILLISEC_TO_TV(ret, t);
		/*
		 * Now we can calculate when the last round has started, the
		 * result is stored in `me.cur_qspn_time[level]'
		 */
		timersub(&cur_t, &t, &me.cur_qspn_time[level]);
	}
}
  257. /*
  258. * qspn_inc_gcount: It updates the `gcount' array incrementing k
  259. * of `inc' each member which is in the position >= _EL(`level').
  260. * For example if level is 2, it will do: gcount[_EL(2)]+=inc;
  261. * gcount[_EL(3)]+=inc.
  262. * `level' must be < GCOUNT_LEVELS+1 and >= 1.
  263. */
  264. void qspn_inc_gcount(u_int *gcount, int level, int inc)
  265. {
  266. int i;
  267. if(level < 1 || level >= GCOUNT_LEVELS)
  268. return;
  269. for(i=_EL(level); i<GCOUNT_LEVELS; i++)
  270. gcount[i]+=inc;
  271. #ifdef DEBUG
  272. debug(DBG_INSANE, "Gnode_count incremented to: %d %d %d %d",
  273. gcount[0], gcount[1], gcount[2], gcount[3]);
  274. #endif
  275. }
  276. /*
  277. * qspn_dec_gcount: the same of qspn_inc_gcount(), but instead it decrements
  278. * `gcount'.
  279. */
  280. void qspn_dec_gcount(u_int *gcount, int level, int dec)
  281. {
  282. int i;
  283. if(level < 1 || level >= GCOUNT_LEVELS)
  284. return;
  285. for(i=_EL(level); i<GCOUNT_LEVELS; i++)
  286. gcount[i]-=dec;
  287. #ifdef DEBUG
  288. debug(DBG_INSANE, "Gnode_count decremented to: %d %d %d %d",
  289. gcount[0], gcount[1], gcount[2], gcount[3]);
  290. #endif
  291. }
  292. /*
  293. * qspn_reset_gcount: resets the gcount array by setting all its
  294. * first `level'# members to `value'.
  295. */
  296. void qspn_reset_gcount(u_int *gcount, int level, int value)
  297. {
  298. int i;
  299. for(i=0; i<level; i++)
  300. gcount[i]=value;
  301. #ifdef DEBUG
  302. debug(DBG_INSANE, "Gnode_count set to: %d %d %d %d",
  303. gcount[0], gcount[1], gcount[2], gcount[3]);
  304. #endif
  305. }
  306. /*
  307. * qspn_backup_gcount: copies `gcount' in `old_gcount'
  308. */
  309. void qspn_backup_gcount(u_int *old_gcount, int *gcount)
  310. {
  311. memcpy(old_gcount, gcount, sizeof(u_int)*GCOUNT_LEVELS);
  312. }
/*
 * qspn_remove_deads: It removes the dead nodes from the maps at the level
 * `level' (if any), and marks every surviving node with QSPN_OLD so the
 * next call can tell which nodes weren't refreshed by the new round.
 */
void qspn_remove_deads(u_char level)
{
	int bm, i, l, node_pos, ip[MAX_IP_INT];
	map_node *map, *node;
	map_gnode *gmap, *gnode=0;
	inet_gw *igw;

	qspn_set_map_vars(level, 0, 0, 0, &gmap);
	map=me.int_map;

	/*
	 * How to remove the dead nodes from the map? How do we know which are
	 * deads?
	 * Pretty simple, we can't know so we mark all the nodes with the
	 * QSPN_OLD flag and we wait until the next qspn_round.
	 * The nodes which still have the QSPN_OLD flag weren't updated during
	 * the previous qspn_round, thus they are dead.
	 */
	for(i=0; i<MAXGROUPNODE; i++) {
		node_pos=i;
		if(!level)
			node=(map_node *)&map[node_pos];
		else {
			/* At level > 0 the node is the `g' member of the
			 * gnode in the external map */
			gnode=&gmap[node_pos];
			node=&gnode->g;
			if(gnode->flags & GMAP_VOID)
				continue;
		}

		/* Skip ourselves and empty map entries */
		if(node->flags & MAP_ME || node->flags & MAP_VOID)
			continue;

		if((node->flags & QSPN_OLD)) {
			/* The node wasn't updated in the previous QSPN.
			 * Remove it from the maps */

			if(restricted_mode && node->flags & MAP_IGW) {
				/*
				 * The node was an Internet gw, remove it from
				 * me.igws
				 */
				igw=igw_find_node(me.igws, i, node);
				if(igw) {
					memcpy(ip, igw->ip, MAX_IP_SZ);

					/* Delete the gw from level `i' upward,
					 * following it by IP in the upper
					 * levels */
					for(l=i; l<me.cur_quadg.levels && igw; l++) {
						igw_del(me.igws, me.igws_counter, igw, l);
						if(l+1 < me.cur_quadg.levels)
							igw=igw_find_ip(me.igws, l+1, (u_int*)ip);
					}
					igw_replace_def_igws(me.igws, me.igws_counter,
							me.my_igws, me.cur_quadg.levels, my_family);
				}
			}

			if((node->flags & MAP_BNODE) && level < me.cur_quadg.levels-1) {
				/*
				 * The node is a border node, delete it from
				 * the bmap.
				 */
				bm=map_find_bnode(me.bnode_map[level],
						me.bmap_nodes[level], node_pos);
				if(bm != -1)
					me.bnode_map[level] =
						map_bnode_del(me.bnode_map[level],
								&me.bmap_nodes[level],
								&me.bnode_map[level][bm]);
			}

			if(level) {
				/*
				 * Remove all the rnodes of the bnodes which
				 * point to `node'.
				 */
				l=GET_BMAP_LEVELS(my_family);
				bmaps_del_bnode_rnode(me.bnode_map,(int*) me.bmap_nodes, l,
						node);
			}

			if(!level) {
				debug(DBG_NORMAL, "qspn: The node %d is dead", i);
				map_node_del(node);
				qspn_dec_gcount((int*)qspn_gnode_count, level+1, 1);
			} else {
				/* A dead gnode decreases the count by all the
				 * nodes it contained */
				debug(DBG_NORMAL,"The groupnode %d of level %d"
						" is dead", i, level);
				qspn_dec_gcount((int*)qspn_gnode_count, level+1,
						gnode->gcount);
				gmap_node_del(gnode);
			}
			gnode_dec_seeds(&me.cur_quadg, level);

			/* Delete its route */
			rt_update_node(0, node, 0, 0, 0, level);
		} else
			/* We are going to start a new QSPN, but first mark
			 * this node as OLD, in this way we will be able to
			 * see if it was updated during the new QSPN. */
			node->flags|=QSPN_OLD;
	}
}
/*
 * qspn_new_round: It prepares all the buffers for the new qspn_round and
 * removes the QSPN_OLD nodes from the map. The new qspn_round id is set
 * to `new_qspn_id'. If `new_qspn_id' is zero then the id is incremented by one
 * If `new_qspn_time' is not zero, the qspn_time[level] is set to the current
 * time minus `new_qspn_time'.
 */
void qspn_new_round(u_char level, int new_qspn_id, u_int new_qspn_time)
{
	int i;
	map_node *root_node, *node;

	qspn_set_map_vars(level, 0, &root_node, 0, 0);

	/* New round activated. Destroy the old one. beep. */
	if(new_qspn_id)
		me.cur_qspn_id[level]=new_qspn_id;
	else
		me.cur_qspn_id[level]++;

	if(new_qspn_time)
		update_qspn_time(level, new_qspn_time);
	else
		update_qspn_time(level, 0);

	/* Drop the reply buffers and the open/closed bnode counters of the
	 * finished round */
	qspn_b_clean(level);
	bmap_counter_reset(BMAP_LEVELS(me.cur_quadg.levels),
			me.bmap_nodes_closed);
	bmap_counter_reset(BMAP_LEVELS(me.cur_quadg.levels),
			me.bmap_nodes_opened);

	/* Copy the current gnode_count in old_gcount */
	qspn_backup_gcount(qspn_old_gcount,(int*) qspn_gnode_count);

	/* Clear the flags set during the previous qspn.
	 * Note: the `&' of the negated flags forms a single constant mask. */
	root_node->flags&=~QSPN_STARTER & ~QSPN_CLOSED & ~QSPN_OPENED;
	for(i=0; i<root_node->links; i++) {
		node=(map_node *)root_node->r_node[i].r_node;
		node->flags &= ~QSPN_CLOSED & ~QSPN_OPENED &
			~QSPN_STARTER & ~QSPN_OPENER;
	}

	/* Mark all bnodes with the BMAP_UPDATE flag, in this way
	 * tracer_store_pkt will know what bnodes weren't updated during this
	 * new round */
	bmaps_set_bnode_flag(me.bnode_map,(int*) me.bmap_nodes,
			GET_BMAP_LEVELS(my_family), BMAP_UPDATE);

	/* remove the dead nodes */
	qspn_remove_deads(level);
}
  451. /* * * Exclude functions. (see pkts.h) * * */
  452. int exclude_from_and_opened_and_glevel(TRACER_PKT_EXCLUDE_VARS)
  453. {
  454. map_node *rn;
  455. struct qspn_buffer *qb, *qbp;
  456. int reply;
  457. u_char level;
  458. if(exclude_from_and_glevel(TRACER_PKT_EXCLUDE_VARS_NAME))
  459. return 1;
  460. level=excl_level-1;
  461. qb=qspn_b[level];
  462. if(e_rnode && level-1 >= 0)
  463. rn=&e_rnode->quadg.gnode[_EL(excl_level-1)]->g;
  464. else
  465. rn=(map_node *)me.cur_node->r_node[pos].r_node;
  466. qbp=qspn_b_find_rnode(qb, rn);
  467. if(!qbp)
  468. return 0;
  469. reply=qspn_b_find_reply(qbp, sub_id);
  470. if(qbp->flags[reply] & QSPN_OPENED)
  471. return 1;
  472. return 0;
  473. }
  474. int exclude_from_and_glevel_and_closed(TRACER_PKT_EXCLUDE_VARS)
  475. {
  476. if((node->flags & QSPN_CLOSED) ||
  477. exclude_from_and_glevel(TRACER_PKT_EXCLUDE_VARS_NAME))
  478. return 1;
  479. return 0;
  480. }
  481. int exclude_from_and_glevel_and_notstarter(TRACER_PKT_EXCLUDE_VARS)
  482. {
  483. int level=excl_level-1;
  484. if(exclude_from_and_glevel(TRACER_PKT_EXCLUDE_VARS_NAME))
  485. return 1;
  486. if((!level || (node->flags & MAP_BNODE)) && !(node->flags & QSPN_STARTER))
  487. return 1;
  488. return 0;
  489. }
  490. /*
  491. * The Holy qspn_send. It is used to send a new qspn_round when something
  492. * changes around the root_node (me).
  493. */
  494. int qspn_send(u_char level)
  495. {
  496. PACKET pkt;
  497. map_node *from;
  498. int round_ms, ret=0, ret_err, upper_gid, root_node_pos, qid;
  499. map_node *map, *root_node;
  500. map_gnode *gmap;
  501. u_char upper_level;
  502. qid=me.cur_qspn_id[level];
  503. from=me.cur_node;
  504. upper_level=level+1;
  505. qspn_set_map_vars(level, &map, &root_node, &root_node_pos, &gmap);
  506. /*
  507. * Now I explain how the level stuff in the qspn works. For example, if
  508. * we want to propagate the qspn in the level 2, we store in qspn.level
  509. * the upper level (3), and the gid of the upper_level which containts
  510. * the entire level 2. Simple no?
  511. */
  512. /*If we aren't a bnode it's useless to send qspn in higher levels*/
  513. if(level && !(me.cur_node->flags & MAP_BNODE))
  514. return -1;
  515. /* Do not send qspn packets if we are hooking! */
  516. if(me.cur_node->flags & MAP_HNODE)
  517. return 0;
  518. if(qspn_send_mutex[level])
  519. return 0;
  520. else
  521. qspn_send_mutex[level]=1;
  522. /*
  523. * We have to wait the finish of the old qspn_round to start the
  524. * new one.
  525. */
  526. while((round_ms=qspn_round_left(level)) > 0) {
  527. debug(DBG_INSANE, "Waiting %dms to send a new qspn_round, lvl:"
  528. " %d", round_ms, level);
  529. usleep(round_ms*1000);
  530. update_qspn_time(level, 0);
  531. }
  532. /*
  533. * If, after the above wait, the old saved qspn_id (`qid') it's not the
  534. * same of the current it means that we receveid already a new
  535. * qspn_round in this level, so forget about it ;)
  536. */
  537. if(qid != me.cur_qspn_id[level])
  538. return 0;
  539. qspn_new_round(level, 0, 0);
  540. root_node->flags|=QSPN_STARTER;
  541. upper_gid=me.cur_quadg.gid[upper_level];
  542. ret_err=tracer_pkt_build(QSPN_CLOSE, me.cur_qspn_id[level], root_node_pos, /*IDs*/
  543. upper_gid, level,
  544. 0, 0, 0, /*Received tracer_pkt*/
  545. 0, 0, 0, /*bnode_block*/
  546. &pkt); /*Where the pkt is built*/
  547. if(ret_err) {
  548. debug(DBG_NOISE, "Cannot send the new qspn_round: "
  549. "tracer_pkt build failed.");
  550. ret=-1;
  551. goto finish;
  552. }
  553. /*... send the qspn_opened to our r_nodes*/
  554. flood_pkt_send(exclude_from_and_glevel_and_closed, upper_level, -1,
  555. -1, pkt);
  556. debug(DBG_INSANE, "Qspn_round lvl: %d id: 0x%x sent", level,
  557. me.cur_qspn_id[level]);
  558. finish:
  559. qspn_send_mutex[level]=0;
  560. return ret;
  561. }
/*
 * qspn_open_start: sends a new qspn_open when all the links are closed.
 * `from' is the node who sent the last qspn_close which closed the last
 * not-closed link.
 * `pkt_to_all' is the last qspn_close pkt sent by from, which is an rnode
 * at the `from_rpos' position in the me.cur_node rnodes. `pkt_to_all' must
 * be passed with the new tracer_pkt entry already added because it is
 * sent as is.
 * `qspn_id', `root_node_pos', `gid' and `level' are the same parameters passed
 * to tracer_pkt_build to build the `pkt_to_all' pkt.
 * This functions is called only by qspn_close().
 * It always returns 0; a pkt build failure is only logged.
 */
int qspn_open_start(int from_rpos, PACKET pkt_to_all, int qspn_id,
		int root_node_pos, int gid, int level)
{
	PACKET pkt_to_from;
	int upper_level, ret_err;

	upper_level=level+1;

	debug(DBG_INSANE, "Fwd %s(0x%x) lvl %d, to broadcast",
			rq_to_str(QSPN_OPEN), qspn_id, level);

	/*
	 * The `from' node doesn't need all the previous tracer_pkt entry
	 * (which are kept in `pkt_to_all'), so we build a new tracer_pkt
	 * only for it.
	 */
	ret_err=tracer_pkt_build(QSPN_OPEN, qspn_id, root_node_pos, gid, level,
			0, 0, 0, 0, 0, 0, &pkt_to_from);
	if(ret_err)
		debug(DBG_NOISE, "Cannot send the new qspn_open: "
				"pkt build failed.");
	else
		/* Send the pkt to `from' */
		flood_pkt_send(exclude_all_but_notfrom, upper_level,
				-1, from_rpos, pkt_to_from);

	/* Send the `pkt_to_all' pkt to all the other rnodes (if any)*/
	if(me.cur_node->links > 1) {
		pkt_to_all.hdr.op=QSPN_OPEN;
		flood_pkt_send(exclude_from_and_glevel, upper_level,
				-1, from_rpos, pkt_to_all);
	}

	return 0;
}
  604. /*
  605. * Damn, this function is so ugly, it's a real pain. 19 args. ARGH!
  606. * But without it I had to copy two times this code, and even if I choose to
  607. * use a struct to pass all the args, they are still too many and it will be
  608. * uglier than this.
  609. * I'm sorry.
  610. * Ah, yes, this function splits the unpacked qspn_pkt and returns a lot of
  611. * vars. * DO NOT TRY THIS AT HOME *
  612. */
  613. int qspn_unpack_pkt(PACKET rpkt, brdcast_hdr **new_bcast_hdr,
  614. tracer_hdr **new_tracer_hdr, tracer_chunk **new_tracer,
  615. bnode_hdr **new_bhdr, size_t *new_bblock_sz,
  616. quadro_group *rip_quadg, int *new_real_from_rpos,
  617. u_short *new_hops, u_char *new_upper_level, int *new_gid,
  618. map_node **new_from, map_node **new_root_node,
  619. map_node **new_tracer_starter, int *new_sub_id,
  620. int *new_root_node_pos, u_char *new_level, u_char *new_blevel,
  621. char *new_just_forward_it, char *new_do_real_qspn_action)
  622. {
  623. brdcast_hdr *bcast_hdr;
  624. tracer_hdr *trcr_hdr;
  625. tracer_chunk *tracer;
  626. bnode_hdr *bhdr=0;
  627. size_t bblock_sz=0;
  628. int ret_err;
  629. u_short hops;
  630. map_node *from, *root_node, *tracer_starter;
  631. int gid, root_node_pos, real_from_rpos, sub_id;
  632. u_char level, upper_level, blevel;
  633. map_gnode *gfrom, *gtracer_starter;
  634. const char *ntop;
  635. char do_real_qspn_action=0, just_forward_it=0;
  636. if(server_opt.dbg_lvl) {
  637. ntop=inet_to_str(rpkt.from);
  638. debug(DBG_NOISE, "%s(0x%x) from %s", rq_to_str(rpkt.hdr.op),
  639. rpkt.hdr.id, ntop);
  640. }
  641. ret_err=tracer_unpack_pkt(rpkt, &bcast_hdr, &trcr_hdr, &tracer, &bhdr,
  642. &bblock_sz, rip_quadg, &real_from_rpos);
  643. if(ret_err) {
  644. ntop=inet_to_str(rpkt.from);
  645. debug(DBG_NOISE, "qspn_unpack_pkt(): The %s node sent an "
  646. "invalid %s (0x%x) pkt here.", ntop,
  647. rq_to_str(rpkt.hdr.op), rpkt.hdr.id);
  648. return -1;
  649. }
  650. gid = bcast_hdr->g_node;
  651. upper_level = level = bcast_hdr->level;
  652. hops = trcr_hdr->hops;
  653. if(!level || level==1) {
  654. level=0;
  655. qspn_set_map_vars(level, 0, &root_node, &root_node_pos, 0);
  656. from = node_from_pos(tracer[hops-1].node,
  657. me.int_map);
  658. tracer_starter = node_from_pos(tracer[0].node, me.int_map);
  659. } else {
  660. level--;
  661. qspn_set_map_vars(level, 0, &root_node, &root_node_pos, 0);
  662. gfrom = gnode_from_pos(tracer[hops-1].node,
  663. me.ext_map[_EL(level)]);
  664. from = &gfrom->g;
  665. gtracer_starter = gnode_from_pos(tracer[0].node,
  666. me.ext_map[_EL(level)]);
  667. tracer_starter = &gtracer_starter->g;
  668. }
  669. blevel = level-1;
  670. from->flags&=~QSPN_OLD;
  671. sub_id=bcast_hdr->sub_id;
  672. /* Only if we are in the level 0, or if we are a bnode, we can do the
  673. * real qspn actions, otherwise we simply forward the pkt.
  674. * In other words:
  675. * `just_forward_it'==0 means that we are a truly bnode, or that
  676. * level is 0.
  677. * `do_real_qspn_action'==1 means that we are a bnode also at `level'
  678. * or that level is 0
  679. */
  680. if(level && !(me.cur_node->flags & MAP_BNODE))
  681. just_forward_it=1;
  682. if(!level || ((root_node->flags & MAP_BNODE) && !just_forward_it))
  683. do_real_qspn_action=1;
  684. /* Return all the load of pointers, Argh */
  685. *new_bcast_hdr=bcast_hdr;
  686. *new_tracer_hdr=trcr_hdr;
  687. *new_tracer=tracer;
  688. *new_bhdr=bhdr;
  689. *new_bblock_sz=bblock_sz;
  690. *new_hops=hops;
  691. *new_upper_level=upper_level;
  692. *new_gid=gid;
  693. *new_sub_id=sub_id;
  694. *new_from=from;
  695. *new_root_node=root_node;
  696. *new_tracer_starter=tracer_starter;
  697. *new_gid=gid;
  698. *new_root_node_pos=root_node_pos;
  699. *new_real_from_rpos=real_from_rpos;
  700. *new_level=level;
  701. *new_blevel=blevel;
  702. *new_upper_level=upper_level;
  703. *new_just_forward_it=just_forward_it;
  704. *new_do_real_qspn_action=do_real_qspn_action;
  705. return 0;
  706. }
  707. /*
  708. * qspn_close: It receive a QSPN_CLOSE pkt, analyzes it, stores the routes,
  709. * closes the rpkt.from link and then keeps forwarding it to all the non
  710. * closed links. If all the links are closed, a qspn_open will be sent.
  711. */
  712. int qspn_close(PACKET rpkt)
  713. {
  714. PACKET pkt;
  715. brdcast_hdr *bcast_hdr;
  716. tracer_hdr *trcr_hdr;
  717. tracer_chunk *tracer;
  718. bnode_hdr *bhdr=0;
  719. size_t bblock_sz=0, old_bblock_sz;
  720. int i, not_closed=0, ret=0, ret_err;
  721. u_short hops, old_bblocks_found=0;
  722. const char *ntop;
  723. char *old_bblock=0;
  724. char do_real_qspn_action=0, just_forward_it=0, int_qspn_starter=0;
  725. char all_bnodes_are_closed=0, start_new_qspn_open=0;
  726. u_char rq;
  727. map_node *from, *root_node, *tracer_starter, *node;
  728. quadro_group rip_quadg;
  729. u_int trtt;
  730. int gid, root_node_pos, real_from_rpos, sub_id;
  731. u_char level, upper_level, blevel;
  732. /* Drop the qspn pkt if we are hooking */
  733. if(me.cur_node->flags & MAP_HNODE)
  734. goto finish;
  735. /*
  736. * * Unpack the qspn pkt and split it * *
  737. */
  738. ret_err=qspn_unpack_pkt(rpkt, &bcast_hdr, &trcr_hdr, &tracer, &bhdr,
  739. &bblock_sz, &rip_quadg, &real_from_rpos,
  740. &hops, &upper_level, &gid,
  741. &from, &root_node,
  742. &tracer_starter, &sub_id,
  743. &root_node_pos, &level, &blevel,
  744. &just_forward_it, &do_real_qspn_action);
  745. if(ret_err < 0) {
  746. ret = -1;
  747. goto finish;
  748. }
  749. #ifdef DEBUG
  750. debug(DBG_INSANE, "QSPN_CLOSE(0x%x, lvl %d): node[0]: %d, node[1]: %d, hops: %d",
  751. rpkt.hdr.id, level, tracer[0].node,
  752. trcr_hdr->hops > 1 ? tracer[1].node : -1 ,
  753. trcr_hdr->hops);
  754. #endif
  755. /*
  756. * * * Verify the qspn_close pkt * *
  757. */
  758. /* If the rpkt is the same qspn_close we sent we can drop it */
  759. if( ( !level || (do_real_qspn_action &&
  760. (root_node->flags & QSPN_STARTER)) )
  761. && tracer_starter == root_node) {
  762. ntop=inet_to_str(rpkt.from);
  763. debug(DBG_NOISE, "qspn_close(0x%x): Dropped qspn_close from "
  764. "%s: we are the qspn_starter of that pkt!"
  765. " (hops: %d)", rpkt.hdr.id, ntop,
  766. trcr_hdr->hops);
  767. ret=-1;
  768. goto finish;
  769. }
  770. /*
  771. * Check if the qspn_round is old or if it is the new one.
  772. */
  773. if(rpkt.hdr.id >= me.cur_qspn_id[level]+1) {
  774. /* Happy new round */
  775. tracer_get_trtt(real_from_rpos, trcr_hdr, tracer, &trtt);
  776. debug(DBG_NOISE, "New qspn_round 0x%x lvl %d received,"
  777. " new qspn_time: %dms", rpkt.hdr.id,
  778. level, trtt);
  779. qspn_new_round(level, rpkt.hdr.id, trtt);
  780. } else if(rpkt.hdr.id < me.cur_qspn_id[level]) {
  781. /* Reject it, it's old */
  782. ntop=inet_to_str(rpkt.from);
  783. debug(DBG_NOISE, "qspn_close(): %s sent a qspn_close"
  784. " with a wrong qspn_id(0x%x,lvl %d)"
  785. "qid 0x%x", ntop, rpkt.hdr.id, level,
  786. me.cur_qspn_id[level]);
  787. ret=-1;
  788. goto finish;
  789. }
  790. /* Some bnode, which is in the same gnode where we are, sent a
  791. * qspn_close, so we are a qspn_starter too */
  792. if(level && tracer_starter == root_node && hops == 1 &&
  793. do_real_qspn_action) {
  794. root_node->flags|=QSPN_STARTER;
  795. /* This flag indicates that the new qspn_round we received was
  796. * sent from our gnode, so it is an internal qspn starter.*/
  797. int_qspn_starter=1;
  798. }
  799. /* We have only to forward it, nothing more */
  800. if(level && from == root_node)
  801. just_forward_it=1;
  802. /* Time to update our maps */
  803. tracer_store_pkt(rpkt.from, &rip_quadg, level, trcr_hdr, tracer,
  804. (void *)bhdr, bblock_sz, &old_bblocks_found, &old_bblock,
  805. &old_bblock_sz);
  806. if(hops > 1 && !int_qspn_starter && (root_node->flags & QSPN_STARTER) &&
  807. !(from->flags & QSPN_STARTER)) {
  808. ntop=inet_to_str(rpkt.from);
  809. debug(DBG_NOISE, "qspn_close(): Dropped qspn_close from %s: we"
  810. " are a qspn_starter, the pkts has (hops=%d)>1"
  811. " and was forwarded by a non qspn_starter",
  812. ntop, hops);
  813. goto finish;
  814. }
  815. if(bcast_hdr->flags & QSPN_BNODE_CLOSED) {
  816. if(from == root_node) {
  817. /*
  818. * This pkt passed through a bnode which has all its
  819. * links closed. Increment the counter.
  820. */
  821. me.bmap_nodes_closed[blevel]++;
  822. } else
  823. bcast_hdr->flags &= ~QSPN_BNODE_CLOSED;
  824. }
  825. if(!level || me.bmap_nodes_closed[blevel] >= (me.bmap_nodes[blevel]-1))
  826. all_bnodes_are_closed=1;
  827. not_closed=0;
  828. if(do_real_qspn_action && !just_forward_it) {
  829. /*
  830. * We close the from node and we see if there are any links,
  831. * which are still `not_closed'.
  832. */
  833. for(i=0; i<root_node->links; i++) {
  834. node=(map_node *)root_node->r_node[i].r_node;
  835. if(root_node->r_node[i].r_node == (int *)from) {
  836. #ifdef DEBUG
  837. int pos;
  838. pos = !level ? pos_from_node(node, me.int_map) :
  839. pos_from_gnode((map_gnode *)node,
  840. me.ext_map[_EL(level)]);
  841. debug(DBG_INSANE, "Closing %d [g]node, lvl %d",
  842. pos, level);
  843. #endif
  844. node->flags|=QSPN_CLOSED;
  845. }
  846. if(!(node->flags & QSPN_CLOSED))
  847. not_closed++;
  848. }
  849. /* If we are a starter then `from' is starter too */
  850. if(root_node->flags & QSPN_STARTER ) {
  851. from->flags|=QSPN_STARTER;
  852. bcast_hdr->flags|=BCAST_TRACER_STARTERS;
  853. }
  854. /*
  855. * If we have the links closed and we are in level > 0, set
  856. * the flags to let the other bnodes know.
  857. */
  858. if(!not_closed && level && !(root_node->flags & QSPN_CLOSED)) {
  859. bcast_hdr->flags|=QSPN_BNODE_CLOSED;
  860. root_node->flags|=QSPN_CLOSED;
  861. }
  862. if(!just_forward_it && !not_closed &&
  863. !(root_node->flags & QSPN_OPENER) &&
  864. !(root_node->flags & QSPN_STARTER) &&
  865. all_bnodes_are_closed) {
  866. rq=QSPN_OPEN;
  867. start_new_qspn_open=1;
  868. } else
  869. rq=QSPN_CLOSE;
  870. /*We build d4 p4ck37...*/
  871. ret_err=tracer_pkt_build(
  872. rq, rpkt.hdr.id, root_node_pos, /*IDs*/
  873. gid, level,
  874. bcast_hdr, trcr_hdr, tracer, /*Received tracer_pkt*/
  875. old_bblocks_found, old_bblock, old_bblock_sz, /*bnode_block*/
  876. &pkt); /*Where the pkt is built*/
  877. if(ret_err) {
  878. debug(DBG_NOISE, "Cannot forward the qspn_close: "
  879. "pkt build failed.");
  880. ret=-1;
  881. goto finish;
  882. }
  883. } else {
  884. /*
  885. * Increment the rtt of the last gnode chunk, because we
  886. * aren't adding any entry, but we are just forwarding it.
  887. */
  888. debug(DBG_INSANE, "qspn_close: Incrementing the last hops rtt.");
  889. ret_err=tracer_add_rtt(real_from_rpos, tracer, hops-1);
  890. if(ret_err < 0)
  891. debug(DBG_NOISE, "tracer_add_rtt(0x%x) hop %d failed",
  892. rpkt.hdr.id, hops-1);
  893. /* the pkt we're sending is a copy of the received one */
  894. pkt_copy(&pkt, &rpkt);
  895. pkt_clear(&pkt);
  896. }
  897. /*
  898. * * Forward the new pkt * *
  899. */
  900. if(start_new_qspn_open) {
  901. /*
  902. * We have all the links closed and we haven't sent a
  903. * qspn_open yet, time to become an opener
  904. */
  905. qspn_open_start(real_from_rpos, pkt, rpkt.hdr.id, root_node_pos,
  906. gid, level);
  907. root_node->flags|=QSPN_OPENER;
  908. } else if((root_node->flags & QSPN_STARTER) && !int_qspn_starter) {
  909. /* We send a normal tracer_pkt limited to the qspn_starter nodes */
  910. pkt.hdr.op=TRACER_PKT;
  911. pkt.hdr.id=++root_node->brdcast;
  912. debug(DBG_INSANE, "Fwd %s(0x%x) lvl %d to the qspn starters",
  913. rq_to_str(pkt.hdr.op), pkt.hdr.id, level);
  914. flood_pkt_send(exclude_from_and_glevel, upper_level, -1,
  915. real_from_rpos, pkt);
  916. } else {
  917. /*
  918. * Forward the qspn_close to all our r_nodes which are not
  919. * closed!
  920. */
  921. debug(DBG_INSANE, "Fwd %s(0x%x) lvl %d to broadcast",
  922. rq_to_str(pkt.hdr.op), pkt.hdr.id, level);
  923. flood_pkt_send(exclude_from_and_glevel_and_closed,
  924. upper_level, -1, real_from_rpos, pkt);
  925. }
  926. finish:
  927. if(old_bblock)
  928. xfree(old_bblock);
  929. return ret;
  930. }
  931. int qspn_open(PACKET rpkt)
  932. {
  933. PACKET pkt;
  934. brdcast_hdr *bcast_hdr;
  935. tracer_hdr *trcr_hdr;
  936. tracer_chunk *tracer;
  937. bnode_hdr *bhdr=0;
  938. struct qspn_buffer *qb=0;
  939. int not_opened=0, ret=0, reply, sub_id, ret_err;
  940. u_short hops;
  941. size_t bblock_sz=0, old_bblock_sz;
  942. u_short old_bblocks_found=0;
  943. const char *ntop;
  944. char *old_bblock=0;
  945. char do_real_qspn_action=0, just_forward_it=0, int_qspn_opener=0;
  946. char all_bnodes_are_opened=0;
  947. map_node *from, *root_node, *tracer_starter;
  948. quadro_group rip_quadg;
  949. int gid, root_node_pos, real_from_rpos;
  950. u_char level, upper_level, blevel;
  951. /* Drop the qspn pkt if we are hooking */
  952. if(me.cur_node->flags & MAP_HNODE)
  953. goto finish;
  954. /*
  955. * * Unpack the qspn pkt and split it * *
  956. */
  957. ret_err=qspn_unpack_pkt(rpkt, &bcast_hdr, &trcr_hdr, &tracer, &bhdr,
  958. &bblock_sz, &rip_quadg, &real_from_rpos,
  959. &hops, &upper_level, &gid,
  960. &from, &root_node,
  961. &tracer_starter, &sub_id,
  962. &root_node_pos, &level, &blevel,
  963. &just_forward_it, &do_real_qspn_action);
  964. if(ret_err < 0) {
  965. ret = -1;
  966. goto finish;
  967. }
  968. #ifdef DEBUG
  969. debug(DBG_INSANE, "QSPN_OPEN(0x%x, lvl %d): node[0]: %d, node[1]: %d, hops: %d",
  970. rpkt.hdr.id, level, tracer[0].node,
  971. trcr_hdr->hops > 1 ? tracer[1].node : -1 ,
  972. trcr_hdr->hops);
  973. #endif
  974. /*
  975. * * * Verify the qspn_open pkt * *
  976. */
  977. if( ( !level || (do_real_qspn_action &&
  978. (root_node->flags & QSPN_OPENER)) )
  979. && sub_id == root_node_pos) {
  980. ntop=inet_to_str(rpkt.from);
  981. debug(DBG_NOISE, "qspn_open(0x%x): Dropped qspn_open from "
  982. "%s: we are the qspn_starter of that pkt!"
  983. " (hops: %d)", rpkt.hdr.id, ntop,
  984. trcr_hdr->hops);
  985. ret=-1;
  986. goto finish;
  987. }
  988. if(rpkt.hdr.id < me.cur_qspn_id[level]) {
  989. ntop=inet_to_str(rpkt.from);
  990. debug(DBG_NOISE, "qspn_open(): %s sent a qspn_open"
  991. " with a wrong qspn_id (0x%x), cur_id: 0x%x",
  992. ntop, rpkt.hdr.id, me.cur_qspn_id[level]);
  993. ret=-1;
  994. goto finish;
  995. }
  996. /* Some bnode, which is in the same gnode where we are, sent a
  997. * qspn_open, so we are a qspn_opener too */
  998. if(level && sub_id == root_node_pos && hops == 1 &&
  999. do_real_qspn_action) {
  1000. root_node->flags|=QSPN_OPENER;
  1001. /* This flag indicates that the new qspn_open we received was
  1002. * sent from our gnode, so it is an internal qspn opener.*/
  1003. int_qspn_opener=1;
  1004. }
  1005. /* We have only to forward it */
  1006. if(level && from == root_node)
  1007. just_forward_it=1;
  1008. /*Time to update our map*/
  1009. tracer_store_pkt(rpkt.from, &rip_quadg, level, trcr_hdr, tracer,
  1010. (void *)bhdr, bblock_sz, &old_bblocks_found, &old_bblock,
  1011. &old_bblock_sz);
  1012. if(bcast_hdr->flags & QSPN_BNODE_OPENED) {
  1013. if(from == root_node) {
  1014. /*
  1015. * This pkt passed through a bnode which has all its
  1016. * links opened. Increment the counter.
  1017. */
  1018. me.bmap_nodes_opened[blevel]++;
  1019. } else
  1020. bcast_hdr->flags &= ~QSPN_BNODE_OPENED;
  1021. }
  1022. if(!level || me.bmap_nodes_opened[blevel] >= (me.bmap_nodes[blevel]-1))
  1023. all_bnodes_are_opened=1;
  1024. not_opened=0;
  1025. if(do_real_qspn_action && !just_forward_it) {
  1026. /*
  1027. * We search in the qspn_buffer the reply which has current
  1028. * sub_id. If we don't find it, we add it.
  1029. */
  1030. qb=qspn_b[level];
  1031. if(!qb) {
  1032. debug(DBG_NOISE, "There isn't qspn_buffer information"
  1033. " for the %d level", level);
  1034. ret=-1;
  1035. goto finish;
  1036. }
  1037. if((reply=qspn_b_find_reply(qb, sub_id)) == -1)
  1038. list_for(qb)
  1039. reply=qspn_b_add(qb, sub_id, 0);
  1040. /* Time to open the links */
  1041. qb=qspn_b[level];
  1042. list_for(qb) {
  1043. if(qb->rnode == from)
  1044. qb->flags[reply]|=QSPN_OPENED;
  1045. if(!(qb->flags[reply] & QSPN_OPENED))
  1046. not_opened++;
  1047. }
  1048. /*
  1049. * If we have the links opened and we are in level > 0, set
  1050. * the flags to let the other bnodes know.
  1051. */
  1052. if(!not_opened && level && !(root_node->flags & QSPN_OPENED)){
  1053. bcast_hdr->flags|=QSPN_BNODE_OPENED;
  1054. root_node->flags|=QSPN_OPENED;
  1055. }
  1056. /*Fokke, we've all the links opened. let's take a rest.*/
  1057. if(!not_opened && all_bnodes_are_opened) {
  1058. debug(DBG_NOISE, "qspn_open(0x%x, sub_id: %d) lvl %d: "
  1059. "The qspn_open phase is finished",
  1060. rpkt.hdr.id, sub_id, level);
  1061. if(level && !(me.bmap_nodes[blevel]-1)) {
  1062. /*
  1063. * If in this `level' we are the only bnode,
  1064. * we need to broadcast the qspn_open to the
  1065. * other nodes in this gnode, to let them
  1066. * store the qspn_open's entries. So don't
  1067. * go to finish;
  1068. */
  1069. debug(DBG_INSANE, "Propagating the last qspn_open");
  1070. do_nothing();
  1071. } else
  1072. goto finish;
  1073. }
  1074. /* The forge of the packet. "One pkt to rule them all". Dum dum */
  1075. ret_err=tracer_pkt_build(
  1076. QSPN_OPEN, rpkt.hdr.id, bcast_hdr->sub_id, /*IDs*/
  1077. gid, level,
  1078. bcast_hdr, trcr_hdr, tracer, /*Received tracer_pkt*/
  1079. old_bblocks_found, old_bblock, old_bblock_sz,/*bnode_block*/
  1080. &pkt); /*Where the pkt is built*/
  1081. if(ret_err) {
  1082. debug(DBG_NOISE, "Cannot forward the qspn_open(0x%x) "
  1083. "lvl %d sub_id: %d: Pkt build failed.",
  1084. rpkt.hdr.id, level, sub_id);
  1085. ret=-1;
  1086. goto finish;
  1087. }
  1088. } else {
  1089. /*
  1090. * Increment the rtt of the last gnode chunk, because we
  1091. * aren't adding any entry, but we are just forwarding it.
  1092. */
  1093. debug(DBG_INSANE, "qspn_close: Incrementing the last hops rtt.");
  1094. ret_err=tracer_add_rtt(real_from_rpos, tracer, hops-1);
  1095. if(ret_err < 0)
  1096. debug(DBG_NOISE, "tracer_add_rtt(0x%x) hop %d failed",
  1097. rpkt.hdr.id, hops-1);
  1098. /* the pkt we're sending is a copy of the received one */
  1099. pkt_copy(&pkt, &rpkt);
  1100. pkt_clear(&pkt);
  1101. }
  1102. /*
  1103. * * Forward the new pkt * *
  1104. */
  1105. debug(DBG_INSANE, "%s(0x%x) lvl %d to broadcast",
  1106. rq_to_str(pkt.hdr.op), pkt.hdr.id, level);
  1107. if(do_real_qspn_action && !int_qspn_opener) {
  1108. flood_pkt_send(exclude_from_and_opened_and_glevel,
  1109. upper_level, sub_id, real_from_rpos, pkt);
  1110. } else {
  1111. /* Just forward it without caring of opened or not rnodes */
  1112. flood_pkt_send(exclude_from_and_glevel, upper_level,
  1113. sub_id, real_from_rpos, pkt);
  1114. }
  1115. finish:
  1116. if(old_bblock)
  1117. xfree(old_bblock);
  1118. return ret;
  1119. }