
qspn.c

/* This file is part of Netsukuku
 * (c) Copyright 2005 Andrea Lo Pumo aka AlpT <alpt@freaknet.org>
 *
 * This source code is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * This source code is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * Please refer to the GNU Public License for more details.
 *
 * You should have received a copy of the GNU Public License along with
 * this source code; if not, write to:
 * Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * --
 * qspn.c:
 *
 * Here is the code that implements the Quantum Shortest Path Netsukuku
 * meta-algorithm, the heart of Netsukuku.
 */

#include "includes.h"

#include "endianness.h"
#include "bmap.h"
#include "route.h"
#include "request.h"
#include "pkts.h"
#include "tracer.h"
#include "qspn.h"
#include "igs.h"
#include "netsukuku.h"
#include "common.h"

void
qspn_set_map_vars(u_char level, map_node ** map, map_node ** root_node,
                  int *root_node_pos, map_gnode ** gmap)
{
    if (!level) {
        if (map)
            *map = me.int_map;
        if (root_node)
            *root_node = me.cur_node;
        if (root_node_pos)
            *root_node_pos = pos_from_node(me.cur_node, me.int_map);
    } else {
        if (map)
            *map = (map_node *) me.ext_map[_EL(level)];
        if (gmap)
            *gmap = me.ext_map[_EL(level)];
        if (root_node)
            *root_node = &me.cur_quadg.gnode[_EL(level)]->g;
        if (root_node_pos)
            *root_node_pos = me.cur_quadg.gid[level];
    }
}
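
/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * a caller that only needs some of the variables of a given `level'
 * passes 0 for the pointers it doesn't care about, exactly as
 * qspn_b_del_all_dead_rnodes() and qspn_send() do below.
 */
#if 0
static void qspn_set_map_vars_example(u_char level)
{
    map_node *root_node;
    int root_node_pos;

    /* Fetch only the root node and its position at `level' */
    qspn_set_map_vars(level, 0, &root_node, &root_node_pos, 0);
}
#endif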

/*
 * qspn_time_reset: Reset the qspn time of all the levels that go from
 * `start_level' to `end_level'. The total number of effective levels is
 * specified in `levels'.
 */
void
qspn_time_reset(int start_level, int end_level, int levels)
{
    struct timeval cur_t;
    int i;

    if (end_level <= start_level)
        end_level = start_level + 1;

    /*
     * We fake the cur_qspn_time, so qspn_round_left thinks that a
     * qspn_round was already sent
     */
    gettimeofday(&cur_t, 0);
    cur_t.tv_sec -= QSPN_WAIT_ROUND_LVL(levels) * 2;

    for (i = start_level; i < end_level; i++)
        memcpy(&me.cur_qspn_time[i], &cur_t, sizeof(struct timeval));
}

void
qspn_reset_counters(u_char levels)
{
    /* Reset the qspn counters */
    qspn_time_reset(0, levels, levels);
    qspn_reset_gcount(qspn_gnode_count, GCOUNT_LEVELS, 1);
    qspn_reset_gcount(qspn_old_gcount, GCOUNT_LEVELS, 1);
}

void
qspn_reset(u_char levels)
{
    setzero(qspn_b, sizeof(struct qspn_buffer *) * levels);
    setzero(qspn_send_mutex, sizeof(int) * levels);
    setzero(me.cur_qspn_id, sizeof(int) * levels);
    qspn_reset_counters(levels);
}

void
qspn_init(u_char levels)
{
    /* register the qspn/tracer's ops in the pkt_op_table */
    add_pkt_op(TRACER_PKT, SKT_TCP, ntk_tcp_port, tracer_pkt_recv);
    add_pkt_op(TRACER_PKT_CONNECT, SKT_TCP, ntk_tcp_port, tracer_pkt_recv);
    add_pkt_op(QSPN_CLOSE, SKT_TCP, ntk_tcp_port, qspn_close);
    add_pkt_op(QSPN_OPEN, SKT_TCP, ntk_tcp_port, qspn_open);

    /*
     * Alloc the qspn stuff
     */
    qspn_b = xmalloc(sizeof(struct qspn_buffer *) * levels);
    qspn_send_mutex = xmalloc(sizeof(int) * levels);
    me.cur_qspn_id = xmalloc(sizeof(int) * levels);
    me.cur_qspn_time = xmalloc(sizeof(struct timeval) * levels);

    qspn_reset(levels);
}

void
qspn_free(void)
{
    if (qspn_b)
        xfree(qspn_b);
    if (qspn_send_mutex)
        xfree(qspn_send_mutex);
    if (me.cur_qspn_id)
        xfree(me.cur_qspn_id);
    if (me.cur_qspn_time)
        xfree(me.cur_qspn_time);
}

void
qspn_b_clean(u_char level)
{
    struct qspn_buffer *qb = qspn_b[level];
    list_for(qb) {
        if (!qb->replies)
            continue;

        if (qb->replier)
            xfree(qb->replier);
        if (qb->flags)
            xfree(qb->flags);

        qb->replies = 0;
        qb->replier = 0;
        qb->flags = 0;
    }
}

/*
 * qspn_b_add: It adds a new element in the qspn_b 'qb' buffer and returns its
 * position.
 */
int
qspn_b_add(struct qspn_buffer *qb, u_char replier, u_short flags)
{
    qb->replies++;
    qb->replier = xrealloc(qb->replier, sizeof(u_char) * qb->replies);
    qb->flags = xrealloc(qb->flags, sizeof(u_short) * qb->replies);

    qb->replier[qb->replies - 1] = replier;
    qb->flags[qb->replies - 1] = flags;

    return qb->replies - 1;
}
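
/*
 * Illustrative sketch (excluded from the build): how the reply bookkeeping
 * of the qspn_buffer list is typically driven, mirroring what qspn_open()
 * does below. `sub_id' identifies the qspn_open sub-round and `from' is the
 * rnode that sent it.
 */
#if 0
static void qspn_b_reply_example(u_char level, int sub_id, map_node * from)
{
    struct qspn_buffer *qb = qspn_b[level];
    int reply;

    /* If no reply slot exists yet for `sub_id', add one to every buffer */
    if ((reply = qspn_b_find_reply(qb, sub_id)) == -1)
        list_for(qb)
            reply = qspn_b_add(qb, sub_id, 0);

    /* Mark as opened only the buffer which points to `from' */
    qb = qspn_b[level];
    list_for(qb)
        if (qb->rnode == from)
            qb->flags[reply] |= QSPN_OPENED;
}
#endif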

struct qspn_buffer *
qspn_b_find_rnode(struct qspn_buffer *qb, map_node * rnode)
{
    list_for(qb)
        if (qb->rnode == rnode)
            return qb;
    return 0;
}

int
qspn_b_find_reply(struct qspn_buffer *qb, int sub_id)
{
    int i;

    if (!qb)
        return -1;

    for (i = 0; i < qb->replies; i++)
        if (qb->replier[i] == sub_id)
            return i;
    return -1;
}

/*
 * qspn_b_del_dead_rnodes: deletes all the `qspn_buffer' structs present in
 * the `*qb' llist which point to an rnode which doesn't exist anymore.
 * The number of structs removed is returned.
 */
int
qspn_b_del_dead_rnodes(struct qspn_buffer **qb, map_node * root_node)
{
    struct qspn_buffer *q = *qb, *next;
    int i = 0;

    list_safe_for(q, next)
        if (rnode_find(root_node, q->rnode) < 0) {
            *qb = list_del(*qb, q);
            i++;
        }

    return i;
}

/*
 * qspn_b_del_all_dead_rnodes: It uses qspn_b_del_dead_rnodes() for each
 * element of the qspn_b global array
 */
void
qspn_b_del_all_dead_rnodes(void)
{
    int level, tot_levels = FAMILY_LVLS;
    map_node *root_node;

    for (level = 0; level < tot_levels; level++) {
        qspn_set_map_vars(level, 0, &root_node, 0, 0);
        qspn_b_del_dead_rnodes(&qspn_b[level], root_node);
    }
}

/*
 * qspn_round_left: It returns the milliseconds left before the QSPN_WAIT_ROUND
 * expires. If the round has expired it returns 0.
 */
int
qspn_round_left(u_char level)
{
    struct timeval cur_t, t;
    int wait_round, cur_elapsed, diff;

    gettimeofday(&cur_t, 0);
    timersub(&cur_t, &me.cur_qspn_time[level], &t);

    if (t.tv_sec >= 1) {
        /*
         * There are still seconds left, so, let's not consider the
         * millisec.
         */
        wait_round = QSPN_WAIT_ROUND_LVL(level);
        cur_elapsed = t.tv_usec;
        diff = wait_round - cur_elapsed;

        /*
         * We need to return diff in millisec, be sure to not overflow
         * the int
         */
        if (diff > (INT_MAX / 1000))
            diff = (INT_MAX / 1000) * 1000;
        else
            diff *= 1000;
    } else {
        wait_round = QSPN_WAIT_ROUND_MS_LVL(level);
        cur_elapsed = MILLISEC(t);
        diff = wait_round - cur_elapsed;
    }

    return cur_elapsed >= wait_round ? 0 : diff;
}
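
/*
 * Illustrative sketch (excluded from the build): the canonical way to wait
 * for the current round to expire, as qspn_send() does below before it
 * starts a new qspn_round at `level'.
 */
#if 0
static void qspn_wait_round_example(u_char level)
{
    int round_ms;

    while ((round_ms = qspn_round_left(level)) > 0) {
        /* round_ms is in milliseconds, usleep() wants microseconds */
        usleep(round_ms * 1000);
        update_qspn_time(level, 0);
    }
}
#endif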

/*
 * update_qspn_time: It updates me.cur_qspn_time.
 * Oh, sorry, this code doesn't show consideration for the relativity time
 * shit. So you can't move at a velocity near the light's speed. I'm sorry.
 */
void
update_qspn_time(u_char level, u_int new_qspn_time)
{
    struct timeval cur_t, t;
    int ret;

    gettimeofday(&cur_t, 0);

    if (new_qspn_time) {
        MILLISEC_TO_TV(new_qspn_time, t);
        timersub(&cur_t, &t, &me.cur_qspn_time[level]);
    } else
        timersub(&cur_t, &me.cur_qspn_time[level], &t);

    ret = QSPN_WAIT_ROUND_MS_LVL(level) - MILLISEC(t);

    if (ret < 0 && abs(ret) > QSPN_WAIT_ROUND_MS_LVL(level)) {
        ret *= -1;
        /*
         * We round `ret' to take off the time of the passed round,
         * then we can store in `ret' the number of ms passed since the
         * latest round.
         */
        ret = ret - (QSPN_WAIT_ROUND_MS_LVL(level) *
                     (ret / QSPN_WAIT_ROUND_MS_LVL(level)));
        MILLISEC_TO_TV(ret, t);

        /*
         * Now we can calculate when the last round has started, the
         * result is stored in `me.cur_qspn_time[level]'
         */
        timersub(&cur_t, &t, &me.cur_qspn_time[level]);
    }
}
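
/*
 * Worked example of the rounding above (the numbers are illustrative only):
 * if QSPN_WAIT_ROUND_MS_LVL(level) were 60000ms and 130000ms had passed
 * since the recorded round start, then ret = -70000 -> 70000, and
 * 70000 - 60000 * (70000 / 60000) = 10000, i.e. the current round started
 * 10 seconds ago; me.cur_qspn_time[level] is moved forward accordingly.
 */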

/*
 * qspn_inc_gcount: It updates the `gcount' array, incrementing by `inc'
 * each member which is in a position >= _EL(`level').
 * For example if level is 2, it will do: gcount[_EL(2)]+=inc;
 * gcount[_EL(3)]+=inc.
 * `level' must be < GCOUNT_LEVELS+1 and >= 1.
 */
void
qspn_inc_gcount(u_int * gcount, int level, int inc)
{
    int i;

    if (level < 1 || level >= GCOUNT_LEVELS)
        return;

    for (i = _EL(level); i < GCOUNT_LEVELS; i++)
        gcount[i] += inc;

#ifdef DEBUG
    debug(DBG_INSANE, "Gnode_count incremented to: %d %d %d %d",
          gcount[0], gcount[1], gcount[2], gcount[3]);
#endif
}

/*
 * qspn_dec_gcount: the same as qspn_inc_gcount(), but it decrements
 * `gcount' instead.
 */
void
qspn_dec_gcount(u_int * gcount, int level, int dec)
{
    int i;

    if (level < 1 || level >= GCOUNT_LEVELS)
        return;

    for (i = _EL(level); i < GCOUNT_LEVELS; i++)
        gcount[i] -= dec;

#ifdef DEBUG
    debug(DBG_INSANE, "Gnode_count decremented to: %d %d %d %d",
          gcount[0], gcount[1], gcount[2], gcount[3]);
#endif
}

/*
 * qspn_reset_gcount: resets the gcount array by setting all its
 * first `level' members to `value'.
 */
void
qspn_reset_gcount(u_int * gcount, int level, int value)
{
    int i;

    for (i = 0; i < level; i++)
        gcount[i] = value;

#ifdef DEBUG
    debug(DBG_INSANE, "Gnode_count set to: %d %d %d %d",
          gcount[0], gcount[1], gcount[2], gcount[3]);
#endif
}

/*
 * qspn_backup_gcount: copies `gcount' in `old_gcount'
 */
void
qspn_backup_gcount(u_int * old_gcount, int *gcount)
{
    memcpy(old_gcount, gcount, sizeof(u_int) * GCOUNT_LEVELS);
}

/*
 * qspn_remove_deads: It removes the dead nodes from the maps at the level
 * `level' (if any).
 */
void
qspn_remove_deads(u_char level)
{
    int bm, i, l, node_pos, ip[MAX_IP_INT];
    map_node *map, *node;
    map_gnode *gmap, *gnode = 0;
    inet_gw *igw;

    qspn_set_map_vars(level, 0, 0, 0, &gmap);
    map = me.int_map;

    /*
     * How do we remove the dead nodes from the map? How do we know which
     * ones are dead?
     * Pretty simple: we can't know, so we mark all the nodes with the
     * QSPN_OLD flag and we wait until the next qspn_round.
     * The nodes which still have the QSPN_OLD flag weren't updated during
     * the previous qspn_round, thus they are dead.
     */
    for (i = 0; i < MAXGROUPNODE; i++) {
        node_pos = i;
        if (!level)
            node = (map_node *) & map[node_pos];
        else {
            gnode = &gmap[node_pos];
            node = &gnode->g;
            if (gnode->flags & GMAP_VOID)
                continue;
        }

        if (node->flags & MAP_ME || node->flags & MAP_VOID)
            continue;

        if ((node->flags & QSPN_OLD)) {
            /* The node wasn't updated in the previous QSPN.
             * Remove it from the maps */

            if (restricted_mode && node->flags & MAP_IGW) {
                /*
                 * The node was an Internet gw, remove it from
                 * me.igws
                 */
                igw = igw_find_node(me.igws, i, node);
                if (igw) {
                    memcpy(ip, igw->ip, MAX_IP_SZ);

                    for (l = i; l < me.cur_quadg.levels && igw; l++) {
                        igw_del(me.igws, me.igws_counter, igw, l);
                        if (l + 1 < me.cur_quadg.levels)
                            igw =
                                igw_find_ip(me.igws, l + 1, (u_int *) ip);
                    }
                    igw_replace_def_igws(me.igws, me.igws_counter,
                                         me.my_igws, me.cur_quadg.levels,
                                         my_family);
                }
            }

            if ((node->flags & MAP_BNODE)
                && level < me.cur_quadg.levels - 1) {
                /*
                 * The node is a border node, delete it from
                 * the bmap.
                 */
                bm = map_find_bnode(me.bnode_map[level],
                                    me.bmap_nodes[level], node_pos);
                if (bm != -1)
                    me.bnode_map[level] =
                        map_bnode_del(me.bnode_map[level],
                                      &me.bmap_nodes[level],
                                      &me.bnode_map[level][bm]);
            }

            if (level) {
                /*
                 * Remove all the rnodes of the bnodes which
                 * point to `node'.
                 */
                l = GET_BMAP_LEVELS(my_family);
                bmaps_del_bnode_rnode(me.bnode_map, (int *) me.bmap_nodes,
                                      l, node);
            }

            if (!level) {
                debug(DBG_NORMAL, "qspn: The node %d is dead", i);
                map_node_del(node);
                qspn_dec_gcount((u_int *) qspn_gnode_count, level + 1, 1);
            } else {
                debug(DBG_NORMAL, "The groupnode %d of level %d"
                      " is dead", i, level);
                qspn_dec_gcount((u_int *) qspn_gnode_count, level + 1,
                                gnode->gcount);
                gmap_node_del(gnode);
            }
            gnode_dec_seeds(&me.cur_quadg, level);

            /* Delete its route */
            rt_update_node(0, node, 0, 0, 0, level);
        } else
            /* We are going to start a new QSPN, but first mark
             * this node as OLD, in this way we will be able to
             * see if it was updated during the new QSPN. */
            node->flags |= QSPN_OLD;
    }
}

/*
 * qspn_new_round: It prepares all the buffers for the new qspn_round and
 * removes the QSPN_OLD nodes from the map. The new qspn_round id is set
 * to `new_qspn_id'. If `new_qspn_id' is zero then the id is incremented by
 * one. If `new_qspn_time' is not zero, the qspn_time[level] is set to the
 * current time minus `new_qspn_time'.
 */
void
qspn_new_round(u_char level, int new_qspn_id, u_int new_qspn_time)
{
    int i;
    map_node *root_node, *node;

    qspn_set_map_vars(level, 0, &root_node, 0, 0);

    /* New round activated. Destroy the old one. beep. */
    if (new_qspn_id)
        me.cur_qspn_id[level] = new_qspn_id;
    else
        me.cur_qspn_id[level]++;

    if (new_qspn_time)
        update_qspn_time(level, new_qspn_time);
    else
        update_qspn_time(level, 0);

    qspn_b_clean(level);
    bmap_counter_reset(BMAP_LEVELS(me.cur_quadg.levels),
                       me.bmap_nodes_closed);
    bmap_counter_reset(BMAP_LEVELS(me.cur_quadg.levels),
                       me.bmap_nodes_opened);

    /* Copy the current gnode_count in old_gcount */
    qspn_backup_gcount(qspn_old_gcount, (int *) qspn_gnode_count);

    /* Clear the flags set during the previous qspn */
    root_node->flags &= ~QSPN_STARTER & ~QSPN_CLOSED & ~QSPN_OPENED;
    for (i = 0; i < root_node->links; i++) {
        node = (map_node *) root_node->r_node[i].r_node;
        node->flags &= ~QSPN_CLOSED & ~QSPN_OPENED &
            ~QSPN_STARTER & ~QSPN_OPENER;
    }

    /* Mark all bnodes with the BMAP_UPDATE flag, in this way
     * tracer_store_pkt will know what bnodes weren't updated during this
     * new round */
    bmaps_set_bnode_flag(me.bnode_map, (int *) me.bmap_nodes,
                         GET_BMAP_LEVELS(my_family), BMAP_UPDATE);

    /* remove the dead nodes */
    qspn_remove_deads(level);
}
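
/*
 * Note: qspn_new_round() is entered from two places below: qspn_send(),
 * when this node itself starts a round, and qspn_close(), when a packet
 * carrying a qspn_id greater than me.cur_qspn_id[level] announces that a
 * new round has already begun elsewhere.
 */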

/* * * Exclude functions. (see pkts.h) * * */
int
exclude_from_and_opened_and_glevel(TRACER_PKT_EXCLUDE_VARS)
{
    map_node *rn;
    struct qspn_buffer *qb, *qbp;
    int reply;
    u_char level;

    if (exclude_from_and_glevel(TRACER_PKT_EXCLUDE_VARS_NAME))
        return 1;

    level = excl_level - 1;

    qb = qspn_b[level];
    if (e_rnode && level - 1 >= 0)
        rn = &e_rnode->quadg.gnode[_EL(excl_level - 1)]->g;
    else
        rn = (map_node *) me.cur_node->r_node[pos].r_node;

    qbp = qspn_b_find_rnode(qb, rn);
    if (!qbp)
        return 0;

    reply = qspn_b_find_reply(qbp, sub_id);

    if (qbp->flags[reply] & QSPN_OPENED)
        return 1;

    return 0;
}

int
exclude_from_and_glevel_and_closed(TRACER_PKT_EXCLUDE_VARS)
{
    if ((node->flags & QSPN_CLOSED) ||
        exclude_from_and_glevel(TRACER_PKT_EXCLUDE_VARS_NAME))
        return 1;
    return 0;
}

int
exclude_from_and_glevel_and_notstarter(TRACER_PKT_EXCLUDE_VARS)
{
    int level = excl_level - 1;

    if (exclude_from_and_glevel(TRACER_PKT_EXCLUDE_VARS_NAME))
        return 1;

    if ((!level || (node->flags & MAP_BNODE))
        && !(node->flags & QSPN_STARTER))
        return 1;

    return 0;
}
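
/*
 * These exclude callbacks are handed to flood_pkt_send() below: following
 * the convention of the other exclude functions (see pkts.h), returning 1
 * skips the candidate rnode and returning 0 lets the packet be sent to it.
 * That is how a forwarded qspn_close avoids already-closed links and a
 * forwarded qspn_open avoids already-opened ones.
 */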

/*
 * The Holy qspn_send. It is used to send a new qspn_round when something
 * changes around the root_node (me).
 */
int
qspn_send(u_char level)
{
    PACKET pkt;
    int round_ms, ret = 0, ret_err, upper_gid, root_node_pos, qid;
    map_node *map, *root_node;
    map_gnode *gmap;
    u_char upper_level;

    qid = me.cur_qspn_id[level];
    upper_level = level + 1;
    qspn_set_map_vars(level, &map, &root_node, &root_node_pos, &gmap);

    /*
     * Now I explain how the level stuff works in the qspn. For example, if
     * we want to propagate the qspn in level 2, we store in qspn.level
     * the upper level (3), and the gid of the upper_level which contains
     * the entire level 2. Simple, no?
     */

    /* If we aren't a bnode it's useless to send the qspn at higher levels */
    if (level && !(me.cur_node->flags & MAP_BNODE))
        return -1;

    /* Do not send qspn packets if we are hooking! */
    if (me.cur_node->flags & MAP_HNODE)
        return 0;

    if (qspn_send_mutex[level])
        return 0;
    else
        qspn_send_mutex[level] = 1;

    /*
     * We have to wait for the old qspn_round to finish before starting the
     * new one.
     */
    while ((round_ms = qspn_round_left(level)) > 0) {
        debug(DBG_INSANE, "Waiting %dms to send a new qspn_round, lvl:"
              " %d", round_ms, level);
        usleep(round_ms * 1000);
        update_qspn_time(level, 0);
    }

    /*
     * If, after the above wait, the old saved qspn_id (`qid') is not the
     * same as the current one, it means that we already received a new
     * qspn_round in this level, so forget about it ;)
     */
    if (qid != me.cur_qspn_id[level])
        return 0;

    qspn_new_round(level, 0, 0);
    root_node->flags |= QSPN_STARTER;

    upper_gid = me.cur_quadg.gid[upper_level];
    ret_err = tracer_pkt_build(QSPN_CLOSE, me.cur_qspn_id[level], root_node_pos, /*IDs */
                               upper_gid, level, 0, 0, 0, /*Received tracer_pkt */
                               0, 0, 0, /*bnode_block */
                               &pkt); /*Where the pkt is built */
    if (ret_err) {
        debug(DBG_NOISE, "Cannot send the new qspn_round: "
              "tracer_pkt build failed.");
        ret = -1;
        goto finish;
    }

    /*... send the qspn_close to our r_nodes */
    flood_pkt_send(exclude_from_and_glevel_and_closed, upper_level, -1,
                   -1, pkt);

    debug(DBG_INSANE, "Qspn_round lvl: %d id: 0x%x sent", level,
          me.cur_qspn_id[level]);

  finish:
    qspn_send_mutex[level] = 0;
    return ret;
}
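
/*
 * Minimal usage sketch (excluded from the build, and only an assumption
 * about the caller): whatever detects a change in the rnodes around `me'
 * would fire qspn_send() for the interested level. The real trigger lives
 * outside this file.
 */
#if 0
static void qspn_send_on_change_example(u_char level)
{
    if (qspn_send(level) < 0)
        debug(DBG_NOISE, "qspn_send(lvl %d) refused: not a bnode", level);
}
#endif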

/*
 * qspn_open_start: sends a new qspn_open when all the links are closed.
 * `from' is the node who sent the last qspn_close which closed the last
 * not-closed link.
 * `pkt_to_all' is the last qspn_close pkt sent by `from', which is an rnode
 * at the `from_rpos' position in the me.cur_node rnodes. `pkt_to_all' must
 * be passed with the new tracer_pkt entry already added because it is
 * sent as is.
 * `qspn_id', `root_node_pos', `gid' and `level' are the same parameters passed
 * to tracer_pkt_build to build the `pkt_to_all' pkt.
 * This function is called only by qspn_close().
 */
int
qspn_open_start(int from_rpos, PACKET pkt_to_all, int qspn_id,
                int root_node_pos, int gid, int level)
{
    PACKET pkt_to_from;
    int upper_level, ret_err;

    upper_level = level + 1;

    debug(DBG_INSANE, "Fwd %s(0x%x) lvl %d, to broadcast",
          rq_to_str(QSPN_OPEN), qspn_id, level);

    /*
     * The `from' node doesn't need all the previous tracer_pkt entries
     * (which are kept in `pkt_to_all'), so we build a new tracer_pkt
     * only for it.
     */
    ret_err =
        tracer_pkt_build(QSPN_OPEN, qspn_id, root_node_pos, gid, level, 0,
                         0, 0, 0, 0, 0, &pkt_to_from);
    if (ret_err)
        debug(DBG_NOISE, "Cannot send the new qspn_open: "
              "pkt build failed.");
    else
        /* Send the pkt to `from' */
        flood_pkt_send(exclude_all_but_notfrom, upper_level,
                       -1, from_rpos, pkt_to_from);

    /* Send the `pkt_to_all' pkt to all the other rnodes (if any) */
    if (me.cur_node->links > 1) {
        pkt_to_all.hdr.op = QSPN_OPEN;
        flood_pkt_send(exclude_from_and_glevel, upper_level,
                       -1, from_rpos, pkt_to_all);
    }

    return 0;
}

/*
 * Damn, this function is so ugly, it's a real pain. 20 args. ARGH!
 * But without it I would have had to copy this code twice, and even if I
 * chose to use a struct to pass all the args, they would still be too many
 * and it would be uglier than this.
 * I'm sorry.
 * Ah, yes, this function splits the unpacked qspn_pkt and returns a lot of
 * vars. * DO NOT TRY THIS AT HOME *
 */
int
qspn_unpack_pkt(PACKET rpkt, brdcast_hdr ** new_bcast_hdr,
                tracer_hdr ** new_tracer_hdr, tracer_chunk ** new_tracer,
                bnode_hdr ** new_bhdr, size_t * new_bblock_sz,
                quadro_group * rip_quadg, int *new_real_from_rpos,
                u_short * new_hops, u_char * new_upper_level, int *new_gid,
                map_node ** new_from, map_node ** new_root_node,
                map_node ** new_tracer_starter, int *new_sub_id,
                int *new_root_node_pos, u_char * new_level,
                u_char * new_blevel, char *new_just_forward_it,
                char *new_do_real_qspn_action)
{
    brdcast_hdr *bcast_hdr;
    tracer_hdr *trcr_hdr;
    tracer_chunk *tracer;
    bnode_hdr *bhdr = 0;
    size_t bblock_sz = 0;
    int ret_err;
    u_short hops;
    map_node *from, *root_node, *tracer_starter;
    int gid, root_node_pos, real_from_rpos, sub_id;
    u_char level, upper_level, blevel;
    map_gnode *gfrom, *gtracer_starter;
    const char *ntop;
    char do_real_qspn_action = 0, just_forward_it = 0;

    if (server_opt.dbg_lvl) {
        ntop = inet_to_str(rpkt.from);
        debug(DBG_NOISE, "%s(0x%x) from %s", rq_to_str(rpkt.hdr.op),
              rpkt.hdr.id, ntop);
    }

    ret_err =
        tracer_unpack_pkt(rpkt, &bcast_hdr, &trcr_hdr, &tracer, &bhdr,
                          &bblock_sz, rip_quadg, &real_from_rpos);
    if (ret_err) {
        ntop = inet_to_str(rpkt.from);
        debug(DBG_NOISE, "qspn_unpack_pkt(): The %s node sent an "
              "invalid %s (0x%x) pkt here.", ntop,
              rq_to_str(rpkt.hdr.op), rpkt.hdr.id);
        return -1;
    }

    gid = bcast_hdr->g_node;
    upper_level = level = bcast_hdr->level;
    hops = trcr_hdr->hops;

    if (!level || level == 1) {
        level = 0;
        qspn_set_map_vars(level, 0, &root_node, &root_node_pos, 0);
        from = node_from_pos(tracer[hops - 1].node, me.int_map);
        tracer_starter = node_from_pos(tracer[0].node, me.int_map);
    } else {
        level--;
        qspn_set_map_vars(level, 0, &root_node, &root_node_pos, 0);
        gfrom = gnode_from_pos(tracer[hops - 1].node,
                               me.ext_map[_EL(level)]);
        from = &gfrom->g;
        gtracer_starter = gnode_from_pos(tracer[0].node,
                                         me.ext_map[_EL(level)]);
        tracer_starter = &gtracer_starter->g;
    }

    blevel = level - 1;

    from->flags &= ~QSPN_OLD;

    sub_id = bcast_hdr->sub_id;

    /* Only if we are in the level 0, or if we are a bnode, we can do the
     * real qspn actions, otherwise we simply forward the pkt.
     * In other words:
     * `just_forward_it'==0 means that we are a true bnode, or that
     * level is 0.
     * `do_real_qspn_action'==1 means that we are a bnode also at `level',
     * or that level is 0.
     */
    if (level && !(me.cur_node->flags & MAP_BNODE))
        just_forward_it = 1;

    if (!level || ((root_node->flags & MAP_BNODE) && !just_forward_it))
        do_real_qspn_action = 1;

    /* Return all the load of pointers, Argh */
    *new_bcast_hdr = bcast_hdr;
    *new_tracer_hdr = trcr_hdr;
    *new_tracer = tracer;
    *new_bhdr = bhdr;
    *new_bblock_sz = bblock_sz;
    *new_hops = hops;
    *new_upper_level = upper_level;
    *new_gid = gid;
    *new_sub_id = sub_id;
    *new_from = from;
    *new_root_node = root_node;
    *new_tracer_starter = tracer_starter;
    *new_gid = gid;
    *new_root_node_pos = root_node_pos;
    *new_real_from_rpos = real_from_rpos;
    *new_level = level;
    *new_blevel = blevel;
    *new_upper_level = upper_level;
    *new_just_forward_it = just_forward_it;
    *new_do_real_qspn_action = do_real_qspn_action;

    return 0;
}

/*
 * qspn_close: It receives a QSPN_CLOSE pkt, analyzes it, stores the routes,
 * closes the rpkt.from link and then keeps forwarding it to all the non
 * closed links. If all the links are closed, a qspn_open will be sent.
 */
int
qspn_close(PACKET rpkt)
{
    PACKET pkt;
    brdcast_hdr *bcast_hdr;
    tracer_hdr *trcr_hdr;
    tracer_chunk *tracer;
    bnode_hdr *bhdr = 0;
    size_t bblock_sz = 0, old_bblock_sz;
    int i, not_closed = 0, ret = 0, ret_err;
    u_short hops, old_bblocks_found = 0;
    const char *ntop;
    char *old_bblock = 0;
    char do_real_qspn_action = 0, just_forward_it = 0, int_qspn_starter = 0;
    char all_bnodes_are_closed = 0, start_new_qspn_open = 0;
    u_char rq;
    map_node *from, *root_node, *tracer_starter, *node;
    quadro_group rip_quadg;
    u_int trtt;
    int gid, root_node_pos, real_from_rpos, sub_id;
    u_char level, upper_level, blevel;

    /* Drop the qspn pkt if we are hooking */
    if (me.cur_node->flags & MAP_HNODE)
        goto finish;

    /*
     * * Unpack the qspn pkt and split it * *
     */
    ret_err = qspn_unpack_pkt(rpkt, &bcast_hdr, &trcr_hdr, &tracer, &bhdr,
                              &bblock_sz, &rip_quadg, &real_from_rpos,
                              &hops, &upper_level, &gid,
                              &from, &root_node,
                              &tracer_starter, &sub_id,
                              &root_node_pos, &level, &blevel,
                              &just_forward_it, &do_real_qspn_action);
    if (ret_err < 0) {
        ret = -1;
        goto finish;
    }

#ifdef DEBUG
    debug(DBG_INSANE,
          "QSPN_CLOSE(0x%x, lvl %d): node[0]: %d, node[1]: %d, hops: %d",
          rpkt.hdr.id, level, tracer[0].node,
          trcr_hdr->hops > 1 ? tracer[1].node : -1, trcr_hdr->hops);
#endif

    /*
     * * * Verify the qspn_close pkt * *
     */

    /* If the rpkt is the same qspn_close we sent we can drop it */
    if ((!level || (do_real_qspn_action &&
                    (root_node->flags & QSPN_STARTER)))
        && tracer_starter == root_node) {
        ntop = inet_to_str(rpkt.from);
        debug(DBG_NOISE, "qspn_close(0x%x): Dropped qspn_close from "
              "%s: we are the qspn_starter of that pkt!"
              " (hops: %d)", rpkt.hdr.id, ntop, trcr_hdr->hops);
        ret = -1;
        goto finish;
    }

    /*
     * Check if the qspn_round is old or if it is the new one.
     */
    if (rpkt.hdr.id >= me.cur_qspn_id[level] + 1) {
        /* Happy new round */
        tracer_get_trtt(real_from_rpos, trcr_hdr, tracer, &trtt);
        debug(DBG_NOISE, "New qspn_round 0x%x lvl %d received,"
              " new qspn_time: %dms", rpkt.hdr.id, level, trtt);
        qspn_new_round(level, rpkt.hdr.id, trtt);
    } else if (rpkt.hdr.id < me.cur_qspn_id[level]) {
        /* Reject it, it's old */
        ntop = inet_to_str(rpkt.from);
        debug(DBG_NOISE, "qspn_close(): %s sent a qspn_close"
              " with a wrong qspn_id(0x%x,lvl %d)"
              "qid 0x%x", ntop, rpkt.hdr.id, level, me.cur_qspn_id[level]);
        ret = -1;
        goto finish;
    }

    /* Some bnode, which is in the same gnode where we are, sent a
     * qspn_close, so we are a qspn_starter too */
    if (level && tracer_starter == root_node && hops == 1 &&
        do_real_qspn_action) {
        root_node->flags |= QSPN_STARTER;

        /* This flag indicates that the new qspn_round we received was
         * sent from our gnode, so it is an internal qspn starter.*/
        int_qspn_starter = 1;
    }

    /* We have only to forward it, nothing more */
    if (level && from == root_node)
        just_forward_it = 1;

    /* Time to update our maps */
    tracer_store_pkt(rpkt.from, &rip_quadg, level, trcr_hdr, tracer,
                     (void *) bhdr, bblock_sz, &old_bblocks_found,
                     &old_bblock, &old_bblock_sz);

    if (hops > 1 && !int_qspn_starter && (root_node->flags & QSPN_STARTER)
        && !(from->flags & QSPN_STARTER)) {
        ntop = inet_to_str(rpkt.from);
        debug(DBG_NOISE, "qspn_close(): Dropped qspn_close from %s: we"
              " are a qspn_starter, the pkts has (hops=%d)>1"
              " and was forwarded by a non qspn_starter", ntop, hops);
        goto finish;
    }

    if (bcast_hdr->flags & QSPN_BNODE_CLOSED) {
        if (from == root_node) {
            /*
             * This pkt passed through a bnode which has all its
             * links closed. Increment the counter.
             */
            me.bmap_nodes_closed[blevel]++;
        } else
            bcast_hdr->flags &= ~QSPN_BNODE_CLOSED;
    }

    if (!level
        || me.bmap_nodes_closed[blevel] >= (me.bmap_nodes[blevel] - 1))
        all_bnodes_are_closed = 1;

    not_closed = 0;
    if (do_real_qspn_action && !just_forward_it) {
        /*
         * We close the `from' node and we see if there are any links
         * which are still `not_closed'.
         */
        for (i = 0; i < root_node->links; i++) {
            node = (map_node *) root_node->r_node[i].r_node;

            if (root_node->r_node[i].r_node == (int *) from) {
#ifdef DEBUG
                int pos;
                pos = !level ? pos_from_node(node, me.int_map) :
                    pos_from_gnode((map_gnode *) node,
                                   me.ext_map[_EL(level)]);
                debug(DBG_INSANE, "Closing %d [g]node, lvl %d",
                      pos, level);
#endif
                node->flags |= QSPN_CLOSED;
            }

            if (!(node->flags & QSPN_CLOSED))
                not_closed++;
        }

        /* If we are a starter then `from' is a starter too */
        if (root_node->flags & QSPN_STARTER) {
            from->flags |= QSPN_STARTER;
            bcast_hdr->flags |= BCAST_TRACER_STARTERS;
        }

        /*
         * If we have all the links closed and we are in level > 0, set
         * the flags to let the other bnodes know.
         */
        if (!not_closed && level && !(root_node->flags & QSPN_CLOSED)) {
            bcast_hdr->flags |= QSPN_BNODE_CLOSED;
            root_node->flags |= QSPN_CLOSED;
        }

        if (!just_forward_it && !not_closed &&
            !(root_node->flags & QSPN_OPENER) &&
            !(root_node->flags & QSPN_STARTER) && all_bnodes_are_closed) {
            rq = QSPN_OPEN;
            start_new_qspn_open = 1;
        } else
            rq = QSPN_CLOSE;

        /*We build d4 p4ck37... */
        ret_err = tracer_pkt_build(rq, rpkt.hdr.id, root_node_pos, /*IDs */
                                   gid, level, bcast_hdr, trcr_hdr, tracer, /*Received tracer_pkt */
                                   old_bblocks_found, old_bblock, old_bblock_sz, /*bnode_block */
                                   &pkt); /*Where the pkt is built */
        if (ret_err) {
            debug(DBG_NOISE, "Cannot forward the qspn_close: "
                  "pkt build failed.");
            ret = -1;
            goto finish;
        }
    } else {
        /*
         * Increment the rtt of the last gnode chunk, because we
         * aren't adding any entry, but we are just forwarding it.
         */
        debug(DBG_INSANE, "qspn_close: Incrementing the last hops rtt.");
        ret_err = tracer_add_rtt(real_from_rpos, tracer, hops - 1);
        if (ret_err < 0)
            debug(DBG_NOISE, "tracer_add_rtt(0x%x) hop %d failed",
                  rpkt.hdr.id, hops - 1);

        /* the pkt we're sending is a copy of the received one */
        pkt_copy(&pkt, &rpkt);
        pkt_clear(&pkt);
    }

    /*
     * * Forward the new pkt * *
     */

    if (start_new_qspn_open) {
        /*
         * We have all the links closed and we haven't sent a
         * qspn_open yet, time to become an opener
         */
        qspn_open_start(real_from_rpos, pkt, rpkt.hdr.id, root_node_pos,
                        gid, level);
        root_node->flags |= QSPN_OPENER;
    } else if ((root_node->flags & QSPN_STARTER) && !int_qspn_starter) {
        /* We send a normal tracer_pkt limited to the qspn_starter nodes */
        pkt.hdr.op = TRACER_PKT;
        pkt.hdr.id = ++root_node->brdcast;
        debug(DBG_INSANE, "Fwd %s(0x%x) lvl %d to the qspn starters",
              rq_to_str(pkt.hdr.op), pkt.hdr.id, level);

        flood_pkt_send(exclude_from_and_glevel, upper_level, -1,
                       real_from_rpos, pkt);
    } else {
        /*
         * Forward the qspn_close to all our r_nodes which are not
         * closed!
         */
        debug(DBG_INSANE, "Fwd %s(0x%x) lvl %d to broadcast",
              rq_to_str(pkt.hdr.op), pkt.hdr.id, level);
        flood_pkt_send(exclude_from_and_glevel_and_closed,
                       upper_level, -1, real_from_rpos, pkt);
    }

  finish:
    if (old_bblock)
        xfree(old_bblock);
    return ret;
}

int
qspn_open(PACKET rpkt)
{
    PACKET pkt;
    brdcast_hdr *bcast_hdr;
    tracer_hdr *trcr_hdr;
    tracer_chunk *tracer;
    bnode_hdr *bhdr = 0;
    struct qspn_buffer *qb = 0;
    int not_opened = 0, ret = 0, reply, sub_id, ret_err;
    u_short hops;
    size_t bblock_sz = 0, old_bblock_sz;
    u_short old_bblocks_found = 0;
    const char *ntop;
    char *old_bblock = 0;
    char do_real_qspn_action = 0, just_forward_it = 0, int_qspn_opener = 0;
    char all_bnodes_are_opened = 0;
    map_node *from, *root_node, *tracer_starter;
    quadro_group rip_quadg;
    int gid, root_node_pos, real_from_rpos;
    u_char level, upper_level, blevel;

    /* Drop the qspn pkt if we are hooking */
    if (me.cur_node->flags & MAP_HNODE)
        goto finish;

    /*
     * * Unpack the qspn pkt and split it * *
     */
    ret_err = qspn_unpack_pkt(rpkt, &bcast_hdr, &trcr_hdr, &tracer, &bhdr,
                              &bblock_sz, &rip_quadg, &real_from_rpos,
                              &hops, &upper_level, &gid,
                              &from, &root_node,
                              &tracer_starter, &sub_id,
                              &root_node_pos, &level, &blevel,
                              &just_forward_it, &do_real_qspn_action);
    if (ret_err < 0) {
        ret = -1;
        goto finish;
    }

#ifdef DEBUG
    debug(DBG_INSANE,
          "QSPN_OPEN(0x%x, lvl %d): node[0]: %d, node[1]: %d, hops: %d",
          rpkt.hdr.id, level, tracer[0].node,
          trcr_hdr->hops > 1 ? tracer[1].node : -1, trcr_hdr->hops);
#endif

    /*
     * * * Verify the qspn_open pkt * *
     */

    if ((!level || (do_real_qspn_action &&
                    (root_node->flags & QSPN_OPENER)))
        && sub_id == root_node_pos) {
        ntop = inet_to_str(rpkt.from);
        debug(DBG_NOISE, "qspn_open(0x%x): Dropped qspn_open from "
              "%s: we are the qspn_starter of that pkt!"
              " (hops: %d)", rpkt.hdr.id, ntop, trcr_hdr->hops);
        ret = -1;
        goto finish;
    }

    if (rpkt.hdr.id < me.cur_qspn_id[level]) {
        ntop = inet_to_str(rpkt.from);
        debug(DBG_NOISE, "qspn_open(): %s sent a qspn_open"
              " with a wrong qspn_id (0x%x), cur_id: 0x%x",
              ntop, rpkt.hdr.id, me.cur_qspn_id[level]);
        ret = -1;
        goto finish;
    }

    /* Some bnode, which is in the same gnode where we are, sent a
     * qspn_open, so we are a qspn_opener too */
    if (level && sub_id == root_node_pos && hops == 1 &&
        do_real_qspn_action) {
        root_node->flags |= QSPN_OPENER;

        /* This flag indicates that the new qspn_open we received was
         * sent from our gnode, so it is an internal qspn opener.*/
        int_qspn_opener = 1;
    }

    /* We have only to forward it */
    if (level && from == root_node)
        just_forward_it = 1;

    /* Time to update our map */
    tracer_store_pkt(rpkt.from, &rip_quadg, level, trcr_hdr, tracer,
                     (void *) bhdr, bblock_sz, &old_bblocks_found,
                     &old_bblock, &old_bblock_sz);

    if (bcast_hdr->flags & QSPN_BNODE_OPENED) {
        if (from == root_node) {
            /*
             * This pkt passed through a bnode which has all its
             * links opened. Increment the counter.
             */
            me.bmap_nodes_opened[blevel]++;
        } else
            bcast_hdr->flags &= ~QSPN_BNODE_OPENED;
    }

    if (!level
        || me.bmap_nodes_opened[blevel] >= (me.bmap_nodes[blevel] - 1))
        all_bnodes_are_opened = 1;

    not_opened = 0;
    if (do_real_qspn_action && !just_forward_it) {
        /*
         * We search in the qspn_buffer for the reply which has the
         * current sub_id. If we don't find it, we add it.
         */
        qb = qspn_b[level];
        if (!qb) {
            debug(DBG_NOISE, "There isn't qspn_buffer information"
                  " for the %d level", level);
            ret = -1;
            goto finish;
        }

        if ((reply = qspn_b_find_reply(qb, sub_id)) == -1)
            list_for(qb)
                reply = qspn_b_add(qb, sub_id, 0);

        /* Time to open the links */
        qb = qspn_b[level];
        list_for(qb) {
            if (qb->rnode == from)
                qb->flags[reply] |= QSPN_OPENED;

            if (!(qb->flags[reply] & QSPN_OPENED))
                not_opened++;
        }

        /*
         * If we have all the links opened and we are in level > 0, set
         * the flags to let the other bnodes know.
         */
        if (!not_opened && level && !(root_node->flags & QSPN_OPENED)) {
            bcast_hdr->flags |= QSPN_BNODE_OPENED;
            root_node->flags |= QSPN_OPENED;
        }

        /*Fokke, we've all the links opened. let's take a rest. */
        if (!not_opened && all_bnodes_are_opened) {
            debug(DBG_NOISE, "qspn_open(0x%x, sub_id: %d) lvl %d: "
                  "The qspn_open phase is finished",
                  rpkt.hdr.id, sub_id, level);
            if (level && !(me.bmap_nodes[blevel] - 1)) {
                /*
                 * If in this `level' we are the only bnode,
                 * we need to broadcast the qspn_open to the
                 * other nodes in this gnode, to let them
                 * store the qspn_open's entries. So don't
                 * go to finish;
                 */
                debug(DBG_INSANE, "Propagating the last qspn_open");
                do_nothing();
            } else
                goto finish;
        }

        /* The forge of the packet. "One pkt to rule them all". Dum dum */
        ret_err = tracer_pkt_build(QSPN_OPEN, rpkt.hdr.id, bcast_hdr->sub_id, /*IDs */
                                   gid, level, bcast_hdr, trcr_hdr, tracer, /*Received tracer_pkt */
                                   old_bblocks_found, old_bblock, old_bblock_sz, /*bnode_block */
                                   &pkt); /*Where the pkt is built */
        if (ret_err) {
            debug(DBG_NOISE, "Cannot forward the qspn_open(0x%x) "
                  "lvl %d sub_id: %d: Pkt build failed.",
                  rpkt.hdr.id, level, sub_id);
            ret = -1;
            goto finish;
        }
    } else {
        /*
         * Increment the rtt of the last gnode chunk, because we
         * aren't adding any entry, but we are just forwarding it.
         */
        debug(DBG_INSANE, "qspn_open: Incrementing the last hops rtt.");
        ret_err = tracer_add_rtt(real_from_rpos, tracer, hops - 1);
        if (ret_err < 0)
            debug(DBG_NOISE, "tracer_add_rtt(0x%x) hop %d failed",
                  rpkt.hdr.id, hops - 1);

        /* the pkt we're sending is a copy of the received one */
        pkt_copy(&pkt, &rpkt);
        pkt_clear(&pkt);
    }

    /*
     * * Forward the new pkt * *
     */
    debug(DBG_INSANE, "%s(0x%x) lvl %d to broadcast",
          rq_to_str(pkt.hdr.op), pkt.hdr.id, level);
    if (do_real_qspn_action && !int_qspn_opener) {
        flood_pkt_send(exclude_from_and_opened_and_glevel,
                       upper_level, sub_id, real_from_rpos, pkt);
    } else {
        /* Just forward it without caring about opened or not-opened rnodes */
        flood_pkt_send(exclude_from_and_glevel, upper_level,
                       sub_id, real_from_rpos, pkt);
    }

  finish:
    if (old_bblock)
        xfree(old_bblock);
    return ret;
}