You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

tracer.c 43KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577
  1. /* This file is part of Netsukuku
  2. * (c) Copyright 2005 Andrea Lo Pumo aka AlpT <alpt@freaknet.org>
  3. *
  4. * This source code is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as published
  6. * by the Free Software Foundation; either version 2 of the License,
  7. * or (at your option) any later version.
  8. *
  9. * This source code is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  12. * Please refer to the GNU Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Public License along with
  15. * this source code; if not, write to:
  16. * Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  17. */
  18. #include "includes.h"
  19. #include "common.h"
  20. #include "request.h"
  21. #include "pkts.h"
  22. #include "bmap.h"
  23. #include "radar.h"
  24. #include "route.h"
  25. #include "radar.h"
  26. #include "rehook.h"
  27. #include "tracer.h"
  28. #include "qspn.h"
  29. #include "igs.h"
  30. #include "netsukuku.h"
  31. char *tracer_pack_pkt(brdcast_hdr * bcast_hdr, tracer_hdr * trcr_hdr,
  32. tracer_chunk * tracer, char *bblocks,
  33. size_t bblocks_sz, int new_bblocks);
  34. /*
  35. * ip_to_rfrom: If `rip_quadg' is null, it converts the `rip' ip in a
  36. * quadro_group that is stored in `new_quadg' (if it is not null), otherwise it
  37. * uses `rip_quadg' itself.
  38. * The flags passed to iptoquadg are orred whith `quadg_flags'.
  39. * The rnode position of the root_node of level 0 which corresponds to
  40. * the given ip is returned, if it isn't found -1 is returned.
  41. */
  42. int
  43. ip_to_rfrom(inet_prefix rip, quadro_group * rip_quadg,
  44. quadro_group * new_quadg, char quadg_flags)
  45. {
  46. quadro_group qdg, *quadg;
  47. map_node *from;
  48. ext_rnode_cache *erc;
  49. int ret, external_node = 0;
  50. quadg = &qdg;
  51. if (rip_quadg) {
  52. quadg = rip_quadg;
  53. } else {
  54. quadg_flags |= QUADG_GID | QUADG_GNODE;
  55. iptoquadg(rip, me.ext_map, quadg, quadg_flags);
  56. if (new_quadg)
  57. memcpy(new_quadg, quadg, sizeof(quadro_group));
  58. }
  59. if (quadg_gids_cmp(me.cur_quadg, *quadg, 1))
  60. external_node = 1;
  61. if (!external_node) {
  62. iptomap((uintptr_t) me.int_map, rip, me.cur_quadg.ipstart[1], &from);
  63. ret = rnode_find(me.cur_node, from);
  64. } else {
  65. erc = e_rnode_find(me.cur_erc, quadg, 0);
  66. ret = !erc ? -1 : erc->rnode_pos;
  67. }
  68. return ret;
  69. }
/*
 * tracer_verify_pkt: It checks the validity of `tracer': The last entry
 * in the tracer must be a node present in our r_nodes.
 * Instead of using iptoquadg it uses `rip_quadg' if it isn't null.
 *
 * `hops' is the number of chunks in `tracer', `rip' is the ip of the node
 * which forwarded us the packet, `level' is the level extracted from the
 * broadcast header.
 * It returns 0 if the packet is valid, -1 otherwise.
 */
int
tracer_verify_pkt(tracer_chunk * tracer, u_short hops, inet_prefix rip,
		  quadro_group * rip_quadg, int level)
{
	quadro_group qdg, *quadg;
	map_node *from, *real_from, *real_gfrom;
	int retries = 0, ret;

	from = real_from = real_gfrom = 0;
	quadg = &qdg;

	if (!rip_quadg)
		iptoquadg(rip, me.ext_map, quadg, QUADG_GID | QUADG_GNODE);
	else
		quadg = rip_quadg;

	/* The sender shares our gid at `level': nothing more to verify */
	if (!quadg_gids_cmp(*quadg, me.cur_quadg, level))
		return 0;

	/*
	 * Now, let's check if we are part of the bcast_hdr->g_node of
	 * bcast_hdr->level. If not let's drop it! Why the hell this pkt is
	 * here?
	 */
	if (quadg_gids_cmp(*quadg, me.cur_quadg, level + 1)) {
		debug(DBG_INSANE, "%s:%d", ERROR_POS);
		return -1;
	}

	/*
	 * `from' has to be absolutely one of our rnodes
	 */
	if (!level) {
		/* Resolve the sender ip and the last tracer entry in the
		 * internal map; they should refer to the same node */
		iptomap((uintptr_t) me.int_map, rip, me.cur_quadg.ipstart[1],
			&real_from);
		from = node_from_pos(tracer[hops - 1].node, me.int_map);
	} else {
		real_gfrom = &quadg->gnode[_EL(level)]->g;
		/* NOTE(review): `quadg->gid[0]' is looked up in the int_map
		 * here even though level > 0 -- presumably an intentional
		 * fallback before the ext_map check below; confirm. */
		from = node_from_pos(quadg->gid[0], me.int_map);
	}

	/* Look for the `from' node in the int_map. */
	if ((real_from && real_from == from) || from) {
		/* Is `from' in our rnodes? If not found, wait for one new
		 * radar scan and retry once: the rnode may simply not have
		 * been scanned into the map yet. */
		for (retries = 0;
		     (ret = rnode_find(me.cur_node, from)) == -1 && !retries;
		     retries++)
			radar_wait_new_scan();
		if (ret != -1)
			return 0;
	}

	/* `from' is a gnode, look in the ext_map */
	if (level) {
		/* Look in ext_map */
		from = (map_node *) gnode_from_pos(tracer[hops - 1].node,
						   me.ext_map[_EL(level)]);
		if (!from || (real_gfrom && real_gfrom != from)) {
			debug(DBG_INSANE, "%s:%d", ERROR_POS);
			return -1;
		}

		ret =
			g_rnode_find(me.cur_quadg.gnode[_EL(level)],
				     (map_gnode *) from);
		if (ret == -1) {
			debug(DBG_INSANE, "%s:%d gnode: %d, level: %d",
			      ERROR_POS, tracer[hops - 1].node, level);
			return -1;
		}
	}

	return 0;
}
  140. /*
  141. * tracer_add_entry: Append our entry `node' to the tracer pkt `tracer' wich has
  142. * `hops'. It returns the modified tracer pkt in a newly mallocated struct and
  143. * it increments the `*hops'.
  144. * If `tracer' is null it will return the new tracer_pkt.
  145. * On errors it returns NULL.
  146. */
  147. tracer_chunk *
  148. tracer_add_entry(void *void_map, void *void_node, tracer_chunk * tracer,
  149. u_int * hops, u_char level)
  150. {
  151. tracer_chunk *t;
  152. map_node *from;
  153. map_rnode *rfrom = 0;
  154. map_node *map, *node;
  155. map_gnode **ext_map, *gnode;
  156. int pos, new_entry_pos, last_entry_node, nhops;
  157. map = (map_node *) void_map;
  158. node = (map_node *) void_node;
  159. ext_map = (map_gnode **) void_map;
  160. gnode = (map_gnode *) void_node;
  161. (*hops)++;
  162. nhops = *hops;
  163. new_entry_pos = nhops - 1;
  164. t = xzalloc(sizeof(tracer_chunk) * nhops);
  165. if (tracer || nhops > 1) {
  166. /*
  167. * In the tracer_pkt there are already some chunks, we copy
  168. * them in the new pkt.
  169. */
  170. memcpy(t, tracer, sizeof(tracer_chunk) * (nhops - 1));
  171. /*
  172. * We add, in the new entry, the rtt there is from me to the
  173. * node of the the last entry of the old tracer pkt.
  174. */
  175. last_entry_node = tracer[nhops - 2].node;
  176. if (!level) {
  177. from = node_from_pos(last_entry_node, map);
  178. /* check if `from' is in our rnodes */
  179. if ((pos = rnode_find(me.cur_node, from)) == -1) {
  180. debug(DBG_INSANE, "%s:%d lvl: %d last_entry_node: %d",
  181. ERROR_POS, level, last_entry_node);
  182. return 0;
  183. }
  184. rfrom = &me.cur_node->r_node[pos];
  185. } else {
  186. from = (map_node *) gnode_from_pos(last_entry_node,
  187. ext_map[_EL(level)]);
  188. /* check if `from' is in our rnodes */
  189. if ((pos = g_rnode_find(me.cur_quadg.gnode[_EL(level)],
  190. (map_gnode *) from) == -1)) {
  191. debug(DBG_INSANE, "%s:%d lvl: %d last_entry_node: %d",
  192. ERROR_POS, level, last_entry_node);
  193. return 0;
  194. }
  195. rfrom = &me.cur_quadg.gnode[_EL(level)]->g.r_node[pos];
  196. }
  197. t[new_entry_pos].rtt = rfrom->trtt;
  198. }
  199. /* Fill the new entry in the tracer_pkt */
  200. if (!level) {
  201. t[new_entry_pos].gcount = 1;
  202. t[new_entry_pos].node = pos_from_node(node, map);
  203. } else {
  204. t[new_entry_pos].gcount = qspn_gnode_count[_EL(level)];
  205. t[new_entry_pos].node = pos_from_gnode(gnode, ext_map[_EL(level)]);
  206. }
  207. return t;
  208. }
  209. /*
  210. * tracer_add_rtt: Increments the rtt of the `hop'th `tracer' chunk by adding
  211. * the rtt of the rnode who is in the `rpos' postion in me.cur_node->r_node.
  212. * It returns the new rtt value on success.
  213. */
  214. int
  215. tracer_add_rtt(int rpos, tracer_chunk * tracer, u_short hop)
  216. {
  217. tracer[hop].rtt += me.cur_node->r_node[rpos].trtt;
  218. return tracer[hop].rtt;
  219. }
  220. /*
  221. * tracer_get_trtt: It stores in `trtt' the total round trip time needed to
  222. * reach the `tracer[0].node' from the me.cur_node.
  223. * me.cur_node->r_node[`from_rnode_pos'] is the rnode who forwarded us the pkt.
  224. * If it succeeds 0 is returned.
  225. */
  226. int
  227. tracer_get_trtt(int from_rnode_pos, tracer_hdr * trcr_hdr,
  228. tracer_chunk * tracer, u_int * trtt)
  229. {
  230. int hops, i;
  231. u_int trtt_ms = 0;
  232. *trtt = 0;
  233. hops = trcr_hdr->hops;
  234. if (!hops)
  235. return -1;
  236. /* Add the rtt of me -> from */
  237. trtt_ms += me.cur_node->r_node[from_rnode_pos].trtt;
  238. for (i = hops - 1; i >= 0; i--)
  239. trtt_ms += tracer[i].rtt;
  240. *trtt = trtt_ms;
  241. return 0;
  242. }
/*
 * tracer_update_gcount: it updates `gcount_counter' by adding the sum of
 * gcounts present in `tracer'.
 * It then updates the map_gnode.gcount counter of the gnodes present in the
 * `ext_map' of the `level'th level.
 * It ignores all the tracer chunks < `first_hop'.
 */
void
tracer_update_gcount(tracer_hdr * trcr_hdr, tracer_chunk * tracer,
		     int first_hop, u_int * gcount_counter,
		     map_node * int_map, map_gnode ** ext_map, int level)
{
	map_node *node = 0;
	map_gnode *gnode;
	u_int hops;
	int i;

	hops = trcr_hdr->hops;
	/* Nothing to do for an empty tracer or an out-of-range `first_hop' */
	if (!hops || first_hop >= hops || first_hop < 0)
		return;

	/* Walk the chunks backward, from `first_hop' down to the first */
	for (i = first_hop; i >= 0; i--) {
		if (level) {
			/* Replace the gnode's old gcount with the one carried
			 * in the chunk, keeping `gcount_counter' consistent:
			 * subtract the old value, then add the new one below */
			gnode = gnode_from_pos(tracer[i].node, ext_map[_EL(level)]);
			qspn_dec_gcount(gcount_counter, level + 1, gnode->gcount);
			gnode->gcount = tracer[i].gcount;
		} else
			node = node_from_pos(tracer[i].node, int_map);

		/* At level 0 only nodes still flagged MAP_VOID (not yet in
		 * our map) contribute to the counter */
		if (level || (!level && node->flags & MAP_VOID))
			qspn_inc_gcount(gcount_counter, level + 1, tracer[i].gcount);
	}
}
  273. /*
  274. * tracer_build_bentry: It builds the bnode_block to be added in the bnode's
  275. * entry in the tracer pkt. It stores in `bnodechunk' the pointer to the
  276. * first bnode_chunk and returns a pointer to the bnode_hdr.
  277. * `bnode_hdr' and `bnode_chunk' are on the same block of allocated memory.
  278. * The number of bnode_chunks is stored in `bnode_links'.
  279. * On errors it returns a NULL pointer.
  280. */
  281. bnode_hdr *
  282. tracer_build_bentry(void *void_map, void *void_node,
  283. quadro_group * node_quadg, bnode_chunk ** bnodechunk,
  284. int *bnode_links, u_char level)
  285. {
  286. map_node *int_map, *node;
  287. map_gnode **ext_map, *gnode;
  288. map_gnode *gn;
  289. bnode_hdr *bhdr;
  290. bnode_chunk *bchunk;
  291. int i, bm, node_pos;
  292. size_t bblock_sz;
  293. u_char lvl;
  294. char *bblock;
  295. u_char *bnode_gid;
  296. int_map = (map_node *) void_map;
  297. node = (map_node *) void_node;
  298. ext_map = (map_gnode **) void_map;
  299. gnode = (map_gnode *) void_node;
  300. if (level == me.cur_quadg.levels - 1)
  301. goto error;
  302. if (!level)
  303. node_pos = pos_from_node(node, int_map);
  304. else
  305. node_pos = pos_from_gnode(gnode, ext_map[_EL(level)]);
  306. bm = map_find_bnode(me.bnode_map[level], me.bmap_nodes[level],
  307. node_pos);
  308. if (bm == -1)
  309. goto error;
  310. /*This will never happen, but we know the universe is fucking bastard */
  311. if (!me.bnode_map[level][bm].links)
  312. goto error;
  313. bblock_sz = BNODEBLOCK_SZ(level + 1, me.bnode_map[level][bm].links);
  314. bblock = xzalloc(bblock_sz);
  315. bhdr = (bnode_hdr *) bblock;
  316. bhdr->bnode_levels = level + 1;
  317. bnode_gid = (u_char *) (bblock + sizeof(bnode_hdr));
  318. bchunk =
  319. (bnode_chunk *) (bnode_gid + sizeof(u_char) * bhdr->bnode_levels);
  320. for (i = 0; i < bhdr->bnode_levels; i++)
  321. bnode_gid[i] = node_quadg->gid[i];
  322. /* Fill the bnode chunks */
  323. for (i = 0; i < me.bnode_map[level][bm].links; i++) {
  324. gn = (map_gnode *) me.bnode_map[level][bm].r_node[i].r_node;
  325. lvl = extmap_find_level(me.ext_map, gn, me.cur_quadg.levels);
  326. if (lvl != level + 1)
  327. continue;
  328. bchunk[i].gnode = pos_from_gnode(gn, me.ext_map[_EL(lvl)]);
  329. bchunk[i].level = lvl;
  330. bchunk[i].rtt = me.bnode_map[level][bm].r_node[i].trtt;
  331. bhdr->links++;
  332. debug(DBG_INSANE, "tracer_build_bentry: lvl %d bchunk[%d].gnode:"
  333. " %d", level, i, bchunk[i].gnode);
  334. }
  335. if (!bhdr->links) {
  336. xfree(bblock);
  337. goto error;
  338. }
  339. /* Reduce the size of the bblock to its effective size. Initially we
  340. * allocated it considering all the `me.bnode_map[level][bm].links'
  341. * links, but if bhdr->links is lesser than
  342. * me.bnode_map[level][bm].links that means they are not all added in
  343. * the chunks.
  344. */
  345. if (bhdr->links < me.bnode_map[level][bm].links) {
  346. bblock_sz = BNODEBLOCK_SZ(bhdr->bnode_levels, bhdr->links);
  347. bblock = xrealloc(bblock, bblock_sz);
  348. bhdr = (bnode_hdr *) bblock;
  349. bchunk =
  350. (bnode_chunk *) (bblock + BNODE_HDR_SZ(bhdr->bnode_levels));
  351. }
  352. *bnode_links = bhdr->links;
  353. *bnodechunk = bchunk;
  354. return bhdr;
  355. error:
  356. *bnode_links = 0;
  357. *bnodechunk = 0;
  358. return 0;
  359. }
/*
 * tracer_pkt_build:
 * It builds a tracer_pkt and stores it in `pkt'.
 *
 * If `trcr_hdr' or `tracer' are null, it will build a brand new tracer_pkt,
 * otherwise it will append in the `tracer' the new entry.
 * Tracer_pkt_build will append also the old bblock:
 * `old_bblocks' is the number of bblocks,
 * `old_bblock_buf' is the block of the old bblock and it is `old_bblock_sz' big.
 * If `old_bblocks' is 0 or `old_bblock_buf' and `old_bblock_sz' are null
 * they are ignored.
 *
 * The `pkt.hdr.op' is set to `rq', `pkt.hdr.id' to `rq_id' and the
 * `bcast_hdr.sub_id' to `bcast_sub_id'.
 *
 * The packet shall be sent with flood_pkt_send.
 * It returns -1 on errors.
 */
int
tracer_pkt_build(u_char rq, int rq_id, int bcast_sub_id,
		 int gnode_id, u_char gnode_level,
		 brdcast_hdr * bcast_hdr, tracer_hdr * trcr_hdr,
		 tracer_chunk * tracer, u_short old_bblocks,
		 char *old_bblock_buf, size_t old_bblock_sz, PACKET * pkt)
{
	brdcast_hdr bh;
	tracer_hdr th;

	tracer_chunk *new_tracer = 0;
	bnode_hdr *new_bhdr = 0;
	bnode_chunk *new_bchunk = 0;
	map_node *root_node, *upper_root_node = 0;
	char *igw_pack = 0;
	void *void_map, *void_node, *p;
	size_t new_bblock_sz = 0, total_bblock_sz = 0, igw_pack_sz = 0;
	u_int hops = 0;
	int new_bblock_links = 0, new_bblocks = 0, tot_new_bblocks =
		0, tot_bblocks = 0;

	if (!trcr_hdr || !tracer || !bcast_hdr) {
		/* Brand new tracer packet: use the local headers on the
		 * stack and fill them from scratch (they are copied into
		 * the packed msg by tracer_pack_pkt below). */
		bcast_hdr = &bh;
		setzero(bcast_hdr, sizeof(brdcast_hdr));

		bcast_hdr->gttl = MAXGROUPNODE - 1;
		bcast_hdr->level = gnode_level + 1;
		bcast_hdr->g_node = gnode_id;

		trcr_hdr = &th;
		setzero(trcr_hdr, sizeof(tracer_hdr));
	}

	hops = trcr_hdr->hops;

	setzero(pkt, sizeof(PACKET));
	pkt->hdr.op = rq;
	pkt->hdr.id = rq_id;
	pkt->hdr.flags |= BCAST_PKT;
	bcast_hdr->flags |= BCAST_TRACER_PKT;

	/* Select the map / root_node pair matching `gnode_level' */
	if (!gnode_level) {
		void_map = (void *) me.int_map;
		root_node = me.cur_node;
		void_node = (void *) root_node;
	} else {
		void_map = (void *) me.ext_map;
		root_node = &me.cur_quadg.gnode[_EL(gnode_level)]->g;
		void_node = (void *) root_node;
	}

	if (gnode_level < me.cur_quadg.levels)
		upper_root_node = &me.cur_quadg.gnode[_EL(gnode_level + 1)]->g;

	/*
	 * Time to append our entry in the tracer_pkt
	 */
	new_tracer = tracer_add_entry(void_map, void_node, tracer, &hops,
				      gnode_level);
	if (!new_tracer) {
		debug(DBG_NOISE, "tracer_pkt_build: Cannot add the new"
		      " entry in the tracer_pkt");
		return -1;
	}

	/* Record where the first chunk of the QSPN_OPEN phase begins */
	if (rq == QSPN_OPEN && !trcr_hdr->first_qspn_open_chunk)
		trcr_hdr->first_qspn_open_chunk = hops;

	/* If we are a bnode we have to append the bnode_block too. */
	if (me.cur_node->flags & MAP_BNODE &&
	    gnode_level < me.cur_quadg.levels - 1 &&
	    upper_root_node->flags & MAP_BNODE) {
		new_bhdr = tracer_build_bentry(void_map, void_node, &me.cur_quadg,
					       &new_bchunk, &new_bblock_links,
					       gnode_level);
		if (new_bhdr) {
			tot_new_bblocks = 1;
			new_bblock_sz = BNODEBLOCK_SZ(new_bhdr->bnode_levels,
						      new_bblock_links);

			bcast_hdr->flags |= BCAST_TRACER_BBLOCK;
			trcr_hdr->flags |= TRCR_BBLOCK;
		}
	}

	/* In restricted mode, pack our internet gateway info as well */
	if (restricted_mode &&
	    ((!gnode_level && server_opt.share_internet && me.inet_connected)
	     || (gnode_level && me.igws_counter[gnode_level - 1]))) {

		igw_pack =
			igw_build_bentry(gnode_level, &igw_pack_sz, &new_bblocks);

		/* Append the igw_pack after the new bblock */
		if (igw_pack) {
			total_bblock_sz = new_bblock_sz + igw_pack_sz;
			new_bhdr = xrealloc(new_bhdr, total_bblock_sz);

			tot_new_bblocks += new_bblocks;

			bcast_hdr->flags |= BCAST_TRACER_BBLOCK;
			trcr_hdr->flags |= TRCR_IGW;

			p = (char *) new_bhdr + new_bblock_sz;
			memcpy(p, igw_pack, igw_pack_sz);
			/* NOTE(review): `igw_pack' is never freed after being
			 * copied -- verify igw_build_bentry's ownership
			 * contract (possible leak). */
		}

		new_bblock_sz += igw_pack_sz;
	}

	/*
	 * If in the old tracer_pkt is present a bblock, we append it after the
	 * new entry.
	 */
	if (old_bblocks && old_bblock_buf && old_bblock_sz) {
		total_bblock_sz = new_bblock_sz + old_bblock_sz;
		new_bhdr = xrealloc(new_bhdr, total_bblock_sz);

		p = (char *) new_bhdr + new_bblock_sz;
		memcpy(p, old_bblock_buf, old_bblock_sz);

		bcast_hdr->flags |= BCAST_TRACER_BBLOCK;
		new_bblock_sz += old_bblock_sz;
	}
	tot_bblocks = tot_new_bblocks + old_bblocks;

	/*
	 * Here we are really building the pkt, packing all the stuff into a
	 * single bullet.
	 */
	trcr_hdr->hops = hops;
	bcast_hdr->sub_id = bcast_sub_id;
	bcast_hdr->sz = TRACERPKT_SZ(hops) + new_bblock_sz;
	pkt->hdr.sz = BRDCAST_SZ(bcast_hdr->sz);

	pkt_addcompress(pkt);
	pkt_addtimeout(pkt, TRACER_RQ_TIMEOUT, 0, 1);
	pkt_addnonblock(pkt);

	pkt->msg = tracer_pack_pkt(bcast_hdr, trcr_hdr, new_tracer,
				   (char *) new_bhdr, new_bblock_sz,
				   tot_bblocks);

	/* Yea, finished */
	if (new_tracer)
		xfree(new_tracer);
	if (new_bhdr)
		xfree(new_bhdr);
	return 0;
}
/*
 * tracer_pack_pkt: it packs the tracer packet.
 *
 * The returned buffer (owned by the caller, to be xfreed) contains, in
 * network byte order: `bcast_hdr', `trcr_hdr', the `trcr_hdr->hops' chunks
 * of `tracer', then the `bblocks_sz' bytes of `bblocks'.
 *
 * If `new_bblocks' isn't zero, the first `new_bblocks'# bblocks contained in
 * `bblocks' are converted to network order.
 */
char *
tracer_pack_pkt(brdcast_hdr * bcast_hdr, tracer_hdr * trcr_hdr,
		tracer_chunk * tracer, char *bblocks, size_t bblocks_sz,
		int new_bblocks)
{
	bnode_hdr *bhdr;
	bnode_chunk *bchunk;
	size_t pkt_sz;
	char *msg, *buf;
	int i, e;

	pkt_sz = BRDCAST_SZ(TRACERPKT_SZ(trcr_hdr->hops) + bblocks_sz);

	buf = msg = xzalloc(pkt_sz);

	/* add broadcast header */
	memcpy(buf, bcast_hdr, sizeof(brdcast_hdr));
	ints_host_to_network(buf, brdcast_hdr_iinfo);
	buf += sizeof(brdcast_hdr);

	/* add the tracer header */
	memcpy(buf, trcr_hdr, sizeof(tracer_hdr));
	ints_host_to_network(buf, tracer_hdr_iinfo);
	buf += sizeof(tracer_hdr);

	/* add the tracer chunks and convert them to network order */
	for (i = 0; i < trcr_hdr->hops; i++) {
		memcpy(buf, &tracer[i], sizeof(tracer_chunk));
		ints_host_to_network(buf, tracer_chunk_iinfo);
		buf += sizeof(tracer_chunk);
	}

	/* add the bnode blocks */
	if (bblocks_sz && bblocks) {
		/* copy the whole block */
		memcpy(buf, bblocks, bblocks_sz);

		/* and convert it to network order */
		for (e = 0; e < new_bblocks; e++) {
			bhdr = (bnode_hdr *) buf;
			/* layout: bnode_hdr, gid[bnode_levels], chunks[] */
			bchunk = (bnode_chunk *) ((char *) buf + sizeof(bnode_hdr) +
						  sizeof(u_char) * bhdr->bnode_levels);

			for (i = 0; i < bhdr->links; i++)
				ints_host_to_network(&bchunk[i], bnode_chunk_iinfo);

			/* `buf' is advanced while bhdr is still readable in
			 * host order; only afterwards the header itself is
			 * converted -- keep this ordering */
			buf += BNODEBLOCK_SZ(bhdr->bnode_levels, bhdr->links);
			ints_host_to_network(bhdr, bnode_hdr_iinfo);
		}
	}

	return msg;
}
/*
 * tracer_unpack_pkt: Given a packet `rpkt' it scomposes the rpkt.msg in
 * `new_bcast_hdr', `new_tracer_hdr', `new_tracer', 'new_bhdr', and
 * `new_block_sz'.
 * If the `new_rip_quadg' pointer is not null, the quadro_group of the
 * `rpk.from' ip is stored in it.
 * It returns 0 if the packet is valid, otherwise -1 is returned.
 * Note that rpkt.msg will be modified during the unpacking.
 */
int
tracer_unpack_pkt(PACKET rpkt, brdcast_hdr ** new_bcast_hdr,
		  tracer_hdr ** new_tracer_hdr, tracer_chunk ** new_tracer,
		  bnode_hdr ** new_bhdr, size_t * new_bblock_sz,
		  quadro_group * new_rip_quadg, int *real_from_rpos)
{
	brdcast_hdr *bcast_hdr;
	tracer_hdr *trcr_hdr;
	tracer_chunk *tracer;
	bnode_hdr *bhdr = 0;
	quadro_group rip_quadg;
	size_t bblock_sz = 0, tracer_sz = 0;
	int level, i;

	/* The headers are converted to host order in place, inside rpkt.msg;
	 * the returned pointers point into that same buffer */
	bcast_hdr = BRDCAST_HDR_PTR(rpkt.msg);
	ints_network_to_host(bcast_hdr, brdcast_hdr_iinfo);

	trcr_hdr = TRACER_HDR_PTR(rpkt.msg);
	ints_network_to_host(trcr_hdr, tracer_hdr_iinfo);

	tracer = TRACER_CHUNK_PTR(rpkt.msg);

	/* Zero every output argument, so the caller sees nulls on failure */
	*new_bcast_hdr = 0;
	*new_tracer_hdr = 0;
	*new_tracer = 0;
	*new_bhdr = 0;
	*new_bblock_sz = 0;
	*real_from_rpos = 0;

	/* Sanity check on the declared hop count vs the real packet size */
	tracer_sz = BRDCAST_SZ(TRACERPKT_SZ(trcr_hdr->hops));
	if (tracer_sz > rpkt.hdr.sz || !trcr_hdr->hops ||
	    trcr_hdr->hops > MAXGROUPNODE) {
		debug(DBG_INSANE, "%s:%d messed tracer pkt: %d, %d, %d",
		      ERROR_POS, tracer_sz, rpkt.hdr.sz, trcr_hdr->hops);
		return -1;
	}

	if (rpkt.hdr.op == QSPN_CLOSE)
		/* It can be non-zero only if it is a QSPN_OPEN */
		trcr_hdr->first_qspn_open_chunk = 0;

	/* Convert the tracer chunks to host order */
	for (i = 0; i < trcr_hdr->hops; i++)
		ints_network_to_host(&tracer[i], tracer_chunk_iinfo);

	if (rpkt.hdr.sz > tracer_sz) {
		/* There is also a bnode block in the tracer pkt */
		bblock_sz = rpkt.hdr.sz - tracer_sz;
		bhdr = (bnode_hdr *) (rpkt.msg + tracer_sz);

		/* The flags must be coherent with the presence of a bblock */
		if ((!(trcr_hdr->flags & TRCR_BBLOCK)
		     && !(trcr_hdr->flags & TRCR_IGW))
		    || !(bcast_hdr->flags & BCAST_TRACER_BBLOCK)) {
			debug(DBG_INSANE, ERROR_MSG "trcr_flags: %d flags: %d",
			      ERROR_POS, trcr_hdr->flags, bcast_hdr->flags);
			return -1;
		}
	}

	/* bcast_hdr->level carries level + 1 */
	if ((level = bcast_hdr->level) > 0)
		level--;
	if (!(rpkt.hdr.flags & BCAST_PKT)
	    || !(bcast_hdr->flags & BCAST_TRACER_PKT) || level > FAMILY_LVLS) {
		debug(DBG_INSANE, "%s:%d", ERROR_POS);
		return -1;
	}

	/* Convert the ip in quadro_group.
	 * NOTE(review): the header comment says `new_rip_quadg' may be null,
	 * but no null check is performed here -- callers must pass a valid
	 * pointer. */
	iptoquadg(rpkt.from, me.ext_map, &rip_quadg, QUADG_GID | QUADG_GNODE);
	memcpy(new_rip_quadg, &rip_quadg, sizeof(quadro_group));

	if (tracer_verify_pkt
	    (tracer, trcr_hdr->hops, rpkt.from, &rip_quadg, level))
		return -1;

	/* The rnode position of the node which forwarded us the pkt */
	*real_from_rpos = ip_to_rfrom(rpkt.from, &rip_quadg, 0, 0);
	if (*real_from_rpos < 0) {
		debug(DBG_INSANE, "%s:%d", ERROR_POS);
		return -1;
	}

	*new_bcast_hdr = bcast_hdr;
	*new_tracer_hdr = trcr_hdr;
	*new_tracer = tracer;
	*new_bhdr = bhdr;
	*new_bblock_sz = bblock_sz;
	return 0;
}
/*
 * tracer_split_bblock: It searches from bnode_block_start to
 * bnode_block_start+bblock_sz for bnode blocks.
 * It puts the address of the found bblock_hdr in the `bbl_hdr' (bnode block list)
 * and the address pointing to the start of the bnode_chunk in the `bbl'. The
 * total size of all the valid bblocks considered is stored in `*bblock_found_sz'.
 * It then returns the number of bblocks found.
 *
 * During the splitting the bblock is modified 'cause it is converted in host
 * order.
 * Remember to xfree bbl_hdr and bbl after using tracer_split_bblock too.
 * On error zero is returned.
 */
u_short
tracer_split_bblock(void *bnode_block_start, size_t bblock_sz,
		    bnode_hdr *** bbl_hdr, bnode_chunk **** bbl,
		    size_t * bblock_found_sz)
{
	bnode_hdr *bblock_hdr;
	bnode_chunk *bblock;
	bnode_hdr **bblist_hdr = 0;
	bnode_chunk ***bblist = 0;
	u_char *bnode_gid;
	size_t bsz = 0;
	int loop, e, p, x = 0;

	*bblock_found_sz = 0;
	if (!bblock_sz)
		return 0;

	for (loop = 0; loop <= 1; loop++) {

		/*
		 * The second `for' below acts in different ways for different
		 * values of `loop'.
		 * When `loop' == 0 it just counts how many valid bblocks there
		 * are, then it allocs the right amount of memory for
		 * `bblist_hdr' and `bblist'.
		 * When `loop' == 1 it fills the `bblist_hdr' and `bblist'
		 * arrays.
		 *
		 * If we use just one loop we are forced to xrealloc
		 * `bblist_hdr' and `bblist' many times, because we don't know
		 * how many bblocks thereare. The malloc operation are slow,
		 * therefore to use only one xmalloc we prefer to count first.
		 */

		for (e = 0, x = 0; e < bblock_sz;) {
			bblock_hdr = (void *) ((char *) bnode_block_start + e);
			/* The header is converted in place, so only do it on
			 * the first (counting) pass */
			if (!loop)
				ints_network_to_host(bblock_hdr, bnode_hdr_iinfo);

			/* layout: bnode_hdr, gid[bnode_levels], chunks[] */
			bnode_gid = (u_char *) bblock_hdr + sizeof(bnode_hdr);
			bblock = (bnode_chunk *) ((char *) bnode_gid +
						  (bblock_hdr->bnode_levels *
						   sizeof(u_char)));

			/* Skip a header carrying a bogus link count */
			if (bblock_hdr->links <= 0
			    || bblock_hdr->links >= MAXGROUPNODE) {
				e += BNODEBLOCK_SZ(bblock_hdr->bnode_levels, 0);
				continue;
			}

			bsz =
				BNODEBLOCK_SZ(bblock_hdr->bnode_levels, bblock_hdr->links);

			/*Are we going far away the end of the buffer? */
			if (bblock_sz - e < bsz)
				break;

			if (loop) {
				bblist_hdr[x] = bblock_hdr;
				bblist[x] =
					xmalloc(sizeof(bnode_chunk *) * bblock_hdr->links);
				for (p = 0; p < bblock_hdr->links; p++) {
					bblist[x][p] = &bblock[p];
					/* the chunks too are converted in
					 * place, on this second pass only */
					ints_network_to_host(&bblock[p], bnode_chunk_iinfo);
				}
			}

			if (!loop)
				(*bblock_found_sz) += bsz;

			x++;
			e += bsz;
		}

		if (!loop) {
			if (!x)
				return 0;
			/* NOTE(review): `sizeof(bnode_chunk *)' is used to
			 * size an array of `bnode_chunk **'; both are pointer
			 * sized, so this is harmless in practice. */
			bblist_hdr = xmalloc(sizeof(bnode_hdr *) * x);
			bblist = xmalloc(sizeof(bnode_chunk *) * x);
		}
	}

	*bbl_hdr = bblist_hdr;
	*bbl = bblist;
	return x;
}
  720. /*
  721. * tracer_store_bblock: stores in the bnode map the chunks of the bblock
  722. * starting at `bnode_block_start'.
  723. * In `*bblocks_found' it stores the number of bblocks considered and stores in
  724. * `bblocks_found_block' these bblocks. The `bblocks_found_block' remains in
  725. * host order.
  726. * Remember to xfree(bblocks_found_block);
  727. * On error -1 is returned.
  728. */
int
tracer_store_bblock(u_char level, tracer_hdr * trcr_hdr,
					tracer_chunk * tracer, void *bnode_block_start,
					size_t bblock_sz, u_short * bblocks_found,
					char **bblocks_found_block, size_t * bblock_found_sz)
{
	map_node *node;
	map_gnode *gnode;
	bnode_hdr **bblist_hdr = 0;
	bnode_chunk ***bblist = 0;
	map_rnode rn;
	int i, e, o, f, p, bm, igws_found = 0;
	u_short bb;
	size_t found_block_sz, bsz, x;
	char *found_block;
	u_char *bnode_gid, bnode, blevel;

	/*
	 * Split the block: `bblist_hdr'/`bblist' become arrays of `bb'
	 * pointers into `bnode_block_start' (converted to host order by
	 * tracer_split_bblock).
	 */
	bb = tracer_split_bblock(bnode_block_start, bblock_sz, &bblist_hdr,
							 &bblist, &found_block_sz);
	*bblocks_found = bb;
	if (!bb) {
		/* The bblock was malformed -_- */
		debug(DBG_NORMAL, ERROR_MSG "malformed bnode block", ERROR_POS);
		*bblock_found_sz = 0;
		*bblocks_found_block = 0;
		return -1;
	}

	/*
	 * Store the received bnode blocks.
	 * `x' tracks how many bytes of valid bblocks have been copied into
	 * `found_block' so far.
	 */
	igws_found = x = 0;
	*bblocks_found_block = found_block = xmalloc(found_block_sz);
	for (i = 0; i < bb; i++) {
		/* The gid array of the bnode starts right after its header */
		bnode_gid = (u_char *) bblist_hdr[i] + sizeof(bnode_hdr);

		/* We update only the bmaps which are at
		 * levels where our gnodes are in common with
		 * those of the bnode, which sent us this
		 * bblock.
		 * After this loop, `o' is the highest level where the gids
		 * differ (used below as the starting `blevel'). */
		for (o = level, f = 0; o >= 0; o--)
			if (bnode_gid[o] != me.cur_quadg.gid[o]) {
				f = 1;
				break;
			}
		if (!f) {
			/*
			 * bnode_gid is equal to me.cur_quadg.gid, so this
			 * bnode block was sent by ourself, skip it.
			 */
			debug(DBG_NORMAL, ERROR_MSG "skipping the %d bnode,"
				  "it was built by us!", ERROR_POS, i);
			goto discard_bblock;
		}

		/*
		 * Check if this bblock is an IGW (Internet gateway). If it is,
		 * store it in me.igws instead of the bnode map.
		 */
		if (bblist[i][0]->level >= FAMILY_LVLS + 1) {
			if (restricted_mode &&
				(igws_found < MAX_IGW_PER_QSPN_CHUNK &&
				 trcr_hdr->flags & TRCR_IGW)) {
				if (server_opt.use_shared_inet)
					igw_store_bblock(bblist_hdr[i], bblist[i][0], level);
				igws_found++;
				/* The IGW bblock is still copied into
				 * `found_block' below (skip only the bmap update) */
				goto skip_bmap;
			} else {
				debug(DBG_NOISE, ERROR_MSG "Malforded bblock entry",
					  ERROR_POS);
				goto discard_bblock;
			}
		}
		/* A non-IGW bblock is valid only if the tracer announced it */
		if (!(trcr_hdr->flags & TRCR_BBLOCK)) {
			debug(DBG_NOISE, ERROR_MSG "Malforded bblock entry",
				  ERROR_POS);
			goto discard_bblock;
		}

		/* Update every bmap from the first differing level upward */
		for (blevel = o; blevel < bblist_hdr[i]->bnode_levels; blevel++) {
			bnode = bnode_gid[blevel];
			if (!blevel) {
				node = node_from_pos(bnode, me.int_map);
				node->flags |= MAP_BNODE;
				node->flags &= ~QSPN_OLD;
			} else {
				gnode = gnode_from_pos(bnode, me.ext_map[_EL(blevel)]);
				gnode->g.flags |= MAP_BNODE;
				gnode->g.flags &= ~QSPN_OLD;
			}

			/* Let's check if we have this bnode in the bmap, if not let's
			 * add it */
			bm = map_find_bnode(me.bnode_map[blevel],
								me.bmap_nodes[blevel], bnode);
			if (bm == -1)
				bm = map_add_bnode(&me.bnode_map[blevel],
								   &me.bmap_nodes[blevel], bnode, 0);

			/* This bnode has the BMAP_UPDATE
			 * flag set, thus this is the first
			 * time we update him during this new
			 * qspn_round and for this reason
			 * delete all its rnodes */
			if (me.bnode_map[blevel][bm].flags & BMAP_UPDATE) {
				rnode_destroy(&me.bnode_map[blevel][bm]);
				me.bnode_map[blevel][bm].flags &= ~BMAP_UPDATE;
			}

			/* Store the rnodes of the bnode */
			for (e = 0; e < bblist_hdr[i]->links; e++) {
				setzero(&rn, sizeof(map_rnode));

				debug(DBG_INSANE, "Bnode %d new link %d: gid %d lvl %d",
					  bnode, e, bblist[i][e]->gnode, bblist[i][e]->level);

				gnode = gnode_from_pos(bblist[i][e]->gnode,
									   me.
									   ext_map[_EL(bblist[i][e]->level)]);
				gnode->g.flags &= ~QSPN_OLD;

				rn.r_node = (int *) gnode;
				rn.trtt = bblist[i][e]->rtt;

				/* NOTE(review): `> 0' means a match at position 0 is
				 * NOT overwritten and a duplicate rnode is added via
				 * rnode_add() instead — confirm whether rnode_find()
				 * can legitimately return 0 here. */
				if ((p = rnode_find(&me.bnode_map[blevel][bm], gnode)) > 0) {
					/* Overwrite the current rnode */
					map_rnode_insert(&me.bnode_map[blevel][bm], p, &rn);
				} else
					/* Add a new rnode */
					rnode_add(&me.bnode_map[blevel][bm], &rn);
			}
		}
	  skip_bmap:
		/* Copy the found bblock in `bblocks_found_block' and converts
		 * it in network order */
		bsz =
			BNODEBLOCK_SZ(bblist_hdr[i]->bnode_levels,
						  bblist_hdr[i]->links);
		memcpy(found_block + x, bblist_hdr[i], bsz);
		x += bsz;
	  discard_bblock:
		xfree(bblist[i]);
	}
	*bblock_found_sz = x;

	xfree(bblist_hdr);
	xfree(bblist);

	return 0;
}
  868. /*
  869. * tracer_check_node_collision: if a collision is detected between me and the
  870. * (g)node `node', new_rehook shall be called and 1 returned.
  871. * `tr_node' is the gid of `node'.
  872. */
  873. int
  874. tracer_check_node_collision(tracer_hdr * trcr, int hop, map_node * node,
  875. int tr_node, int tr_gcount, int level)
  876. {
  877. map_gnode *gnode;
  878. int probable_collision = 0;
  879. u_int gcount;
  880. map_node *root_node;
  881. gnode = (map_gnode *) node;
  882. if (!level) {
  883. gcount = 0;
  884. root_node = me.cur_node;
  885. } else {
  886. gcount = tr_gcount;
  887. root_node = &me.cur_quadg.gnode[_EL(level)]->g;
  888. }
  889. if (node == root_node &&
  890. ((trcr->first_qspn_open_chunk &&
  891. !((trcr->first_qspn_open_chunk - 1 == hop &&
  892. (root_node->flags & QSPN_OPENER)) ||
  893. (hop < trcr->first_qspn_open_chunk - 1)
  894. )
  895. ) ||
  896. (!trcr->first_qspn_open_chunk &&
  897. !(!hop && (root_node->flags & QSPN_STARTER))
  898. )
  899. )
  900. )
  901. probable_collision = 1;
  902. if (probable_collision) {
  903. loginfo("%s collision detected! Checking rehook status...",
  904. !level ? "node" : "gnode");
  905. debug(DBG_NORMAL, "collision info: i: %d, starter %d opener %d",
  906. hop, me.cur_node->flags & QSPN_STARTER,
  907. me.cur_node->flags & QSPN_OPENER);
  908. new_rehook(gnode, tr_node, level, gcount);
  909. return 1;
  910. }
  911. return 0;
  912. }
  913. /*
  914. * tracer_store_pkt: This is the main function used to keep the int/ext_map's
  915. * karma in peace.
  916. * It updates the internal or external map with the given tracer pkt.
  917. *
  918. * `rip' is the rnode ip. It is the last node who forwarded the tracer pkt to
  919. * us. `rip_quadg' is a quadro_group struct related to it.
  920. *
  921. * `trcr_hdr' is the header of the tracer pkt, while `tracer' points at the
  922. * start of its body.
  923. *
  924. * The bnode blocks (if any) are unpacked and used to update the data of the
  925. * bordering gnodes. Read the tracer_store_bblock() description (above) to
  926. * know the meaning of the other arguments.
  927. */
int
tracer_store_pkt(inet_prefix rip, quadro_group * rip_quadg, u_char level,
				 tracer_hdr * trcr_hdr, tracer_chunk * tracer,
				 void *bnode_block_start, size_t bblock_sz,
				 u_short * bblocks_found, char **bblocks_found_block,
				 size_t * bblock_found_sz)
{
	map_node *from, *node, *root_node;
	map_gnode *gfrom, *gnode = 0;
	map_rnode rnn;
	int i, e, x, f, diff, from_rnode_pos, skip_rfrom;
	int gfrom_rnode_pos, from_tpos;
	u_int hops, trtt_ms = 0;

	hops = trcr_hdr->hops;

	/* Nothing to store */
	if (hops <= 0)
		return 0;

	/* `from' is the last node that forwarded the pkt to us: the last
	 * chunk of the tracer */
	from_tpos = hops - 1;

	if (!level) {
		from = node_from_pos(tracer[from_tpos].node, me.int_map);
		root_node = me.cur_node;
	} else {
		gfrom =
			gnode_from_pos(tracer[from_tpos].node, me.ext_map[_EL(level)]);
		from = &gfrom->g;
		root_node = &me.cur_quadg.gnode[_EL(level)]->g;
	}
	/* NOTE(review): `from_rnode_pos' is not checked for -1; the
	 * r_node[from_rnode_pos] access below assumes `from' is always an
	 * rnode of `root_node' — confirm against the callers. */
	from_rnode_pos = rnode_find(root_node, from);

	/* It's alive, keep it young */
	from->flags &= ~QSPN_OLD;

	if (bblock_sz && level != me.cur_quadg.levels - 1) {
		/* Well, well, we have to take care of bnode blocks, split the
		 * bblock. */
		tracer_store_bblock(level, trcr_hdr, tracer, bnode_block_start,
							bblock_sz, bblocks_found, bblocks_found_block,
							bblock_found_sz);
	}

	/*
	 * * Store the qspn routes to reach all the nodes of the tracer pkt *
	 */

	skip_rfrom = 0;
	node = root_node;
	if (!level) {
		/* We skip the node at hops-1 which it is the `from' node. The radar()
		 * takes care of him. */
		skip_rfrom = 1;
	} else if (from == root_node) {
		/* If tracer[hops-1].node is our gnode then we can skip it */
		skip_rfrom = 1;
		from_tpos = hops - 2;
		from_rnode_pos = ip_to_rfrom(rip, rip_quadg, 0, 0);

		if (hops > 1) {
			/* NOTE(review): this `rnn' shadows the function-scope
			 * `rnn' declared above; setzero/rnode_add below operate on
			 * this inner copy. Works, but the shadowing is fragile. */
			map_rnode rnn;

			/*
			 * hops-2 is an rnode of hops-1, which is our gnode,
			 * so we update the `gfrom' and `from' vars and let
			 * them point to hops-2.
			 */
			gfrom = gnode_from_pos(tracer[hops - 2].node,
								   me.ext_map[_EL(level)]);
			from = &gfrom->g;
			from->flags |= MAP_GNODE | MAP_RNODE;

			gfrom_rnode_pos = rnode_find(root_node, gfrom);
			if (gfrom_rnode_pos == -1) {
				gfrom_rnode_pos = root_node->links;

				/*
				 * Add an rnode in the root_node which point to
				 * `gfrom', because it is our new (g)rnode.
				 */
				setzero(&rnn, sizeof(map_rnode));
				rnn.r_node = (int *) gfrom;
				rnode_add(root_node, &rnn);
			}
			root_node->r_node[gfrom_rnode_pos].trtt = tracer[hops - 2].rtt;
		}

		/* we are using the real from, so the root node is the one
		 * at level 0 */
		node = me.cur_node;
	} else if (me.cur_node->flags & MAP_BNODE) {
		/* If we are a bnode which borders on the `from' [g]node, then we
		 * can skip it. */
		i = map_find_bnode_rnode(me.bnode_map[level - 1],
								 me.bmap_nodes[level - 1], from);
		if (i != -1)
			skip_rfrom = 1;
	}

	if (from_tpos >= 0) {		/* Is there a rnode in the tracer ? */
		/* Update `qspn_gnode_count' */
		tracer_update_gcount(trcr_hdr, tracer, from_tpos,
							 qspn_gnode_count, me.int_map, me.ext_map,
							 level);
		/* Let's see if we have to rehook */
		new_rehook((map_gnode *) from, tracer[from_tpos].node, level,
				   tracer[from_tpos].gcount);
	}

	/* We add in the total rtt the first rtt which is me -> from */
	trtt_ms = node->r_node[from_rnode_pos].trtt;

	/* If we are skipping the rfrom, remember to sum its rtt */
	if (skip_rfrom)
		trtt_ms += tracer[hops - 1].rtt;

	/* Walk the tracer chunks backwards, accumulating the round-trip
	 * time from us to each listed (g)node */
	for (i = (hops - skip_rfrom) - 1; i >= 0; i--) {
		if (i)
			trtt_ms += tracer[i].rtt;

		if (!level)
			node = node_from_pos(tracer[i].node, me.int_map);
		else {
			gnode = gnode_from_pos(tracer[i].node, me.ext_map[_EL(level)]);
			node = &gnode->g;

			if (tracer[i].gcount == NODES_PER_LEVEL(level))
				/* The gnode is full */
				gnode->g.flags |= GMAP_FULL;
		}

		/* A detected collision triggers a rehook and aborts the walk */
		if (tracer_check_node_collision(trcr_hdr, i, node, tracer[i].node,
										tracer[i].gcount, level))
			break;

		node->flags &= ~QSPN_OLD;

		if (node->flags & MAP_VOID) {
			/* Ehi, we hadn't this node in the map. Add it. */
			node->flags &= ~MAP_VOID;
			node->flags |= MAP_UPDATE;
			if (level)
				gnode->flags &= ~GMAP_VOID;
			gnode_inc_seeds(&me.cur_quadg, level);
			debug(DBG_INSANE, "TRCR_STORE: node %d added", tracer[i].node);
		}

		/* update the rtt of the node: refresh the link toward `from'
		 * only when the change exceeds RTT_DELTA */
		for (e = 0, f = 0; e < node->links; e++) {
			if (node->r_node[e].r_node == (int *) from) {
				diff = abs(node->r_node[e].trtt - trtt_ms);
				if (diff >= RTT_DELTA) {
					node->r_node[e].trtt = trtt_ms;
					node->flags |= MAP_UPDATE;
				}
				f = 1;
				break;
			}
		}
		if (!f) {
			/*If the `node' doesn't have `from' in his r_nodes... let's add it */
			setzero(&rnn, sizeof(map_rnode));
			rnn.r_node = (int *) from;
			rnn.trtt = trtt_ms;
			rnode_add(node, &rnn);
			node->flags |= MAP_UPDATE;
		}

		/* ok, now the kernel needs a refresh of the routing table */
		if (node->flags & MAP_UPDATE) {
			rnode_trtt_order(node);

			if (node->links > MAXROUTES) {
				/*
				 * If we have too many routes we purge the worst
				 * ones.
				 */
				for (x = MAXROUTES; x < node->links; x++)
					rnode_del(node, x);
			}

			debug(DBG_INSANE, "TRCR_STORE: krnl_update node %d",
				  tracer[i].node);
			rt_update_node(0, node, 0, 0, 0, level);
			node->flags &= ~MAP_UPDATE;
		}
	}
	return 0;
}
  1092. /*
* flood_pkt_send: This function is used to propagate packets, in a broadcast
* manner, in an entire gnode of a specified level.
  1095. * It sends the `pkt' to all the nodes excluding the excluded nodes. It knows
  1096. * if a node is excluded by calling the `is_node_excluded' function. The
  1097. * second argument to this function is the node who sent the pkt and it must be
  1098. * always excluded. The third argument is the position of the node being processed
  1099. * in the r_node array of me.cur_node. The other arguments are described in
  1100. * tracer.h.
  1101. * If `is_node_excluded' returns a non 0 value, the node is considered as excluded.
  1102. * The `from_rpos' argument is the node who sent the `pkt'.
  1103. * It returns the number of pkts sent or -1 on errors. Note that the total pkt sent
  1104. * should be == me.cur_node->links-the_excluded_nodes.
  1105. * Note that `level', `sub_id', and `from_rpos' are vars used only by
  1106. * is_node_excluded() (see tracer.h).
  1107. */
  1108. int
  1109. flood_pkt_send(int (*is_node_excluded) (TRACER_PKT_EXCLUDE_VARS),
  1110. u_char level, int sub_id, int from_rpos, PACKET pkt)
  1111. {
  1112. ext_rnode *e_rnode;
  1113. map_node *dst_node, *node;
  1114. ssize_t err;
  1115. const char *ntop;
  1116. int i, e = 0;
  1117. /*
  1118. * Forward the pkt to all our r_nodes (excluding the excluded;)
  1119. */
  1120. for (i = 0; i < me.cur_node->links; i++) {
  1121. node = (map_node *) me.cur_node->r_node[i].r_node;
  1122. if (node->flags & MAP_ERNODE) {
  1123. e_rnode = (ext_rnode *) node;
  1124. dst_node = (map_node *) e_rnode->quadg.gnode[_EL(level - 1)];
  1125. } else {
  1126. e_rnode = 0;
  1127. dst_node = node;
  1128. }
  1129. if (!dst_node)
  1130. continue;
  1131. if (is_node_excluded
  1132. (e_rnode, dst_node, from_rpos, i, level, sub_id))
  1133. continue;
  1134. /* Get the socket associated to the rnode */
  1135. if (rnl_fill_rq(node, &pkt) < 0)
  1136. continue;
  1137. if (server_opt.dbg_lvl)
  1138. debug(DBG_INSANE, "flood_pkt_send(0x%x): %s to %s"
  1139. " lvl %d", pkt.hdr.id,
  1140. rq_to_str(pkt.hdr.op), inet_to_str(pkt.to), level - 1);
  1141. /* Let's send the pkt */
  1142. err = rnl_send_rq(node, &pkt, 0, pkt.hdr.op, pkt.hdr.id, 0, 0, 0);
  1143. if (err == -1) {
  1144. ntop = inet_to_str(pkt.to);
  1145. error(ERROR_MSG "Cannot send the %s request"
  1146. " with id: %d to %s",
  1147. ERROR_FUNC, rq_to_str(pkt.hdr.op), pkt.hdr.id, ntop);
  1148. } else
  1149. e++;
  1150. }
  1151. pkt_free(&pkt, 0);
  1152. return e;
  1153. }
  1154. /* * * Exclude functions * * *
  1155. * These exclude function are used in conjunction with flood_pkt_send.
  1156. * They return 1 if the node has to be excluded, otherwise 0.
  1157. */
  1158. /*
  1159. * exclude_glevel: Exclude `node' if it doesn't belong to the gid (`excl_gid') of
  1160. * the level (`excl_level') specified.
  1161. */
  1162. int
  1163. exclude_glevel(TRACER_PKT_EXCLUDE_VARS)
  1164. {
  1165. /* If `node' is null we can exclude it, because it isn't a gnode
  1166. * of ours levels */
  1167. if (!node)
  1168. return 1;
  1169. /* Ehi, if the node isn't even an external rnode, we don't exclude it. */
  1170. if (!(node->flags & MAP_ERNODE))
  1171. return 0;
  1172. /* Reach the sky */
  1173. if (excl_level == me.cur_quadg.levels)
  1174. return 0;
  1175. return quadg_gids_cmp(e_rnode->quadg, me.cur_quadg, excl_level);
  1176. }
  1177. /* Exclude the `from' node */
  1178. int
  1179. exclude_from(TRACER_PKT_EXCLUDE_VARS)
  1180. {
  1181. if (pos == from_rpos)
  1182. return 1;
  1183. return 0;
  1184. }
  1185. /* Exclude all the nodes, except the from node */
  1186. int
  1187. exclude_all_but_notfrom(TRACER_PKT_EXCLUDE_VARS)
  1188. {
  1189. if (!exclude_from(TRACER_PKT_EXCLUDE_VARS_NAME))
  1190. return 1;
  1191. return 0;
  1192. }
  1193. int
  1194. exclude_from_and_glevel(TRACER_PKT_EXCLUDE_VARS)
  1195. {
  1196. if (exclude_glevel(TRACER_PKT_EXCLUDE_VARS_NAME) ||
  1197. exclude_from(TRACER_PKT_EXCLUDE_VARS_NAME))
  1198. return 1;
  1199. return 0;
  1200. }
  1201. /*
* tracer_pkt_recv: It receives a TRACER_PKT or a TRACER_PKT_CONNECT, analyzes
* the received pkt, adds the new entry in it and forwards the pkt to all
* the r_nodes.
  1205. */
  1206. int
  1207. tracer_pkt_recv(PACKET rpkt)
  1208. {
  1209. PACKET pkt;
  1210. brdcast_hdr *bcast_hdr;
  1211. tracer_hdr *trcr_hdr;
  1212. tracer_chunk *tracer;
  1213. bnode_hdr *bhdr = 0;
  1214. map_node *from, *tracer_starter, *root_node;
  1215. map_gnode *gfrom;
  1216. quadro_group rip_quadg;
  1217. int (*exclude_function) (TRACER_PKT_EXCLUDE_VARS);
  1218. int ret_err, gid, real_from_rpos;
  1219. u_int hops;
  1220. size_t bblock_sz = 0, old_bblock_sz;
  1221. u_short old_bblocks_found = 0;
  1222. u_char level, orig_lvl;
  1223. const char *ntop = 0;
  1224. char *old_bblock = 0;
  1225. ret_err = tracer_unpack_pkt(rpkt, &bcast_hdr, &trcr_hdr, &tracer,
  1226. &bhdr, &bblock_sz, &rip_quadg,
  1227. &real_from_rpos);
  1228. if (ret_err) {
  1229. ntop = inet_to_str(rpkt.from);
  1230. debug(DBG_NOISE, "tracer_pkt_recv(): The %s sent an invalid "
  1231. "tracer_pkt here.", ntop);
  1232. return -1;
  1233. }
  1234. hops = trcr_hdr->hops;
  1235. gid = bcast_hdr->g_node;
  1236. level = orig_lvl = bcast_hdr->level;
  1237. if (!level || level == 1) {
  1238. level = 0;
  1239. root_node = me.cur_node;
  1240. from = node_from_pos(tracer[hops - 1].node, me.int_map);
  1241. tracer_starter = node_from_pos(tracer[0].node, me.int_map);
  1242. } else {
  1243. level--;
  1244. root_node = &me.cur_quadg.gnode[_EL(level)]->g;
  1245. gfrom =
  1246. gnode_from_pos(tracer[hops - 1].node, me.ext_map[_EL(level)]);
  1247. from = &gfrom->g;
  1248. tracer_starter =
  1249. (map_node *) gnode_from_pos(tracer[0].node,
  1250. me.ext_map[_EL(level)]);
  1251. }
  1252. if (server_opt.dbg_lvl) {
  1253. ntop = inet_to_str(rpkt.from);
  1254. debug(DBG_NOISE, "Tracer_pkt(0x%x, lvl %d) received from %s",
  1255. rpkt.hdr.id, level, ntop);
  1256. }
  1257. /*
  1258. * This is the check for the broadcast id. If it is <= tracer_starter->brdcast
  1259. * the pkt is an old broadcast that still dance around.
  1260. */
  1261. if (rpkt.hdr.id <= tracer_starter->brdcast) {
  1262. debug(DBG_NOISE, "tracer_pkt_recv(): Received from %s an old "
  1263. "tracer_pkt broadcast: 0x%x, cur: 0x%x", ntop,
  1264. rpkt.hdr.id, tracer_starter->brdcast);
  1265. return -1;
  1266. } else
  1267. tracer_starter->brdcast = rpkt.hdr.id;
  1268. /*
  1269. * Time to update our map
  1270. */
  1271. if (rpkt.hdr.op == TRACER_PKT) { /*This check is made because tracer_pkt_recv
  1272. handles also TRACER_PKT_CONNECT pkts */
  1273. ret_err = tracer_store_pkt(rpkt.from, &rip_quadg, level,
  1274. trcr_hdr, tracer, (void *) bhdr,
  1275. bblock_sz, &old_bblocks_found,
  1276. &old_bblock, &old_bblock_sz);
  1277. if (ret_err) {
  1278. ntop = inet_to_str(rpkt.from);
  1279. debug(DBG_NORMAL, "tracer_pkt_recv(): Cannot store the"
  1280. " tracer_pkt received from %s", ntop);
  1281. }
  1282. }
  1283. /*
  1284. * Drop the pkt if it is bound to the contigual qspn starters and we
  1285. * aren't a qspn_starter
  1286. */
  1287. if (bcast_hdr->flags & BCAST_TRACER_STARTERS &&
  1288. !(root_node->flags & QSPN_STARTER))
  1289. return 0;
  1290. /*The forge of the packet. */
  1291. if ((!level || ((me.cur_node->flags & MAP_BNODE) &&
  1292. (root_node->flags & MAP_BNODE))) &&
  1293. from != root_node) {
  1294. tracer_pkt_build(rpkt.hdr.op, rpkt.hdr.id, bcast_hdr->sub_id, /*IDs */
  1295. gid, level, bcast_hdr, trcr_hdr, tracer, /*Received tracer_pkt */
  1296. old_bblocks_found, old_bblock, old_bblock_sz, /*bnode_block */
  1297. &pkt); /*Where the pkt is built */
  1298. } else {
  1299. /* Increment the rtt of the last gnode chunk */
  1300. ret_err = tracer_add_rtt(real_from_rpos, tracer, hops - 1);
  1301. if (ret_err < 0)
  1302. debug(DBG_NOISE, "tracer_add_rtt(0x%x) hop %d failed",
  1303. rpkt.hdr.id, hops - 1);
  1304. pkt_copy(&pkt, &rpkt);
  1305. pkt_clear(&pkt);
  1306. }
  1307. /*... forward the tracer_pkt to our r_nodes */
  1308. exclude_function = exclude_from_and_glevel;
  1309. flood_pkt_send(exclude_function, orig_lvl, real_from_rpos,
  1310. real_from_rpos, pkt);
  1311. if (old_bblock)
  1312. xfree(old_bblock);
  1313. return 0;
  1314. }
  1315. /*
  1316. * tracer_pkt_start: It sends only a normal tracer_pkt. This is useful after
  1317. * the hook, to let all the other nodes know we are alive and to give them
  1318. * the right route.
  1319. */
  1320. int
  1321. tracer_pkt_start(u_char level)
  1322. {
  1323. PACKET pkt;
  1324. int root_node_pos;
  1325. if (tracer_pkt_start_mutex)
  1326. return 0;
  1327. else
  1328. tracer_pkt_start_mutex = 1;
  1329. if (!level || level == 1) {
  1330. level = 0;
  1331. root_node_pos = pos_from_node(me.cur_node, me.int_map);
  1332. } else
  1333. root_node_pos = pos_from_gnode(me.cur_quadg.gnode[_EL(level)],
  1334. me.ext_map[_EL(level)]);
  1335. me.cur_node->brdcast++;
  1336. tracer_pkt_build(TRACER_PKT, me.cur_node->brdcast, root_node_pos, /*IDs */
  1337. me.cur_quadg.gid[level + 1], level, /*GnodeID and level */
  1338. 0, 0, 0, /*Received tracer_pkt */
  1339. 0, 0, 0, /*bnode_block */
  1340. &pkt); /*Where the pkt is built */
  1341. /*Diffuse the packet in all the universe! */
  1342. debug(DBG_INSANE, "Tracer_pkt 0x%x starting.", pkt.hdr.id);
  1343. flood_pkt_send(exclude_from_and_glevel, level + 1, -1, -1, pkt);
  1344. tracer_pkt_start_mutex = 0;
  1345. return 0;
  1346. }