You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

radar.c 40KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723
  1. /* This file is part of Netsukuku
  2. * (c) Copyright 2005 Andrea Lo Pumo aka AlpT <alpt@freaknet.org>
  3. *
  4. * This source code is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as published
  6. * by the Free Software Foundation; either version 2 of the License,
  7. * or (at your option) any later version.
  8. *
  9. * This source code is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  12. * Please refer to the GNU Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Public License along with
  15. * this source code; if not, write to:
  16. * Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  17. *
  18. * --
  19. *
  20. * radar.c
  21. *
  22. * The radar sends in broadcast a bouquet of MAX_RADAR_SCANS# packets and waits
  23. * for the ECHO_REPLY of the nodes which are alive. It then recollects the
  24. * replies and builds a small statistic, updates, if necessary, the internal
  25. * maps, the bnode maps and the qspn buffer.
  26. * A radar is fired periodically by the radar_daemon(), which is started as a
  27. * thread.
  28. */
  29. #include "includes.h"
  30. #include "llist.c"
  31. #include "endianness.h"
  32. #include "if.h"
  33. #include "bmap.h"
  34. #include "route.h"
  35. #include "request.h"
  36. #include "pkts.h"
  37. #include "qspn.h"
  38. #include "radar.h"
  39. #include "netsukuku.h"
  40. #include "common.h"
  41. pthread_attr_t radar_qspn_send_t_attr;
/*
 * first_init_radar
 *
 * One-time radar initialisation: prepares the detached pthread attribute
 * used to spawn the radar/qspn sender threads, registers the radar packet
 * operations, initialises the rnode and allowed-rnode lists and finally
 * runs the per-scan init_radar().
 */
void
first_init_radar(void)
{
	max_radar_wait = MAX_RADAR_WAIT;

	/* Threads spawned with this attribute are detached: their resources
	 * are reclaimed automatically, no join is needed */
	pthread_attr_init(&radar_qspn_send_t_attr);
	pthread_attr_setdetachstate(&radar_qspn_send_t_attr,
								PTHREAD_CREATE_DETACHED);

	/* register the radar's ops in the pkt_op_table */
	add_pkt_op(ECHO_ME, SKT_BCAST, ntk_udp_radar_port, radard);
	add_pkt_op(ECHO_REPLY, SKT_UDP, ntk_udp_radar_port, radar_recv_reply);

	/* Start with empty rnode and allowed-rnode lists */
	rlist = (struct rnode_list *) clist_init(&rlist_counter);
	alwd_rnodes =
		(struct allowed_rnode *) clist_init(&alwd_rnodes_counter);

	radar_daemon_ctl = 0;
	init_radar();
}
/*
 * last_close_radar
 *
 * Final teardown: destroys the radar queue (via close_radar) and frees
 * the whole rnode_list, closing any cached TCP sockets.
 */
void
last_close_radar(void)
{
	close_radar();
	rnl_reset(&rlist, &rlist_counter);
}
/*
 * init_radar
 *
 * Resets the per-scan radar state: scan counters, the radar queue and
 * the send_qspn_now[] flags. Called before each new radar session.
 */
void
init_radar(void)
{
	hook_retry = 0;
	my_echo_id = 0;
	total_radar_scans = 0;
	setzero(radar_scans, sizeof(radar_scans));
	radar_scan_mutex = 0;

	/* Fresh, empty radar queue */
	radar_q = (struct radar_queue *) clist_init(&radar_q_counter);

	/* No level needs a qspn update yet */
	setzero(send_qspn_now, sizeof(u_char) * MAX_LEVELS);
}
/*
 * close_radar
 *
 * Destroys the radar queue, if it holds any element.
 */
void
close_radar(void)
{
	if (radar_q_counter)
		clist_destroy(&radar_q, &radar_q_counter);
}
/*
 * reset_radar
 *
 * Restarts the radar from a clean state. While hooking (MAP_HNODE set on
 * our node) it also frees the temporary hook-time node structs and the
 * rnode_list, then rebuilds the queue via close_radar()/init_radar().
 */
void
reset_radar(void)
{
	if (me.cur_node->flags & MAP_HNODE) {
		free_new_node();
		rnl_reset(&rlist, &rlist_counter);
	}

	close_radar();
	init_radar();
}
  91. /*
  92. * free_new_node
  93. *
  94. * frees all the temporary alloced rq->node structs used at the
  95. * hook time.
  96. */
  97. void
  98. free_new_node(void)
  99. {
  100. struct radar_queue *rq;
  101. rq = radar_q;
  102. list_for(rq)
  103. if (rq->node && ((int) rq->node != RADQ_EXT_RNODE)) {
  104. xfree(rq->node);
  105. rq->node = 0;
  106. }
  107. }
  108. /*
  109. * find_node_radar_q
  110. *
  111. * returns the first radar_queue struct which has the
  112. * rq->node pointer equal to `node'.
  113. */
  114. struct radar_queue *
  115. find_node_radar_q(map_node * node)
  116. {
  117. struct radar_queue *rq;
  118. rq = radar_q;
  119. list_for(rq)
  120. if (rq->node == node)
  121. return rq;
  122. return 0;
  123. }
  124. /*
  125. * find_ip_radar_q
  126. *
  127. * returns the first radar_queue struct which has the rq->ip
  128. * member equal to the given `ip'.
  129. */
  130. struct radar_queue *
  131. find_ip_radar_q(inet_prefix * ip)
  132. {
  133. struct radar_queue *rq;
  134. rq = radar_q;
  135. list_for(rq)
  136. if (!memcmp(rq->ip.data, ip->data, MAX_IP_SZ))
  137. return rq;
  138. return 0;
  139. }
/*
 * rnl_add
 *
 * Adds a new rnode_list struct in the `*rnlist' list. The new
 * allocated struct will be filled respectively with `rnode' and `dev':
 * `dev' becomes the first (and only) entry of the dev[] array.
 * It returns the added `rnode_list' struct.
 */
struct rnode_list *
rnl_add(struct rnode_list **rnlist, int *rnlist_counter,
		map_node * rnode, interface * dev)
{
	struct rnode_list *rnl;

	/* xzalloc zeroes the struct, so tcp_sk starts at 0 (no socket)
	 * and dev_n at 0 before the increment below */
	rnl = xzalloc(sizeof(struct rnode_list));
	rnl->node = (map_node *) rnode;
	rnl->dev[0] = dev;
	rnl->dev_n++;

	clist_add(rnlist, rnlist_counter, rnl);

	return rnl;
}
/*
 * rnl_del
 *
 * Deletes the `rnl' struct from the `rnlist' rnode_list.
 * If `close_socket' is not zero, `rnl'->tcp_sk will be closed.
 * `rnl' may be NULL, in which case only the empty-list normalisation
 * below is performed.
 */
void
rnl_del(struct rnode_list **rnlist, int *rnlist_counter,
		struct rnode_list *rnl, int close_socket)
{
	if (rnl) {
		/* Close the cached TCP socket only on request and only if
		 * one was ever opened */
		if (close_socket && rnl->tcp_sk)
			inet_close(&rnl->tcp_sk);
		clist_del(rnlist, rnlist_counter, rnl);
	}

	/* Intentionally outside the if: whenever the list becomes empty,
	 * reset the head pointer to NULL */
	if (!(*rnlist_counter))
		*rnlist = 0;
}
/*
 * rnl_reset
 *
 * Deletes every member of the rnode_list (closing their sockets) and
 * re-initialises it to an empty clist.
 */
void
rnl_reset(struct rnode_list **rnlist, int *rnlist_counter)
{
	struct rnode_list *rnl = *rnlist, *next;

	/* list_safe_for keeps `next' so each element can be freed while
	 * iterating */
	list_safe_for(rnl, next)
		rnl_del(rnlist, rnlist_counter, rnl, 1);

	*rnlist = (struct rnode_list *) clist_init(rnlist_counter);
}
/*
 * rnl_del_dead_rnode
 *
 * It removes all the rnode_list structs which are related
 * to a rnode which doesn't exist anymore in `root_node'.
 * It returns the number of deleted rnode_list structs.
 */
int
rnl_del_dead_rnode(struct rnode_list **rnlist, int *rnlist_counter,
				   map_node * root_node)
{
	struct rnode_list *rnl = *rnlist, *next;
	int i = 0;

	/* Safe iteration: rnl_del frees the current element */
	list_safe_for(rnl, next)
		/* rnode_find() < 0 means the node is no longer an rnode of
		 * `root_node' */
		if (rnode_find(root_node, rnl->node) < 0) {
			rnl_del(rnlist, rnlist_counter, rnl, 1);
			i++;
		}

	return i;
}
  210. /*
  211. * rnl_find_rpos
  212. *
  213. * returns the first rnode_list struct, contained in
  214. * `rnlist', which has rnl->node equal to `node'.
  215. */
  216. struct rnode_list *
  217. rnl_find_node(struct rnode_list *rnlist, map_node * node)
  218. {
  219. struct rnode_list *rnl = rnlist;
  220. list_for(rnl)
  221. if (rnl->node == node)
  222. return rnl;
  223. return 0;
  224. }
  225. /*
  226. * rnl_add_dev
  227. *
  228. * If `rnl' is 0 a new struct is added in `*rnlist' using `node'.
  229. * In both cases the `new_dev' is added in the rnl->dev[] array of
  230. * pointers (if it isn't already present there) and rnl->dev_n is
  231. * incremented.
  232. * On error -1 is returned.
  233. */
  234. int
  235. rnl_add_dev(struct rnode_list **rnlist, int *rnlist_counter,
  236. struct rnode_list *rnl, map_node * node, interface * new_dev)
  237. {
  238. int i;
  239. if (!rnl) {
  240. rnl = rnl_add(rnlist, rnlist_counter, node, new_dev);
  241. return 0;
  242. }
  243. if (rnl->dev_n >= MAX_INTERFACES)
  244. return -1;
  245. for (i = 0; i < rnl->dev_n; i++)
  246. if (rnl->dev[i] == new_dev)
  247. return 0;
  248. rnl->dev[rnl->dev_n++] = new_dev;
  249. return 0;
  250. }
/*
 * rnl_del_dev
 *
 * It searches a pointer in the rnl->dev[] array equal to
 * `del_dev'. If it is found, it is removed (the last element of the
 * array is swapped into its slot) and rnl->dev_n is decremented,
 * otherwise 0 is returned.
 * If rnl->dev_n drops to 0, the rnl struct is deleted from the llist.
 * On error -1 is returned.
 */
int
rnl_del_dev(struct rnode_list **rnlist, int *rnlist_counter,
			struct rnode_list *rnl, interface * del_dev)
{
	int i;

	if (!rnl)
		return 0;

	if (rnl->dev_n <= 0)
		return -1;

	for (i = 0; i < rnl->dev_n; i++) {
		if (rnl->dev[i] == del_dev) {
			/* Swap-with-last delete: keeps dev[] compact without
			 * shifting every element */
			if (i == rnl->dev_n - 1)
				rnl->dev[i] = 0;
			else {
				rnl->dev[i] = rnl->dev[rnl->dev_n - 1];
				rnl->dev[rnl->dev_n - 1] = 0;
			}
			rnl->dev_n--;
			break;
		}
	}

	/* An rnode without devices is unreachable: drop the whole entry */
	if (!rnl->dev_n)
		rnl_del(rnlist, rnlist_counter, rnl, 1);

	return 0;
}
/*
 * rnl_update_devs
 *
 * It updates the device array present in the rnode_list struct of `node'.
 * It searches in rnlist a struct which has rnlist->node == `node',
 * then it substitutes rnlist->dev with `devs' and rnlist->dev_n with
 * `dev_n'. If there is a difference between the new `devs' array and the
 * old one, 1 is returned, otherwise 0.
 */
int
rnl_update_devs(struct rnode_list **rnlist, int *rnlist_counter,
				map_node * node, interface ** devs, int dev_n)
{
	struct rnode_list *old_rnl, *new_rnl;
	int i, dev_pos, update = 0;

	old_rnl = rnl_find_node(*rnlist, node);

	if (!dev_n) {
		/*
		 * The new `devs' array is empty, therefore delete old_rnl
		 */
		rnl_del(rnlist, rnlist_counter, old_rnl, 1);
		return 0;
	}

	/* NOTE: the `for' loop below is the body of this `if'; the `else'
	 * after its closing brace binds back to it */
	if (old_rnl)
		/*
		 * Diff old_rnl->dev and `devs': any device of `devs' missing
		 * from the old array forces an update
		 */
		for (i = 0; i < dev_n; i++) {
			dev_pos = FIND_PTR(devs[i], old_rnl->dev, old_rnl->dev_n);
			if (dev_pos < 0) {
				update = 1;
				break;
			}
		} else if (!old_rnl)
		update = 1;

	if (update) {
		/* Rebuild the entry from scratch with the new device set,
		 * carrying over the cached TCP socket, then drop the old
		 * entry WITHOUT closing that socket (close_socket == 0) */
		new_rnl = rnl_add(rnlist, rnlist_counter, node, devs[0]);
		for (i = 1; i < dev_n; i++)
			rnl_add_dev(rnlist, rnlist_counter, new_rnl, node, devs[i]);

		new_rnl->tcp_sk = (old_rnl) ? old_rnl->tcp_sk : 0;
		rnl_del(rnlist, rnlist_counter, old_rnl, 0);
	}

	return update;
}
  329. interface **
  330. rnl_get_dev(struct rnode_list * rnlist, map_node * node)
  331. {
  332. struct rnode_list *rnl;
  333. rnl = rnl_find_node(rnlist, node);
  334. return !rnl ? 0 : rnl->dev;
  335. }
/*
 * rnl_get_rand_dev
 *
 * Returns a randomly chosen device from the dev[] array of the
 * rnode_list entry associated to `node', or 0 when no entry exists.
 * NOTE(review): assumes dev_n >= 1 for any existing entry (rnl_add
 * starts at 1 and rnl_del_dev removes the entry when it reaches 0);
 * rand_range(0, -1) would otherwise be called — confirm that invariant
 * holds everywhere.
 */
interface *
rnl_get_rand_dev(struct rnode_list * rnlist, map_node * node)
{
	struct rnode_list *rnl;

	return !(rnl = rnl_find_node(rnlist, node)) ?
		0 : rnl->dev[rand_range(0, rnl->dev_n - 1)];
}
/*
 * rnl_get_sk
 *
 * It returns the tcp socket associated to rnode `node'.
 * If the socket is set to zero, it tries to create a tcp connection to
 * `node' to the `ntk_tcp_port' port, trying each of the rnode's devices
 * in turn. A successfully opened socket is cached in rnl->tcp_sk and
 * set to keepalive.
 *
 * On error -1 is returned.
 */
int
rnl_get_sk(struct rnode_list *rnlist, map_node * node)
{
	struct rnode_list *rnl;

	if (!(rnl = rnl_find_node(rnlist, node)))
		return -1;

	if (!rnl->tcp_sk) {
		inet_prefix to;
		int i;

		if (me.cur_node->flags & MAP_HNODE) {
			struct radar_queue *rq;

			/* If we are hooking, get the IP from the radar
			 * queue */
			if (!(rq = find_node_radar_q(rnl->node)))
				return -1;
			inet_copy(&to, &rq->ip);
		} else {
			/* Normal case: derive the IP from the node's position
			 * in the internal map */
			rnodetoip((u_int) me.int_map, (u_int) node,
					  me.cur_quadg.ipstart[1], &to);
		}

		/* Try to connect using the `i'th device. If it fails, try
		 * another device */
		for (i = 0; i < rnl->dev_n && rnl->tcp_sk <= 0; i++)
			rnl->tcp_sk = pkt_tcp_connect(&to, ntk_tcp_port, rnl->dev[i]);

		/* Normalise failures (<= 0) back to 0 so a later call retries;
		 * if the socket is connected, set it to keepalive */
		if ((rnl->tcp_sk = (rnl->tcp_sk <= 0) ? 0 : rnl->tcp_sk))
			set_keepalive_sk(rnl->tcp_sk);
	}

	return rnl->tcp_sk > 0 ? rnl->tcp_sk : -1;
}
  382. /*
  383. * rnl_set_sk
  384. *
  385. * It sets the socket associated to rnode `node' to `sk'
  386. */
  387. void
  388. rnl_set_sk(struct rnode_list *rnlist, map_node * node, int sk)
  389. {
  390. struct rnode_list *rnl;
  391. if (!(rnl = rnl_find_node(rnlist, node)))
  392. return;
  393. rnl->tcp_sk = sk;
  394. }
  395. /*
  396. * rnl_close_all_sk
  397. *
  398. * It closes all the opened tcp_sk of the `rnlist' llist
  399. */
  400. void
  401. rnl_close_all_sk(struct rnode_list *rnlist)
  402. {
  403. struct rnode_list *rnl = rnlist;
  404. list_for(rnl)
  405. if (rnl->tcp_sk)
  406. inet_close(&rnl->tcp_sk);
  407. }
/*
 * rnl_fill_rq
 *
 * It sets the `pkt'->sk and `pkt'->to variables.
 * The `pkt'->sk is retrieved using rnl_get_sk(), `pkt'->to via
 * inet_getpeername() on that socket. getpeername is retried once.
 *
 * On error -1 is returned.
 */
int
rnl_fill_rq(map_node * rnode, PACKET * pkt)
{
	int tries = 0;

  retry:
	/* Only fetch a socket when the caller didn't already set one */
	if (!pkt->sk && (pkt->sk = rnl_get_sk(rlist, rnode)) <= 0) {
		error(ERROR_MSG "Couldn't get the socket associated "
			  "to dst_rnode", ERROR_FUNC);
		return -1;
	}

	if (inet_getpeername(pkt->sk, &pkt->to, 0) < 0) {
		/* One retry in total: tries goes 0 -> 1 (retry) -> 2 (give up).
		 * NOTE(review): pkt->sk is not reset before retrying, so the
		 * retry reuses the same socket — confirm this is intended. */
		tries++;
		if (tries < 2)
			goto retry;
		return -1;
	}

	return 0;
}
/*
 * rnl_send_rq
 *
 * It is a wrapper to send_rq. It is used to send or receive a packet
 * to/from the specified `rnode'. If the cached socket turns out to be
 * corrupted, it is invalidated and the request is retried once with a
 * freshly opened socket.
 *
 * On error -1 (or the send_rq error code) is returned.
 *
 * Note: the pkt->sk must not be closed.
 */
int
rnl_send_rq(map_node * rnode,
			PACKET * pkt, int pkt_flags, u_char rq, int rq_id, u_char re,
			int check_ack, PACKET * rpkt)
{
	int ret, tries = 0;

  retry:
	/* Resolve the destination socket/address when not already set */
	if (!pkt->sk && rnl_fill_rq(rnode, pkt) < 0)
		return -1;

	ret = send_rq(pkt, pkt_flags, rq, rq_id, re, check_ack, rpkt);

	if ((ret == SEND_RQ_ERR_CONNECT || ret == SEND_RQ_ERR_SEND ||
		 ret == SEND_RQ_ERR_RECV)) {
		/* The socket has been corrupted, set it to 0 and try again */
		inet_close(&pkt->sk);
		rnl_set_sk(rlist, rnode, 0);

		tries++;
		if (tries < 2)
			goto retry;
	}

	return ret;
}
  465. /*
  466. * is_rnode_allowed
  467. *
  468. * it verifies if the rnode described by the `rip' IP is
  469. * present in the `alr' llist. If it is 1 is returned, otherwise 0.
  470. */
  471. int
  472. is_rnode_allowed(inet_prefix rip, struct allowed_rnode *alr)
  473. {
  474. int i, e, gid[MAX_LEVELS];
  475. iptogids(&rip, gid, FAMILY_LVLS);
  476. list_for(alr) {
  477. for (e = 0, i = alr->min_level; i < alr->tot_level; i++)
  478. if (gid[i] != alr->gid[i]) {
  479. e = 1;
  480. break;
  481. }
  482. if (!e)
  483. return 1;
  484. }
  485. return 0;
  486. }
/*
 * new_rnode_allowed
 *
 * Add a new allowed rnode in the `alr' llist which has
 * already `*alr_counter' members. `gid', `min_lvl', and `tot_lvl' are
 * the respective fields of the new allowed_rnode struct; only the gids
 * in [min_lvl, tot_lvl) are copied, the rest stay zero.
 */
void
new_rnode_allowed(struct allowed_rnode **alr, int *alr_counter,
				  int *gid, int min_lvl, int tot_lvl)
{
	struct allowed_rnode *new_alr;

	new_alr = xmalloc(sizeof(struct allowed_rnode));

	new_alr->min_level = min_lvl;
	new_alr->tot_level = tot_lvl;

	/* Zero the whole gid array, then copy only the meaningful range */
	setzero(new_alr->gid, sizeof(int) * MAX_LEVELS);
	memcpy(&new_alr->gid[min_lvl], &gid[min_lvl],
		   sizeof(int) * (tot_lvl - min_lvl));

	debug(DBG_SOFT,
		  "new_rnode_allowed: %d, %d, %d, %d. min_lvl: %d, tot_lvl: %d",
		  gid[0], gid[1], gid[2], gid[3], min_lvl, tot_lvl);

	clist_add(alr, alr_counter, new_alr);
}
/*
 * reset_rnode_allowed
 *
 * Destroys the whole allowed_rnode llist and re-initialises it empty.
 */
void
reset_rnode_allowed(struct allowed_rnode **alr, int *alr_counter)
{
	if (*alr)
		list_destroy((*alr));
	*alr = (struct allowed_rnode *) clist_init(alr_counter);
}
  517. /*
  518. * count_hooking_nodes
  519. *
  520. * returns the number of hooking nodes, which are stored
  521. * in the radar_queue.
  522. */
  523. int
  524. count_hooking_nodes(void)
  525. {
  526. struct radar_queue *rq;
  527. int total_hooking_nodes = 0;
  528. rq = radar_q;
  529. list_for(rq) {
  530. if (!rq->node)
  531. continue;
  532. if (rq->node->flags & MAP_HNODE)
  533. total_hooking_nodes++;
  534. }
  535. return total_hooking_nodes;
  536. }
/*
 * final_radar_queue
 *
 * Analyses the received ECHO_REPLY pkts and writes the
 * average rtt of each found node in the radar_queue.
 */
void
final_radar_queue(void)
{
	struct radar_queue *rq;
	int e;
	struct timeval sum;
	u_int f_rtt;

	setzero(&sum, sizeof(struct timeval));

	rq = radar_q;
	list_for(rq) {
		if (!rq->node)
			continue;

		/* Sum the rtt of all the received pongs */
		for (e = 0; e < rq->pongs; e++)
			timeradd(&rq->rtt[e], &sum, &sum);

		/* Add penalty rtt for each pong lost: the rtts of the first
		 * received pongs are counted again, once per missing pong
		 * (note the `e - rq->pongs' index wraps back to 0) */
		for (; e < MAX_RADAR_SCANS; e++)
			timeradd(&rq->rtt[e - rq->pongs], &sum, &sum);

		/* Average over the full scan count, converted to a timeval */
		f_rtt = MILLISEC(sum) / MAX_RADAR_SCANS;
		MILLISEC_TO_TV(f_rtt, rq->final_rtt);
	}

	my_echo_id = 0;
}
/*
 * radar_remove_old_rnodes
 *
 * It removes all the old rnodes ^_- i.e. the rnodes of me.cur_node which
 * are still flagged MAP_VOID after the radar update (they didn't reply).
 * For each deleted rnode it also purges the int/ext maps, the bnode
 * maps, the routing table and the qspn buffer, and sets the
 * corresponding bit of `rnode_deleted'[level].
 * This function is used by radar_update_map.
 */
int
radar_remove_old_rnodes(char *rnode_deleted)
{
	map_node *node, *root_node, *broot_node;
	map_gnode *gnode;
	map_bnode *bnode;
	ext_rnode *e_rnode = 0;
	ext_rnode_cache *erc;
	struct qspn_buffer *qb;
	struct rnode_list *rnl;
	int i, e, node_pos, bm, rnode_pos, bnode_rnode_pos, root_node_pos;
	int broot_node_pos;
	int level, blevel, external_node, total_levels, first_level;
	void *void_map, *void_gnode;

	if (!me.cur_node->links)
		return 0;

	for (i = 0; i < me.cur_node->links; i++) {
		node = (map_node *) me.cur_node->r_node[i].r_node;

		if (!(node->flags & MAP_VOID))
			/* The rnode is not really dead! */
			continue;

		if (node->flags & MAP_ERNODE) {
			/* External rnode: it spans several levels of the
			 * quadro_group, all of which must be cleaned */
			e_rnode = (ext_rnode *) node;
			external_node = 1;
			total_levels = e_rnode->quadg.levels;
			first_level = 1;

			quadg_setflags(&e_rnode->quadg, MAP_VOID);
		} else {
			/* Internal rnode: only level 0 is affected */
			external_node = 0;
			total_levels = 1;
			first_level = 0;
		}

		for (level = first_level; level < total_levels; level++) {
			qspn_set_map_vars(level, 0, &root_node, &root_node_pos, 0);
			blevel = level - 1;

			/* delete the rnode from the rnode_list */
			rnl = rnl_find_node(rlist, node);
			rnl_del(&rlist, &rlist_counter, rnl, 1);

			/*
			 * Just delete it from all the maps.
			 */
			if (!level && !external_node) {
				void_map = me.int_map;
				node_pos = pos_from_node(node, me.int_map);
				rnode_pos = i;

				debug(DBG_NORMAL, "radar: The node %d is dead", node_pos);

				/* delete it from the int_map and update the gcount */
				map_node_del(node);
				qspn_dec_gcount((int *) qspn_gnode_count, level + 1, 1);

				/* delete the route */
				rt_update_node(0, node, 0, 0, 0, level);

				send_qspn_now[level] = 1;
			} else {
				void_map = me.ext_map;
				gnode = e_rnode->quadg.gnode[_EL(level)];

				/** delete the direct route to the ext_node */
				if (level == 1)
					rt_update_node(&e_rnode->quadg.ipstart[0],
								   e_rnode, 0, 0, 0, /*level=0 */ 0);
				/**/ void_gnode = (void *) gnode;
				if (!void_gnode)
					continue;

				node_pos = pos_from_gnode(gnode, me.ext_map[_EL(level)]);
				rnode_pos = g_rnode_find((map_gnode *) root_node, gnode);

				debug(DBG_NORMAL, "The ext_node (gid %d, lvl %d) is"
					  " dead", e_rnode->quadg.gid[level], level);

				/* bnode_map update: walk every lower level and remove
				 * this gnode from our border-node entries. `e' stays 0
				 * only if all our bnode entries became empty. */
				for (e = 0; blevel >= 0; blevel--) {
					qspn_set_map_vars(blevel, 0, &broot_node,
									  &broot_node_pos, 0);
					bm = map_find_bnode(me.bnode_map[blevel],
										me.bmap_nodes[blevel],
										broot_node_pos);
					if (bm == -1)
						continue;

					bnode = &me.bnode_map[blevel][bm];
					bnode_rnode_pos = rnode_find(bnode,
												 (map_node *) e_rnode->
												 quadg.gnode[_EL(level)]);
					if (bnode_rnode_pos != -1)
						rnode_del(bnode, bnode_rnode_pos);

					if (!bnode->links) {
						/* The bnode entry has no borders left: drop it */
						me.bnode_map[blevel] =
							map_bnode_del(me.bnode_map[blevel],
										  &me.bmap_nodes[blevel], bnode);
						broot_node->flags &= ~MAP_BNODE;
					} else
						e = 1;
				}
				if (!e)		/* We are no more a bnode */
					me.cur_node->flags &= ~MAP_BNODE;

				/* If we were the only bnode which bordered on
				 * `gnode', delete it from the map */
				if (map_find_bnode_rnode
					(me.bnode_map[level - 1], me.bmap_nodes[level - 1],
					 gnode) == -1) {
					qspn_dec_gcount((int *) qspn_gnode_count, level + 1,
									gnode->gcount);
					gmap_node_del(gnode);
					gnode_dec_seeds(&me.cur_quadg, level);	/* update the seeds */
				}

				/* Delete the entries from the routing table */
				rt_update_node(0, 0, &e_rnode->quadg, 0, 0, level);

				send_qspn_now[level] = 1;
			}

			if (rnode_pos >= 0 && root_node->links > 0)
				rnode_del(root_node, rnode_pos);

			if (!root_node->links) {
				/* We are alone in the dark. Sigh. */
				qspn_time_reset(level, level, FAMILY_LVLS);
			} else if (!external_node)
				/* rnode_del swapped positions inside root_node: keep the
				 * ext_rnode_cache indexes in sync */
				erc_update_rnodepos(me.cur_erc, root_node, rnode_pos);

			/* Now we delete it from the qspn_buffer */
			if (qspn_b[level]) {
				qb = qspn_b[level];
				qb = qspn_b_find_rnode(qb, node);
				if (qb)
					qspn_b[level] = list_del(qspn_b[level], qb);
			}

			SET_BIT(rnode_deleted, level);
		}

		/*
		 * Kick out the external_node from the root_node and destroy it
		 * from the ext_rnode_cache
		 */
		if (external_node) {
			/* external rnode cache update */
			erc = erc_find(me.cur_erc, e_rnode);
			if (erc)
				e_rnode_del(&me.cur_erc, &me.cur_erc_counter, erc);
			rnode_del(me.cur_node, i);
		}

		/* If the rnode we deleted from the root_node was swapped with
		 * the last rnodes, we have to inspect again the same
		 * root_node->r_node[ `i' ] rnode, because now it is another
		 * rnode */
		if (i != (me.cur_node->links + 1) - 1)
			i--;
	}

	if (!me.cur_node->links) {
		/* - Diary -
		 * Tue Mar 14 07:29:58 CET 2006
		 * Damn! All my rnodes died, I am the last survivor in this
		 * great lone land... I have to reset my memory... farewell!
		 */
		qspn_reset_counters(FAMILY_LVLS);
	}

	return 0;
}
/*
 * radar_update_bmap
 *
 * Updates the bnode map of the given `level': the root_node bnode in the
 * bmap will also point to the gnode of level `gnode_level'+1, that is
 * `rq'->quadg.gnode[_EL(gnode_level+1)]. Missing bnode/rnode entries are
 * created on the fly; the link's trtt is refreshed from rq->final_rtt.
 * Nothing is done at the topmost level (no higher gnode exists there).
 */
void
radar_update_bmap(struct radar_queue *rq, int level, int gnode_level)
{
	map_gnode *gnode;
	map_node *root_node;
	map_rnode *rnode, rn;
	int bm, rnode_pos, root_node_pos;
	void *void_map;

	if (level == me.cur_quadg.levels - 1)
		return;

	qspn_set_map_vars(level, 0, &root_node, &root_node_pos, 0);
	void_map = me.ext_map;
	gnode = rq->quadg.gnode[_EL(gnode_level + 1)];

	bm = map_find_bnode(me.bnode_map[level], me.bmap_nodes[level],
						root_node_pos);
	if (bm == -1) {
		/* We are not yet in this level's bnode map: add ourselves */
		bm = map_add_bnode(&me.bnode_map[level], &me.bmap_nodes[level],
						   root_node_pos, 0);
		rnode_pos = -1;
	} else
		rnode_pos = rnode_find(&me.bnode_map[level][bm], &gnode->g);

	if (rnode_pos == -1) {
		/* The bnode entry doesn't link to `gnode' yet: add the rnode.
		 * rnode_add appends, but pos 0 is valid since the entry was
		 * either empty or just created */
		setzero(&rn, sizeof(map_rnode));
		rn.r_node = (int *) &gnode->g;
		rnode_add(&me.bnode_map[level][bm], &rn);
		rnode_pos = 0;
	}

	/* Refresh the measured round-trip time of the border link */
	rnode = &me.bnode_map[level][bm].r_node[rnode_pos];
	rnode->trtt = MILLISEC(rq->final_rtt);
}
/*
 * radar_update_map
 *
 * It updates the int_map and the ext_map if any bnodes are found.
 * Note that the rnodes in the map are held in a different way: first of all
 * the qspn is not applied to them (we already know how to reach them ;) and
 * they have only one rnode... ME. So
 * me.cur_node->r_node[x].r_node->r_node[0] == me.cur_node.
 * Gotcha?
 */
void
radar_update_map(void)
{
	struct qspn_buffer *qb;
	struct radar_queue *rq;
	ext_rnode_cache *erc;
	map_gnode *gnode = 0;
	map_node *node, *root_node;
	map_rnode rnn, *new_root_rnode;
	ext_rnode *e_rnode;

	int i, diff, rnode_pos;
	/* Per-level bitmasks recording where rnodes were added/deleted.
	 * NOTE(review): rnode_deleted is only written by
	 * radar_remove_old_rnodes(), which receives it below. */
	u_char rnode_added[MAX_LEVELS / 8], rnode_deleted[MAX_LEVELS / 8];
	int level, external_node, total_levels, root_node_pos, node_update;
	void *void_map;
	const char *ntop;
	char updated_rnodes, routes_update, devs_update;

	updated_rnodes = routes_update = devs_update = 0;
	setzero(rnode_added, sizeof(rnode_added));
	setzero(rnode_deleted, sizeof(rnode_deleted));

	/*
	 * Let's consider all our rnodes void, in this way we'll know what
	 * rnodes will remain void after the update.
	 */
	for (i = 0; i < me.cur_node->links; i++) {
		node = (map_node *) me.cur_node->r_node[i].r_node;
		node->flags |= MAP_VOID | MAP_UPDATE;
	}

	rq = radar_q;
	list_for(rq) {
		/* Skip queue slots which never resolved to a node */
		if (!rq->node)
			continue;
		/* While not hooking, ignore replies coming from hooking nodes */
		if (!(me.cur_node->flags & MAP_HNODE) && (rq->flags & MAP_HNODE))
			continue;

		/*
		 * We need to know if it is a node which is not in the gnode
		 * where we are (external_rnode).
		 */
		if ((int) rq->node == RADQ_EXT_RNODE) {
			external_node = 1;
			total_levels = rq->quadg.levels;
		} else {
			external_node = 0;
			total_levels = 1;
		}

		/* Walk the levels top-down; level 0 is the internal map */
		for (level = total_levels - 1; level >= 0; level--) {
			qspn_set_map_vars(level, 0, &root_node, &root_node_pos, 0);
			node_update = devs_update = 0;

			if (!level) {
				void_map = me.int_map;
				node = rq->node;
			} else {
				/* Skip the levels where the ext_rnode belongs
				 * to our same gids */
				if (!quadg_gids_cmp(rq->quadg, me.cur_quadg, level))
					continue;

				/* Update only the gnodes which belongs to
				 * our same gid of the upper level, because
				 * we don't keep the internal info of the
				 * extern gnodes. */
				if ((level < rq->quadg.levels - 1) &&
					quadg_gids_cmp(rq->quadg, me.cur_quadg, level + 1)) {
					rq->quadg.gnode[_EL(level)] = 0;
					continue;
				}

				/* Ehi, we are a bnode */
				root_node->flags |= MAP_BNODE;
				me.cur_node->flags |= MAP_BNODE;

				void_map = me.ext_map;
				gnode = rq->quadg.gnode[_EL(level)];
				node = &gnode->g;
			}

			/* At level 0 an external node lives in the ext_rnode
			 * cache rather than in the int_map */
			if (external_node && !level && me.cur_erc_counter) {
				erc = e_rnode_find(me.cur_erc, &rq->quadg, 0);
				if (!erc)
					rnode_pos = -1;
				else {
					rnode_pos = erc->rnode_pos;
					node = (map_node *) erc->e;
				}
			} else
				rnode_pos = rnode_find(root_node, node);

			if (rnode_pos == -1) {	/* W00t, we've found a new rnode! */
				node_update = 1;
				rnode_pos = root_node->links;

				ntop = inet_to_str(rq->quadg.ipstart[level]);
				if (server_opt.dbg_lvl || !level)
					loginfo
						("Radar: New node found: %s, ext: %d, level: %d",
						 ntop, external_node, level);

				if (external_node && !level) {
					/*
					 * If this node we are processing is external, at level 0,
					 * in the root_node's rnodes we add a rnode which point
					 * to a ext_rnode struct.
					 */
					setzero(&rnn, sizeof(map_rnode));
					e_rnode = xzalloc(sizeof(ext_rnode));
					memcpy(&e_rnode->quadg, &rq->quadg,
						   sizeof(quadro_group));
					e_rnode->node.flags =
						MAP_BNODE | MAP_GNODE | MAP_RNODE | MAP_ERNODE;
					rnn.r_node = (int *) e_rnode;
					node = rq->node = &e_rnode->node;
					new_root_rnode = &rnn;

					/* Update the external_rnode_cache list */
					e_rnode_add(&me.cur_erc, e_rnode, rnode_pos,
								&me.cur_erc_counter);
				} else {
					/* We purge all the node's rnodes. */
					rnode_destroy(node);

					/*
					 * This node has only one rnode,
					 * and that is the root_node.
					 */
					setzero(&rnn, sizeof(map_rnode));
					rnn.r_node = (int *) root_node;
					rnode_add(node, &rnn);

					/* It is a border node */
					if (level)
						node->flags |= MAP_BNODE | MAP_GNODE;
					node->flags |= MAP_RNODE;

					/*
					 * Fill the rnode to be added in the
					 * root_node.
					 */
					setzero(&rnn, sizeof(map_rnode));
					rnn.r_node = (int *) node;
					new_root_rnode = &rnn;
				}

				/*
				 * The new node is added in the root_node's
				 * rnodes.
				 */
				rnode_add(root_node, new_root_rnode);

				/* Update the qspn_buffer */
				if (!external_node || level) {
					qb = xzalloc(sizeof(struct qspn_buffer));
					qb->rnode = node;
					qspn_b[level] = list_add(qspn_b[level], qb);

					send_qspn_now[level] = 1;
				}

				/* If the new rnode wasn't present in the map,
				 * then it is also a new node in the map, so
				 * update the seeds counter too */
				if (!level && !external_node && (node->flags & MAP_VOID)) {
					gnode_inc_seeds(&me.cur_quadg, level);
					qspn_inc_gcount(qspn_gnode_count, level + 1, 1);
				}

				SET_BIT(rnode_added, level);
			} else {
				/*
				 * Nah, we have the node in the map. Let's see if
				 * its rtt is changed
				 */

				/* A qspn round is already pending for this level:
				 * no need to measure the rtt delta */
				if (!send_qspn_now[level] && node->links) {
					diff = abs(root_node->r_node[rnode_pos].trtt -
							   MILLISEC(rq->final_rtt));
					if (diff >= RTT_DELTA) {
						node_update = 1;
						send_qspn_now[level] = 1;
						debug(DBG_NOISE, "node %s rtt changed, diff: %d",
							  inet_to_str(rq->ip), diff);
					}
				}
			}

			/* Restore the flags */
			if (level)
				gnode->flags &= ~GMAP_VOID;
			node->flags &= ~MAP_VOID & ~MAP_UPDATE & ~QSPN_OLD;

			/*
			 * Update the devices list of the rnode
			 */
			if (!level) {
				devs_update = rnl_update_devs(&rlist, &rlist_counter,
											  node, rq->dev, rq->dev_n);
				if (devs_update)
					routes_update++;
			}

			/* Nothing is really changed */
			if (!node_update)
				continue;

			/* Update the rtt */
			root_node->r_node[rnode_pos].trtt = MILLISEC(rq->final_rtt);

			/* Bnode map stuff */
			if (external_node && level) {
				/*
				 * All the root_node bnodes which are in the
				 * bmaps of level smaller than `level' points to
				 * the same gnode which is rq->quadg.gnode[_EL(level-1+1)].
				 * This is because the inferior levels cannot
				 * have knowledge about the bordering gnode
				 * which is in an upper level, but it's necessary that
				 * they know which who the root_node borders on,
				 * so the get_route algorithm can descend to
				 * the inferior levels and it will still know
				 * what is the border node which is linked
				 * to the target gnode.
				 */
				for (i = 0; i < level; i++)
					radar_update_bmap(rq, i, level - 1);
				send_qspn_now[level - 1] = 1;
			}

			if (node_update || devs_update)
				node->flags |= MAP_UPDATE;
		}						/*for(level=0, ...) */

		updated_rnodes++;
	}							/*list_for(rq) */

	/* Burn the deads */
	if (updated_rnodes < me.cur_node->links)
		radar_remove_old_rnodes((char *) rnode_deleted);

	/* <<keep your room tidy... order, ORDER>> */
	if (!is_bufzero(rnode_added, sizeof(rnode_added)) ||
		!is_bufzero(rnode_deleted, sizeof(rnode_deleted))) {

		/*
		 * qsort the rnodes of me.cur_node and me.cur_quadg comparing
		 * their trtt
		 */
		rnode_trtt_order(me.cur_node);
		for (i = 1; i < me.cur_quadg.levels; i++)
			if (TEST_BIT(rnode_added, i) || TEST_BIT(rnode_deleted, i))
				rnode_trtt_order(&me.cur_quadg.gnode[_EL(i)]->g);

		/* adjust the rnode_pos variables in the ext_rnode_cache list */
		erc_reorder_rnodepos(&me.cur_erc, &me.cur_erc_counter,
							 me.cur_node);
	}

	/* Give a refresh to the kernel */
	if ((!is_bufzero(rnode_added, sizeof(rnode_added)) ||
		 routes_update) && !(me.cur_node->flags & MAP_HNODE))
		rt_rnodes_update(1);
}
/*
 * add_radar_q
 *
 * It returns the radar_q struct which handles the pkt.from node.
 * If the node is not present in the radar_q, it is added, and the
 * relative struct will be returned.
 */
struct
radar_queue *
add_radar_q(PACKET pkt)
{
	map_node *rnode;
	quadro_group quadg;
	struct radar_queue *rq;
	u_int ret = 0;
	int dev_pos;

	if (me.cur_node->flags & MAP_HNODE) {
		/*
		 * We are hooking, we haven't yet an int_map, an ext_map,
		 * a stable ip, so we create fake nodes that will be deleted after
		 * the hook.
		 */
		if (!(rq = find_ip_radar_q(&pkt.from))) {
			map_rnode rnn;

			/* Fake node: its only rnode is me.cur_node.
			 * NOTE(review): this node is reclaimed after the hook via
			 * refresh_hook_root_node()/radar_update_map() — confirm. */
			rnode = xmalloc(sizeof(map_node));
			setzero(rnode, sizeof(map_node));
			setzero(&rnn, sizeof(map_rnode));

			rnn.r_node = (int *) me.cur_node;
			rnode_add(rnode, &rnn);
		} else
			rnode = rq->node;
	}

	iptoquadg(pkt.from, me.ext_map, &quadg,
			  QUADG_GID | QUADG_GNODE | QUADG_IPSTART);

	if (!(me.cur_node->flags & MAP_HNODE)) {
		/* Resolve pkt.from to a node of the int_map; `ret' is non-zero
		 * when the sender lies outside our gnode (external rnode) */
		iptomap((u_int) me.int_map, pkt.from, me.cur_quadg.ipstart[1],
				&rnode);
		ret = quadg_gids_cmp(me.cur_quadg, quadg, 1);
	}

	if (!ret)
		rq = find_node_radar_q(rnode);
	else
		rq = find_ip_radar_q(&pkt.from);

	if (!rq) {
		/*
		 * If pkt.from isn't already in the queue, add it.
		 */

		rq = xzalloc(sizeof(struct radar_queue));

		if (ret)
			/* External node: marked with the RADQ_EXT_RNODE sentinel */
			rq->node = (map_node *) RADQ_EXT_RNODE;
		else {
			rq->node = rnode;
			/* This pkt has been sent from another hooking
			 * node, let's remember this. */
			if (pkt.hdr.flags & HOOK_PKT)
				rq->node->flags |= MAP_HNODE;
		}

		if (pkt.hdr.flags & HOOK_PKT)
			rq->flags |= MAP_HNODE;

		inet_copy(&rq->ip, &pkt.from);
		memcpy(&rq->quadg, &quadg, sizeof(quadro_group));
		rq->dev[0] = pkt.dev;
		rq->dev_n++;

		clist_add(&radar_q, &radar_q_counter, rq);
	} else {
		/*
		 * Check if the input device is in the rq->dev array,
		 * if not add it.
		 */
		if (rq->dev_n < MAX_INTERFACES) {
			dev_pos = FIND_PTR(pkt.dev, rq->dev, rq->dev_n);
			if (dev_pos < 0)
				rq->dev[rq->dev_n++] = pkt.dev;
		}
	}

	return rq;
}
  1074. /*
  1075. * radar_exec_reply
  1076. *
  1077. * It reads the received ECHO_REPLY pkt and updates the radar
  1078. * queue, storing the calculated rtt and the other infos relative to the sender
  1079. * node.
  1080. */
  1081. int
  1082. radar_exec_reply(PACKET pkt)
  1083. {
  1084. struct timeval t;
  1085. struct radar_queue *rq;
  1086. u_int rtt_ms = 0;
  1087. int dev_pos;
  1088. gettimeofday(&t, 0);
  1089. /*
  1090. * Get the radar_queue struct relative to pkt.from
  1091. */
  1092. rq = add_radar_q(pkt);
  1093. dev_pos = ifs_get_pos(me.cur_ifs, me.cur_ifs_n, pkt.dev);
  1094. if (dev_pos < 0)
  1095. debug(DBG_NORMAL, "The 0x%x ECHO_REPLY pkt was received by a non "
  1096. "existent interface", pkt.hdr.id);
  1097. if (me.cur_node->flags & MAP_HNODE) {
  1098. if (pkt.hdr.flags & HOOK_PKT) {
  1099. u_char scanning;
  1100. memcpy(&scanning, pkt.msg, sizeof(u_char));
  1101. /*
  1102. * If the pkt.from node has finished his scan, and we
  1103. * never received one of its ECHO_ME pkts, and we are
  1104. * still scanning, set the hook_retry.
  1105. */
  1106. if (!scanning && !rq->pings &&
  1107. (radar_scan_mutex ||
  1108. radar_scans[dev_pos] <= MAX_RADAR_SCANS)) {
  1109. hook_retry = 1;
  1110. }
  1111. }
  1112. }
  1113. if (rq->pongs < radar_scans[dev_pos]) {
  1114. timersub(&t, &scan_start, &rq->rtt[(int) rq->pongs]);
  1115. /*
  1116. * Now we divide the rtt, because (t - scan_start) is the time
  1117. * the pkt used to reach B from A and to return to A from B
  1118. */
  1119. rtt_ms = MILLISEC(rq->rtt[(int) rq->pongs]) / 2;
  1120. MILLISEC_TO_TV(rtt_ms, rq->rtt[(int) rq->pongs]);
  1121. rq->pongs++;
  1122. }
  1123. return 0;
  1124. }
  1125. /*
  1126. * radar_recv_reply
  1127. *
  1128. * It handles the ECHO_REPLY pkts
  1129. */
  1130. int
  1131. radar_recv_reply(PACKET pkt)
  1132. {
  1133. if (!my_echo_id || !radar_scan_mutex || !total_radar_scans)
  1134. return -1;
  1135. if (pkt.hdr.id != my_echo_id) {
  1136. debug(DBG_NORMAL, "I received an ECHO_REPLY with id: 0x%x, but "
  1137. "my current ECHO_ME is 0x%x", pkt.hdr.id, my_echo_id);
  1138. return -1;
  1139. }
  1140. /*
  1141. * If the `alwd_rnodes_counter' counter isn't zero, verify that
  1142. * `pkt.from' is an allowed rnode, otherwise drop this pkt
  1143. */
  1144. if (alwd_rnodes_counter && !is_rnode_allowed(pkt.from, alwd_rnodes)) {
  1145. debug(DBG_INSANE, "Filtering 0x%x ECHO_REPLY", pkt.hdr.id);
  1146. return -1;
  1147. }
  1148. /*
  1149. * If the rnode is in restricted mode and we are not, drop the pkt.
  1150. * If we are in restricted mode and the rnode isn't, drop the pkt
  1151. */
  1152. if ((pkt.hdr.flags & RESTRICTED_PKT && !restricted_mode) ||
  1153. (!(pkt.hdr.flags & RESTRICTED_PKT) && restricted_mode))
  1154. return -1;
  1155. return radar_exec_reply(pkt);
  1156. }
  1157. /*
  1158. * radar_qspn_send_t
  1159. *
  1160. * This function is used only by radar_scan().
  1161. * It just call the qspn_send() function. We use a thread
  1162. * because the qspn_send() may sleep, and we don't want to halt the
  1163. * radar_scan().
  1164. */
  1165. void *
  1166. radar_qspn_send_t(void *level)
  1167. {
  1168. int *p;
  1169. u_char i;
  1170. p = (int *) level;
  1171. i = (u_char) * p;
  1172. xfree(p);
  1173. qspn_send(i);
  1174. return NULL;
  1175. }
/*
 * radar_scan
 *
 * It starts the scan of the local area.
 *
 * It sends MAX_RADAR_SCANS packets in broadcast then it waits MAX_RADAR_WAIT
 * and in the while the echo replies are gathered. After MAX_RADAR_WAIT it
 * stops to receive echo replies and it does a statistical analysis of the
 * gathered echo replies, it updates the r_nodes in the map and sends a qspn
 * round if something is changed in the map and if the `activate_qspn' argument
 * is non zero.
 *
 * It returns 1 if another radar_scan is in progress, -1 if something went
 * wrong, 0 on success.
 */
int
radar_scan(int activate_qspn)
{
	pthread_t thread;
	PACKET pkt;
	int i, d, *p;
	ssize_t err;
	u_char echo_scan;

	/* We are already doing a radar scan, that's not good */
	if (radar_scan_mutex)
		return 1;
	radar_scan_mutex = 1;

	/*
	 * We create the PACKET
	 */
	setzero(&pkt, sizeof(PACKET));
	inet_setip_bcast(&pkt.to, my_family);
	my_echo_id = rand();

	gettimeofday(&scan_start, 0);

	/*
	 * Send a bouquet of ECHO_ME pkts
	 */

	if (me.cur_node->flags & MAP_HNODE) {
		/* While hooking, each ECHO_ME carries the current scan counter
		 * in a one-byte payload (read back by radard()) */
		pkt.hdr.sz = sizeof(u_char);
		pkt.hdr.flags |= HOOK_PKT | BCAST_PKT;
		pkt.msg = xmalloc(pkt.hdr.sz);
		debug(DBG_INSANE, "Radar scan 0x%x activated", my_echo_id);
	} else
		total_radars++;

	if (restricted_mode)
		pkt.hdr.flags |= RESTRICTED_PKT;

	/* Loop through the me.cur_ifs array, sending the bouquet using all the
	 * interfaces we have */
	for (d = 0; d < me.cur_ifs_n; d++) {

		pkt_add_dev(&pkt, &me.cur_ifs[d], 1);
		pkt.sk = 0;				/* Create a new socket */

		/* Send MAX_RADAR_SCANS# packets using me.cur_ifs[d] as
		 * outgoing interface */
		for (i = 0, echo_scan = 0; i < MAX_RADAR_SCANS; i++, echo_scan++) {
			if (me.cur_node->flags & MAP_HNODE)
				memcpy(pkt.msg, &echo_scan, sizeof(u_char));

			err = send_rq(&pkt, 0, ECHO_ME, my_echo_id, 0, 0, 0);
			if (err < 0) {
				if (errno == ENODEV) {
					/*
					 * The me.cur_ifs[d] device doesn't
					 * exist anymore. Delete it.
					 */
					/* NOTE(review): fatal() presumably never returns,
					 * which makes the ifs_del()/d-- below unreachable
					 * dead code — confirm intent. */
					fatal("The device \"%s\" has been removed",
						  me.cur_ifs[d].dev_name);
					ifs_del(me.cur_ifs, &me.cur_ifs_n, d);
					d--;
				} else
					error(ERROR_MSG "Error while sending the"
						  " scan 0x%x... skipping",
						  ERROR_FUNC, my_echo_id);
				break;
			}
			radar_scans[d]++;
			total_radar_scans++;
		}

		if (!radar_scans[d])
			error("radar_scan(): The scan 0x%x on the %s interface failed."
				  " Not a single scan was sent", my_echo_id,
				  pkt.dev->dev_name);

		if (pkt.sk > 0)
			inet_close(&pkt.sk);
	}
	pkt_free(&pkt, 1);

	if (!total_radar_scans) {
		error("radar_scan(): The scan 0x%x failed. It wasn't possible "
			  "to send a single scan", my_echo_id);
		return -1;
	}

	/* Wait MAX_RADAR_WAIT while the replies are gathered by radard() */
	xtimer(max_radar_wait, max_radar_wait << 1, &radar_wait_counter);

	final_radar_queue();
	radar_update_map();

	if (activate_qspn)
		for (i = 0; i < me.cur_quadg.levels; i++)
			if (send_qspn_now[i]) {
				/* Freed by radar_qspn_send_t() */
				p = xmalloc(sizeof(int));
				*p = i;
				/* We start a new qspn_round in the `i'-th level */
				pthread_create(&thread, &radar_qspn_send_t_attr,
							   radar_qspn_send_t, (void *) p);
			}

	if (!(me.cur_node->flags & MAP_HNODE))
		reset_radar();

	radar_scan_mutex = 0;
	return 0;
}
  1282. /*
  1283. * radard
  1284. *
  1285. * It sends back to rpkt.from the ECHO_REPLY pkt in reply to the ECHO_ME
  1286. * pkt received.
  1287. */
  1288. int
  1289. radard(PACKET rpkt)
  1290. {
  1291. PACKET pkt;
  1292. struct radar_queue *rq;
  1293. ssize_t err;
  1294. const char *ntop = 0;
  1295. int dev_pos;
  1296. u_char echo_scans_count;
  1297. if (alwd_rnodes_counter && !is_rnode_allowed(rpkt.from, alwd_rnodes)) {
  1298. debug(DBG_INSANE, "Filtering 0x%x ECHO_ME", rpkt.hdr.id);
  1299. return -1;
  1300. }
  1301. if ((rpkt.hdr.flags & RESTRICTED_PKT && !restricted_mode) ||
  1302. (!(rpkt.hdr.flags & RESTRICTED_PKT) && restricted_mode))
  1303. return -1;
  1304. dev_pos = ifs_get_pos(me.cur_ifs, me.cur_ifs_n, rpkt.dev);
  1305. if (dev_pos < 0)
  1306. debug(DBG_NORMAL, "The 0x%x ECHO_ME pkt was received by a non "
  1307. "existent interface", rpkt.hdr.id);
  1308. /* If we are hooking we reply only to others hooking nodes */
  1309. if (me.cur_node->flags & MAP_HNODE) {
  1310. if (rpkt.hdr.flags & HOOK_PKT) {
  1311. memcpy(&echo_scans_count, rpkt.msg, sizeof(u_char));
  1312. /*
  1313. * So, we are hooking, but we haven't yet started the
  1314. * first scan or we have done less scans than rpkt.from,
  1315. * this means that this node, who is hooking
  1316. * too and sent us this rpkt, has started the hook
  1317. * before us. If we are in a black zone, this flag
  1318. * will be used to decide which of the hooking nodes
  1319. * have to create the new gnode: if it is set we'll wait,
  1320. * the other hooking node will create the gnode, then we
  1321. * restart the hook. Clear?
  1322. */
  1323. if (!radar_scan_mutex
  1324. || echo_scans_count >= radar_scans[dev_pos])
  1325. hook_retry = 1;
  1326. } else {
  1327. /*debug(DBG_NOISE, "ECHO_ME pkt dropped: We are hooking"); */
  1328. return 0;
  1329. }
  1330. }
  1331. /* We create the ECHO_REPLY pkt */
  1332. setzero(&pkt, sizeof(PACKET));
  1333. pkt_addto(&pkt, &rpkt.from);
  1334. pkt_addsk(&pkt, rpkt.from.family, rpkt.sk, SKT_UDP);
  1335. if (me.cur_node->flags & MAP_HNODE) {
  1336. /*
  1337. * We attach in the ECHO_REPLY a flag that indicates if we have
  1338. * finished our radar_scan or not. This is usefull if we already
  1339. * sent all the ECHO_ME pkts of our radar scan and while we are
  1340. * waiting the MAX_RADAR_WAIT another node start the hooking:
  1341. * with this flag it can know if we came before him.
  1342. */
  1343. u_char scanning = 1;
  1344. pkt.hdr.sz = sizeof(u_char);
  1345. pkt.hdr.flags |= HOOK_PKT;
  1346. pkt.msg = xmalloc(pkt.hdr.sz);
  1347. if (radar_scans[dev_pos] == MAX_RADAR_SCANS)
  1348. scanning = 0;
  1349. memcpy(pkt.msg, &scanning, sizeof(u_char));
  1350. /*
  1351. * W Poetry Palazzolo, the enlightening holy garden.
  1352. * Sat Mar 12 20:41:36 CET 2005
  1353. */
  1354. }
  1355. if (restricted_mode)
  1356. pkt.hdr.flags |= RESTRICTED_PKT;
  1357. /* We send it */
  1358. err = send_rq(&pkt, 0, ECHO_REPLY, rpkt.hdr.id, 0, 0, 0);
  1359. pkt_free(&pkt, 0);
  1360. if (err < 0) {
  1361. error("radard(): Cannot send back the ECHO_REPLY to %s.", ntop);
  1362. return -1;
  1363. }
  1364. /*
  1365. * Ok, we have sent the reply, now we can update the radar_queue with
  1366. * calm.
  1367. */
  1368. if (radar_q) {
  1369. rq = add_radar_q(rpkt);
  1370. rq->pings++;
  1371. #ifdef DEBUG
  1372. if (server_opt.dbg_lvl && rq->pings == 1 &&
  1373. me.cur_node->flags & MAP_HNODE) {
  1374. ntop = inet_to_str(pkt.to);
  1375. debug(DBG_INSANE, "%s(0x%x) to %s", rq_to_str(ECHO_REPLY),
  1376. rpkt.hdr.id, ntop);
  1377. }
  1378. #endif
  1379. }
  1380. return 0;
  1381. }
  1382. /*
  1383. * refresh_hook_root_node
  1384. *
  1385. * At hooking the radar_scan doesn't have an int_map, so
  1386. * all the nodes it found are stored in fake nodes. When we finish the hook,
  1387. * instead, we have an int_map, so we convert all this fake nodes into real
  1388. * nodes. To do this we modify each rq->node of the radar_queue and recall the
  1389. * radar_update_map() func.
  1390. * rnode_list and qspn_b are also updated.
  1391. * Note: the me.cur_node must be deleted prior the call of this function.
  1392. */
  1393. int
  1394. refresh_hook_root_node(void)
  1395. {
  1396. struct radar_queue *rq;
  1397. map_node *rnode;
  1398. int ret;
  1399. rq = radar_q;
  1400. list_for(rq) {
  1401. ret = iptomap((u_int) me.int_map, rq->ip, me.cur_quadg.ipstart[1],
  1402. &rnode);
  1403. if (ret)
  1404. rq->node = (map_node *) RADQ_EXT_RNODE;
  1405. else
  1406. rq->node = rnode;
  1407. }
  1408. radar_update_map();
  1409. /*
  1410. * Remove all the rnode_list structs which refers to the fake
  1411. * rnodes.
  1412. */
  1413. rnl_del_dead_rnode(&rlist, &rlist_counter, me.cur_node);
  1414. /* Do the same for the qspn_b buffer */
  1415. qspn_b_del_all_dead_rnodes();
  1416. return 0;
  1417. }
  1418. /*
  1419. * radar_daemon
  1420. *
  1421. * keeps the radar up until the end of the universe.
  1422. */
  1423. void *
  1424. radar_daemon(void *null)
  1425. {
  1426. /* If `radar_daemon_ctl' is set to 0 the radar_daemon will stop.
  1427. * It will restart when it becomes again 1 */
  1428. radar_daemon_ctl = 1;
  1429. debug(DBG_NORMAL, "Radar daemon up & running");
  1430. for (;;) {
  1431. while (!radar_daemon_ctl)
  1432. sleep(1);
  1433. radar_scan(1);
  1434. }
  1435. }
  1436. /*
  1437. * radar_wait_new_scan
  1438. *
  1439. * It sleeps until a new radar scan is sent
  1440. */
  1441. void
  1442. radar_wait_new_scan(void)
  1443. {
  1444. int old_echo_id, old_radar_wait_counter;
  1445. old_echo_id = my_echo_id;
  1446. old_radar_wait_counter = radar_wait_counter;
  1447. for (; old_echo_id == my_echo_id;) {
  1448. usleep(505050);
  1449. /* If the radar_wait_counter doesn't change, that means that
  1450. * the radar isn't active */
  1451. if (radar_wait_counter == old_radar_wait_counter)
  1452. break;
  1453. }
  1454. }
  1455. /*EoW*/