You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

radar.c 40KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716
  1. /* This file is part of Netsukuku
  2. * (c) Copyright 2005 Andrea Lo Pumo aka AlpT <alpt@freaknet.org>
  3. *
  4. * This source code is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as published
  6. * by the Free Software Foundation; either version 2 of the License,
  7. * or (at your option) any later version.
  8. *
  9. * This source code is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  12. * Please refer to the GNU Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Public License along with
  15. * this source code; if not, write to:
  16. * Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  17. *
  18. * --
  19. *
  20. * radar.c
  21. *
  22. * The radar sends in broadcast a bouquet of MAX_RADAR_SCANS# packets and waits
  23. * for the ECHO_REPLY of the nodes which are alive. It then recollects the
  24. * replies and builds a small statistic, updates, if necessary, the internal
  25. * maps, the bnode maps and the qspn buffer.
  26. * A radar is fired periodically by the radar_daemon(), which is started as a
  27. * thread.
  28. */
  29. #include "includes.h"
  30. #include "llist.c"
  31. #include "endianness.h"
  32. #include "if.h"
  33. #include "bmap.h"
  34. #include "route.h"
  35. #include "request.h"
  36. #include "pkts.h"
  37. #include "qspn.h"
  38. #include "radar.h"
  39. #include "netsukuku.h"
  40. #include "common.h"
/* Attributes for the detached radar->qspn sender threads spawned later on */
pthread_attr_t radar_qspn_send_t_attr;

/*
 * first_init_radar
 *
 * One-time radar initialisation, performed at startup.
 * It sets the global radar timeout, prepares the detached thread
 * attributes, registers the radar packet operations (ECHO_ME broadcast
 * handled by radard(), ECHO_REPLY handled by radar_recv_reply()),
 * initialises the rnode and allowed-rnode lists, and finally calls
 * init_radar() for the per-scan state.
 * Note: the call order matters — the pkt ops must be registered before
 * any scan can run.
 */
void
first_init_radar(void)
{
	max_radar_wait = MAX_RADAR_WAIT;

	pthread_attr_init(&radar_qspn_send_t_attr);
	pthread_attr_setdetachstate(&radar_qspn_send_t_attr,
								PTHREAD_CREATE_DETACHED);

	/* register the radar's ops in the pkt_op_table */
	add_pkt_op(ECHO_ME, SKT_BCAST, ntk_udp_radar_port, radard);
	add_pkt_op(ECHO_REPLY, SKT_UDP, ntk_udp_radar_port, radar_recv_reply);

	rlist = (struct rnode_list *) clist_init(&rlist_counter);
	alwd_rnodes =
		(struct allowed_rnode *) clist_init(&alwd_rnodes_counter);

	radar_daemon_ctl = 0;
	init_radar();
}
/*
 * last_close_radar
 *
 * Final radar shutdown: destroys the radar queue (close_radar()) and
 * then frees the whole rnode_list, closing any cached tcp sockets.
 */
void
last_close_radar(void)
{
	close_radar();
	rnl_reset(&rlist, &rlist_counter);
}
/*
 * init_radar
 *
 * Resets the per-scan radar state: clears the hook/echo counters, the
 * per-scan bookkeeping arrays, the scan mutex flag, allocates a fresh
 * empty radar queue and clears the per-level "send qspn now" flags.
 * Called at startup (from first_init_radar()) and after each reset.
 */
void
init_radar(void)
{
	hook_retry = 0;
	my_echo_id = 0;
	total_radar_scans = 0;
	setzero(radar_scans, sizeof(radar_scans));
	radar_scan_mutex = 0;

	/* brand new, empty radar queue */
	radar_q = (struct radar_queue *) clist_init(&radar_q_counter);

	setzero(send_qspn_now, sizeof(u_char) * MAX_LEVELS);
}
  75. void
  76. close_radar(void)
  77. {
  78. if (radar_q_counter)
  79. clist_destroy(&radar_q, &radar_q_counter);
  80. }
/*
 * reset_radar
 *
 * Brings the radar back to a pristine state: if we are still hooking
 * (MAP_HNODE set on our node) the temporary rq->node structs and the
 * whole rnode_list are freed first, then the radar queue is destroyed
 * and re-initialised.
 */
void
reset_radar(void)
{
	if (me.cur_node->flags & MAP_HNODE) {
		free_new_node();
		rnl_reset(&rlist, &rlist_counter);
	}

	close_radar();
	init_radar();
}
  91. /*
  92. * free_new_node
  93. *
  94. * frees all the temporary alloced rq->node structs used at the
  95. * hook time.
  96. */
  97. void
  98. free_new_node(void)
  99. {
  100. struct radar_queue *rq;
  101. rq = radar_q;
  102. list_for(rq)
  103. if (rq->node && ((uintptr_t) rq->node != RADQ_EXT_RNODE)) {
  104. xfree(rq->node);
  105. rq->node = 0;
  106. }
  107. }
  108. /*
  109. * find_node_radar_q
  110. *
  111. * returns the first radar_queue struct which has the
  112. * rq->node pointer equal to `node'.
  113. */
  114. struct radar_queue *
  115. find_node_radar_q(map_node * node)
  116. {
  117. struct radar_queue *rq;
  118. rq = radar_q;
  119. list_for(rq)
  120. if (rq->node == node)
  121. return rq;
  122. return 0;
  123. }
  124. /*
  125. * find_ip_radar_q
  126. *
  127. * returns the first radar_queue struct which has the rq->ip
  128. * member equal to the given `ip'.
  129. */
  130. struct radar_queue *
  131. find_ip_radar_q(inet_prefix * ip)
  132. {
  133. struct radar_queue *rq;
  134. rq = radar_q;
  135. list_for(rq)
  136. if (!memcmp(rq->ip.data, ip->data, MAX_IP_SZ))
  137. return rq;
  138. return 0;
  139. }
  140. /*
  141. * rnl_add
  142. *
  143. * adds a new rnode_list struct in the `*rnlist' list. The new
  144. * allocated struct will be filled respectively with `rnode' and `dev'.
  145. * It returns the added `rnode_list' struct.
  146. */
  147. struct rnode_list *
  148. rnl_add(struct rnode_list **rnlist, int *rnlist_counter,
  149. map_node * rnode, interface * dev)
  150. {
  151. struct rnode_list *rnl;
  152. rnl = xzalloc(sizeof(struct rnode_list));
  153. rnl->node = (map_node *) rnode;
  154. rnl->dev[0] = dev;
  155. rnl->dev_n++;
  156. clist_add(rnlist, rnlist_counter, rnl);
  157. return rnl;
  158. }
  159. /*
  160. * rnl_del
  161. *
  162. * deletes the `rnl' struct from the `rnlist' rnode_list.
  163. * If `close_socket' is not zero, `rnl'->tcp_sk will be closed.
  164. */
  165. void
  166. rnl_del(struct rnode_list **rnlist, int *rnlist_counter,
  167. struct rnode_list *rnl, int close_socket)
  168. {
  169. if (rnl) {
  170. if (close_socket && rnl->tcp_sk)
  171. inet_close(&rnl->tcp_sk);
  172. clist_del(rnlist, rnlist_counter, rnl);
  173. }
  174. if (!(*rnlist_counter))
  175. *rnlist = 0;
  176. }
  177. /*
  178. * rnl_reset
  179. *
  180. * reset the whole rnode_list
  181. */
  182. void
  183. rnl_reset(struct rnode_list **rnlist, int *rnlist_counter)
  184. {
  185. struct rnode_list *rnl = *rnlist, *next;
  186. list_safe_for(rnl, next)
  187. rnl_del(rnlist, rnlist_counter, rnl, 1);
  188. *rnlist = (struct rnode_list *) clist_init(rnlist_counter);
  189. }
  190. /*
  191. * rnl_del_dead_rnode
  192. *
  193. * it removes all the rnode_list structs which are related
  194. * to a rnode which doesn't exist anymore in `root_node'
  195. * It returns the number of delete rnodes_list structs.
  196. */
  197. int
  198. rnl_del_dead_rnode(struct rnode_list **rnlist, int *rnlist_counter,
  199. map_node * root_node)
  200. {
  201. struct rnode_list *rnl = *rnlist, *next;
  202. int i = 0;
  203. list_safe_for(rnl, next)
  204. if (rnode_find(root_node, rnl->node) < 0) {
  205. rnl_del(rnlist, rnlist_counter, rnl, 1);
  206. i++;
  207. }
  208. return i;
  209. }
  210. /*
  211. * rnl_find_rpos
  212. *
  213. * returns the first rnode_list struct, contained in
  214. * `rnlist', which has rnl->node equal to `node'.
  215. */
  216. struct rnode_list *
  217. rnl_find_node(struct rnode_list *rnlist, map_node * node)
  218. {
  219. struct rnode_list *rnl = rnlist;
  220. list_for(rnl)
  221. if (rnl->node == node)
  222. return rnl;
  223. return 0;
  224. }
  225. /*
  226. * rnl_add_dev
  227. *
  228. * If `rnl' is 0 a new struct is added in `*rnlist' using `node'.
  229. * In both cases the `new_dev' is added in the rnl->dev[] array of
  230. * pointers (if it isn't already present there) and rnl->dev_n is
  231. * incremented.
  232. * On error -1 is returned.
  233. */
  234. int
  235. rnl_add_dev(struct rnode_list **rnlist, int *rnlist_counter,
  236. struct rnode_list *rnl, map_node * node, interface * new_dev)
  237. {
  238. int i;
  239. if (!rnl) {
  240. rnl = rnl_add(rnlist, rnlist_counter, node, new_dev);
  241. return 0;
  242. }
  243. if (rnl->dev_n >= MAX_INTERFACES)
  244. return -1;
  245. for (i = 0; i < rnl->dev_n; i++)
  246. if (rnl->dev[i] == new_dev)
  247. return 0;
  248. rnl->dev[rnl->dev_n++] = new_dev;
  249. return 0;
  250. }
  251. /*
  252. * rnl_del_dev
  253. *
  254. * It searches a pointer in the rnl->dev[] array equal to
  255. * `del_dev'. If it is found, it is set to 0 and rnl->dev_n is decremented,
  256. * otherwise 0 is returned.
  257. * If rnlist->dev_n is 0, the found rnlist struct is deleted from the llist.
  258. * On error -1 is returned.
  259. */
  260. int
  261. rnl_del_dev(struct rnode_list **rnlist, int *rnlist_counter,
  262. struct rnode_list *rnl, interface * del_dev)
  263. {
  264. int i;
  265. if (!rnl)
  266. return 0;
  267. if (rnl->dev_n <= 0)
  268. return -1;
  269. for (i = 0; i < rnl->dev_n; i++) {
  270. if (rnl->dev[i] == del_dev) {
  271. if (i == rnl->dev_n - 1)
  272. rnl->dev[i] = 0;
  273. else {
  274. rnl->dev[i] = rnl->dev[rnl->dev_n - 1];
  275. rnl->dev[rnl->dev_n - 1] = 0;
  276. }
  277. rnl->dev_n--;
  278. break;
  279. }
  280. }
  281. if (!rnl->dev_n)
  282. rnl_del(rnlist, rnlist_counter, rnl, 1);
  283. return 0;
  284. }
  285. /*
  286. * rnl_update_devs
  287. *
  288. * it updates the device array present in the rnode_list struct of `node'.
  289. * It searches in rnlist a struct which have rnlist->node == `node',
  290. * then it substitutes rnlist->dev with `devs' and rnlist->dev_n with `dev_n'.
  291. * If there is a difference between the new `devs' array and the old one, 1 is
  292. * returned.
  293. */
  294. int
  295. rnl_update_devs(struct rnode_list **rnlist, int *rnlist_counter,
  296. map_node * node, interface ** devs, int dev_n)
  297. {
  298. struct rnode_list *old_rnl, *new_rnl;
  299. int i, dev_pos, update = 0;
  300. old_rnl = rnl_find_node(*rnlist, node);
  301. if (!dev_n) {
  302. /*
  303. * The new `devs' array is empty, therefore delete old_rnl
  304. */
  305. rnl_del(rnlist, rnlist_counter, old_rnl, 1);
  306. return 0;
  307. }
  308. if (old_rnl)
  309. /*
  310. * Diff old_rnl->dev and `devs'
  311. */
  312. for (i = 0; i < dev_n; i++) {
  313. dev_pos = FIND_PTR(devs[i], old_rnl->dev, old_rnl->dev_n);
  314. if (dev_pos < 0) {
  315. update = 1;
  316. break;
  317. }
  318. } else if (!old_rnl)
  319. update = 1;
  320. if (update) {
  321. new_rnl = rnl_add(rnlist, rnlist_counter, node, devs[0]);
  322. for (i = 1; i < dev_n; i++)
  323. rnl_add_dev(rnlist, rnlist_counter, new_rnl, node, devs[i]);
  324. new_rnl->tcp_sk = (old_rnl) ? old_rnl->tcp_sk : 0;
  325. rnl_del(rnlist, rnlist_counter, old_rnl, 0);
  326. }
  327. return update;
  328. }
  329. interface **
  330. rnl_get_dev(struct rnode_list * rnlist, map_node * node)
  331. {
  332. struct rnode_list *rnl;
  333. rnl = rnl_find_node(rnlist, node);
  334. return !rnl ? 0 : rnl->dev;
  335. }
  336. interface *
  337. rnl_get_rand_dev(struct rnode_list * rnlist, map_node * node)
  338. {
  339. struct rnode_list *rnl;
  340. return !(rnl = rnl_find_node(rnlist, node)) ?
  341. 0 : rnl->dev[rand_range(0, rnl->dev_n - 1)];
  342. }
/*
 * rnl_get_sk
 *
 * It returns the tcp socket associated to rnode `node'.
 * If the socket is set to zero, it tries to create a tcp connection to
 * `node' to the `ntk_tcp_port' port, caching the result in rnl->tcp_sk.
 *
 * On error -1 is returned.
 */
int
rnl_get_sk(struct rnode_list *rnlist, map_node * node)
{
	struct rnode_list *rnl;

	if (!(rnl = rnl_find_node(rnlist, node)))
		return -1;

	if (!rnl->tcp_sk) {
		inet_prefix to;
		int i;

		/* Resolve the destination IP of `node' */
		if (me.cur_node->flags & MAP_HNODE) {
			struct radar_queue *rq;

			/* If we are hooking, get the IP from the radar
			 * queue */
			if (!(rq = find_node_radar_q(rnl->node)))
				return -1;
			inet_copy(&to, &rq->ip);
		} else {
			/* Normal case: derive the IP from the node's position
			 * in the internal map */
			rnodetoip((uintptr_t) me.int_map, (uintptr_t) node,
					  me.cur_quadg.ipstart[1], &to);
		}

		/* Try to connect using the `i'th device. If it fails, try
		 * another device */
		for (i = 0; i < rnl->dev_n && rnl->tcp_sk <= 0; i++)
			rnl->tcp_sk = pkt_tcp_connect(&to, ntk_tcp_port, rnl->dev[i]);

		/* If the socket is connected, set it to keepalive;
		 * otherwise normalise any negative error value back to 0
		 * so a later call will retry the connection */
		if ((rnl->tcp_sk = (rnl->tcp_sk <= 0) ? 0 : rnl->tcp_sk))
			set_keepalive_sk(rnl->tcp_sk);
	}

	return rnl->tcp_sk > 0 ? rnl->tcp_sk : -1;
}
  382. /*
  383. * rnl_set_sk
  384. *
  385. * It sets the socket associated to rnode `node' to `sk'
  386. */
  387. void
  388. rnl_set_sk(struct rnode_list *rnlist, map_node * node, int sk)
  389. {
  390. struct rnode_list *rnl;
  391. if (!(rnl = rnl_find_node(rnlist, node)))
  392. return;
  393. rnl->tcp_sk = sk;
  394. }
  395. /*
  396. * rnl_close_all_sk
  397. *
  398. * It closes all the opened tcp_sk of the `rnlist' llist
  399. */
  400. void
  401. rnl_close_all_sk(struct rnode_list *rnlist)
  402. {
  403. struct rnode_list *rnl = rnlist;
  404. list_for(rnl)
  405. if (rnl->tcp_sk)
  406. inet_close(&rnl->tcp_sk);
  407. }
/*
 * rnl_fill_rq
 *
 * It sets the `pkt'->sk and `pkt'->to variables.
 * The `pkt'->sk is retrieved using rnl_get_sk(), `pkt'->to from the
 * peer address of that socket.
 *
 * On error -1 is returned.
 *
 * NOTE(review): on a getpeername() failure the retry jumps back with
 * `pkt'->sk still set, so the second attempt queries the very same
 * socket instead of fetching a fresh one — presumably the retry was
 * meant to re-acquire the socket; verify against the callers.
 */
int
rnl_fill_rq(map_node * rnode, PACKET * pkt)
{
	int tries = 0;

  retry:
	if (!pkt->sk && (pkt->sk = rnl_get_sk(rlist, rnode)) <= 0) {
		error(ERROR_MSG "Couldn't get the socket associated "
			  "to dst_rnode", ERROR_FUNC);
		return -1;
	}

	if (inet_getpeername(pkt->sk, &pkt->to, 0) < 0) {
		/* retry once before giving up */
		tries++;
		if (tries < 2)
			goto retry;
		return -1;
	}

	return 0;
}
/*
 * rnl_send_rq
 *
 * It is a wrapper to send_rq. It is used to send or receive a packet to/from
 * the specified `rnode'. If the cached socket turns out to be broken
 * (connect/send/recv error), it is invalidated and the request is
 * retried once with a freshly acquired socket.
 *
 * On error -1 is returned.
 *
 * Note: the pkt->sk must not be closed.
 */
int
rnl_send_rq(map_node * rnode,
			PACKET * pkt, int pkt_flags, u_char rq, int rq_id, u_char re,
			int check_ack, PACKET * rpkt)
{
	int ret, tries = 0;

  retry:
	/* make sure pkt->sk and pkt->to are valid */
	if (!pkt->sk && rnl_fill_rq(rnode, pkt) < 0)
		return -1;

	ret = send_rq(pkt, pkt_flags, rq, rq_id, re, check_ack, rpkt);
	if ((ret == SEND_RQ_ERR_CONNECT || ret == SEND_RQ_ERR_SEND ||
		 ret == SEND_RQ_ERR_RECV)) {
		/* The socket has been corrupted, set it to 0 and try again */
		inet_close(&pkt->sk);
		rnl_set_sk(rlist, rnode, 0);

		tries++;
		if (tries < 2)
			goto retry;
	}

	return ret;
}
  465. /*
  466. * is_rnode_allowed
  467. *
  468. * it verifies if the rnode described by the `rip' IP is
  469. * present in the `alr' llist. If it is 1 is returned, otherwise 0.
  470. */
  471. int
  472. is_rnode_allowed(inet_prefix rip, struct allowed_rnode *alr)
  473. {
  474. int i, e, gid[MAX_LEVELS];
  475. iptogids(&rip, gid, FAMILY_LVLS);
  476. list_for(alr) {
  477. for (e = 0, i = alr->min_level; i < alr->tot_level; i++)
  478. if (gid[i] != alr->gid[i]) {
  479. e = 1;
  480. break;
  481. }
  482. if (!e)
  483. return 1;
  484. }
  485. return 0;
  486. }
  487. /*
  488. * new_rnode_allowed
  489. *
  490. * add a new allowed rnode in the `alr' llist which has
  491. * already `*alr_counter' members. `gid', `min_lvl', and `tot_lvl' are the
  492. * respective field of the new allowed_rnode struct.
  493. */
  494. void
  495. new_rnode_allowed(struct allowed_rnode **alr, int *alr_counter,
  496. int *gid, int min_lvl, int tot_lvl)
  497. {
  498. struct allowed_rnode *new_alr;
  499. new_alr = xmalloc(sizeof(struct allowed_rnode));
  500. new_alr->min_level = min_lvl;
  501. new_alr->tot_level = tot_lvl;
  502. setzero(new_alr->gid, sizeof(int) * MAX_LEVELS);
  503. memcpy(&new_alr->gid[min_lvl], &gid[min_lvl],
  504. sizeof(int) * (tot_lvl - min_lvl));
  505. debug(DBG_SOFT,
  506. "new_rnode_allowed: %d, %d, %d, %d. min_lvl: %d, tot_lvl: %d",
  507. gid[0], gid[1], gid[2], gid[3], min_lvl, tot_lvl);
  508. clist_add(alr, alr_counter, new_alr);
  509. }
  510. void
  511. reset_rnode_allowed(struct allowed_rnode **alr, int *alr_counter)
  512. {
  513. if (*alr)
  514. list_destroy((*alr));
  515. *alr = (struct allowed_rnode *) clist_init(alr_counter);
  516. }
  517. /*
  518. * count_hooking_nodes
  519. *
  520. * returns the number of hooking nodes, which are stored
  521. * in the radar_queue.
  522. */
  523. int
  524. count_hooking_nodes(void)
  525. {
  526. struct radar_queue *rq;
  527. int total_hooking_nodes = 0;
  528. rq = radar_q;
  529. list_for(rq) {
  530. if (!rq->node)
  531. continue;
  532. if (rq->node->flags & MAP_HNODE)
  533. total_hooking_nodes++;
  534. }
  535. return total_hooking_nodes;
  536. }
  537. /*
  538. * final_radar_queue
  539. *
  540. * analyses the received ECHO_REPLY pkt and write the
  541. * average rtt of each found node in the radar_queue.
  542. */
  543. void
  544. final_radar_queue(void)
  545. {
  546. struct radar_queue *rq;
  547. int e;
  548. struct timeval sum;
  549. u_int f_rtt;
  550. setzero(&sum, sizeof(struct timeval));
  551. rq = radar_q;
  552. list_for(rq) {
  553. if (!rq->node)
  554. continue;
  555. /* Sum the rtt of all the received pongs */
  556. for (e = 0; e < rq->pongs; e++)
  557. timeradd(&rq->rtt[e], &sum, &sum);
  558. /* Add penality rtt for each pong lost */
  559. for (; e < MAX_RADAR_SCANS; e++)
  560. timeradd(&rq->rtt[e - rq->pongs], &sum, &sum);
  561. f_rtt = MILLISEC(sum) / MAX_RADAR_SCANS;
  562. MILLISEC_TO_TV(f_rtt, rq->final_rtt);
  563. }
  564. my_echo_id = 0;
  565. }
/*
 * radar_remove_old_rnodes
 *
 * It removes all the old rnodes ^_- i.e. the rnodes of me.cur_node
 * which are still flagged MAP_VOID after the radar scan (nobody
 * replied). For every dead rnode it purges the rnode_list entry, the
 * internal/external maps, the bnode maps, the routing table entries
 * and the qspn buffer, and records the touched level in the
 * `rnode_deleted' bitmask (used by radar_update_map()).
 *
 * Always returns 0.
 */
int
radar_remove_old_rnodes(char *rnode_deleted)
{
	map_node *node, *root_node, *broot_node;
	map_gnode *gnode;
	map_bnode *bnode;
	ext_rnode *e_rnode = 0;
	ext_rnode_cache *erc;
	struct qspn_buffer *qb;
	struct rnode_list *rnl;
	int i, e, node_pos, bm, rnode_pos, bnode_rnode_pos, root_node_pos;
	int broot_node_pos;
	int level, blevel, external_node, total_levels, first_level;
	void *void_gnode;

	if (!me.cur_node->links)
		return 0;

	for (i = 0; i < me.cur_node->links; i++) {
		node = (map_node *) me.cur_node->r_node[i].r_node;
		if (!(node->flags & MAP_VOID))
			/* The rnode is not really dead! */
			continue;

		if (node->flags & MAP_ERNODE) {
			/* external rnode: it spans quadg.levels levels, and
			 * level 0 is handled via the ext_rnode path below */
			e_rnode = (ext_rnode *) node;
			external_node = 1;
			total_levels = e_rnode->quadg.levels;
			first_level = 1;

			quadg_setflags(&e_rnode->quadg, MAP_VOID);
		} else {
			/* internal rnode: only level 0 is involved */
			external_node = 0;
			total_levels = 1;
			first_level = 0;
		}

		for (level = first_level; level < total_levels; level++) {
			qspn_set_map_vars(level, 0, &root_node, &root_node_pos, 0);
			blevel = level - 1;

			/* delete the rnode from the rnode_list */
			rnl = rnl_find_node(rlist, node);
			rnl_del(&rlist, &rlist_counter, rnl, 1);

			/*
			 * Just delete it from all the maps.
			 */
			if (!level && !external_node) {
				/* internal node, level 0 */
				node_pos = pos_from_node(node, me.int_map);
				rnode_pos = i;

				debug(DBG_NORMAL, "radar: The node %d is dead", node_pos);

				/* delete it from the int_map and update the gcount */
				map_node_del(node);
				qspn_dec_gcount((u_int *) qspn_gnode_count, level + 1, 1);

				/* delete the route */
				rt_update_node(0, node, 0, 0, 0, level);

				send_qspn_now[level] = 1;
			} else {
				/* external node, level >= 1 */
				gnode = e_rnode->quadg.gnode[_EL(level)];

				/** delete the direct route to the ext_node */
				if (level == 1)
					rt_update_node(&e_rnode->quadg.ipstart[0],
								   e_rnode, 0, 0, 0, /*level=0 */ 0);
				/**/ void_gnode = (void *) gnode;
				if (!void_gnode)
					continue;

				node_pos = pos_from_gnode(gnode, me.ext_map[_EL(level)]);
				rnode_pos = g_rnode_find((map_gnode *) root_node, gnode);

				debug(DBG_NORMAL, "The ext_node (gid %d, lvl %d) is"
					  " dead", e_rnode->quadg.gid[level], level);

				/* bnode_map update: drop this gnode from every
				 * border-node entry of the lower levels */
				for (e = 0; blevel >= 0; blevel--) {
					qspn_set_map_vars(blevel, 0, &broot_node,
									  &broot_node_pos, 0);
					bm = map_find_bnode(me.bnode_map[blevel],
										me.bmap_nodes[blevel],
										broot_node_pos);
					if (bm == -1)
						continue;

					bnode = &me.bnode_map[blevel][bm];
					bnode_rnode_pos = rnode_find(bnode,
												 (map_node *) e_rnode->
												 quadg.gnode[_EL(level)]);
					if (bnode_rnode_pos != -1)
						rnode_del(bnode, bnode_rnode_pos);

					if (!bnode->links) {
						/* the bnode borders on nothing anymore:
						 * remove it from the bnode map */
						me.bnode_map[blevel] =
							map_bnode_del(me.bnode_map[blevel],
										  &me.bmap_nodes[blevel], bnode);
						broot_node->flags &= ~MAP_BNODE;
					} else
						e = 1;	/* at least one bnode entry survived */
				}
				if (!e)			/* We are no more a bnode */
					me.cur_node->flags &= ~MAP_BNODE;

				/* If we were the only bnode which bordered on
				 * `gnode', delete it from the map */
				if (map_find_bnode_rnode
					(me.bnode_map[level - 1], me.bmap_nodes[level - 1],
					 gnode) == -1) {
					qspn_dec_gcount((u_int *) qspn_gnode_count, level + 1,
									gnode->gcount);
					gmap_node_del(gnode);

					gnode_dec_seeds(&me.cur_quadg, level);	/* update the seeds */
				}

				/* Delete the entries from the routing table */
				rt_update_node(0, 0, &e_rnode->quadg, 0, 0, level);
				send_qspn_now[level] = 1;
			}

			if (rnode_pos >= 0 && root_node->links > 0)
				rnode_del(root_node, rnode_pos);

			if (!root_node->links) {
				/* We are alone in the dark. Sigh. */
				qspn_time_reset(level, level, FAMILY_LVLS);
			} else if (!external_node)
				erc_update_rnodepos(me.cur_erc, root_node, rnode_pos);

			/* Now we delete it from the qspn_buffer */
			if (qspn_b[level]) {
				qb = qspn_b[level];
				qb = qspn_b_find_rnode(qb, node);
				if (qb)
					qspn_b[level] = list_del(qspn_b[level], qb);
			}

			SET_BIT(rnode_deleted, level);
		}

		/*
		 * Kick out the external_node from the root_node and destroy it
		 * from the ext_rnode_cache
		 */
		if (external_node) {
			/* external rnode cache update */
			erc = erc_find(me.cur_erc, e_rnode);
			if (erc)
				e_rnode_del(&me.cur_erc, &me.cur_erc_counter, erc);
			rnode_del(me.cur_node, i);
		}

		/* If the rnode we deleted from the root_node was swapped with
		 * the last rnodes, we have to inspect again the same
		 * root_node->r_node[ `i' ] rnode, because now it is another
		 * rnode */
		if (i != (me.cur_node->links + 1) - 1)
			i--;
	}

	if (!me.cur_node->links) {
		/* - Diary -
		 * Tue Mar 14 07:29:58 CET 2006
		 * Damn! All my rnodes died, I am the last survivor in this
		 * great lone land... I have to reset my memory... farewell!
		 */
		qspn_reset_counters(FAMILY_LVLS);
	}

	return 0;
}
/*
 * radar_update_bmap
 *
 * updates the bnode map of the given `level': the root_node bnode in the bmap
 * will also point to the gnode of level `gnode_level'+1 that is
 * `rq'->quadg.gnode[_EL(gnode_level+1)], and its trtt is refreshed with
 * the final rtt measured for `rq'.
 */
void
radar_update_bmap(struct radar_queue *rq, int level, int gnode_level)
{
	map_gnode *gnode;
	map_node *root_node;
	map_rnode *rnode, rn;
	int bm, rnode_pos, root_node_pos;

	/* nothing to border on above the top level */
	if (level == me.cur_quadg.levels - 1)
		return;

	qspn_set_map_vars(level, 0, &root_node, &root_node_pos, 0);
	gnode = rq->quadg.gnode[_EL(gnode_level + 1)];

	/* find (or create) the bnode entry of our root_node */
	bm = map_find_bnode(me.bnode_map[level], me.bmap_nodes[level],
						root_node_pos);
	if (bm == -1) {
		bm = map_add_bnode(&me.bnode_map[level], &me.bmap_nodes[level],
						   root_node_pos, 0);
		rnode_pos = -1;
	} else
		rnode_pos = rnode_find(&me.bnode_map[level][bm], &gnode->g);

	if (rnode_pos == -1) {
		/* the bnode doesn't point to `gnode' yet: link it.
		 * NOTE(review): rnode_pos is then taken as 0 — this assumes
		 * the entry was just created empty, so the added rnode lands
		 * in slot 0; verify against rnode_add() */
		setzero(&rn, sizeof(map_rnode));
		rn.r_node = (int *) &gnode->g;
		rnode_add(&me.bnode_map[level][bm], &rn);
		rnode_pos = 0;
	}

	/* refresh the measured round-trip time */
	rnode = &me.bnode_map[level][bm].r_node[rnode_pos];
	rnode->trtt = MILLISEC(rq->final_rtt);
}
  754. /*
  755. * radar_update_map
  756. *
  757. * it updates the int_map and the ext_map if any bnodes are found.
  758. * Note that the rnodes in the map are held in a different way. First of all the qspn
  759. * is not applied to them (we already know how to reach them ;) and they have only
  760. * one rnode... ME. So me.cur_node->r_node[x].r_node->r_node[0] == me.cur_node.
  761. * Gotcha?
  762. */
void
radar_update_map(void)
{
	struct qspn_buffer *qb;
	struct radar_queue *rq;
	ext_rnode_cache *erc;
	map_gnode *gnode = 0;
	map_node *node, *root_node;
	map_rnode rnn, *new_root_rnode;
	ext_rnode *e_rnode;
	int i, diff, rnode_pos;
	/* Per-level bitmaps recording at which levels rnodes were
	 * added/removed during this update (see SET_BIT/TEST_BIT below). */
	u_char rnode_added[MAX_LEVELS / 8], rnode_deleted[MAX_LEVELS / 8];
	int level, external_node, total_levels, root_node_pos, node_update;
	const char *ntop;
	char updated_rnodes, routes_update, devs_update;

	updated_rnodes = routes_update = devs_update = 0;
	setzero(rnode_added, sizeof(rnode_added));
	setzero(rnode_deleted, sizeof(rnode_deleted));

	/**
	 * Let's consider all our rnodes void, in this way we'll know what
	 * rnodes will remain void after the update.
	 */
	for (i = 0; i < me.cur_node->links; i++) {
		node = (map_node *) me.cur_node->r_node[i].r_node;
		node->flags |= MAP_VOID | MAP_UPDATE;
	}
	/**/ rq = radar_q;
	list_for(rq) {
		if (!rq->node)
			continue;
		/* While not hooking, ignore entries coming from nodes that are
		 * themselves hooking: their info is transient. */
		if (!(me.cur_node->flags & MAP_HNODE) && (rq->flags & MAP_HNODE))
			continue;

		/*
		 * We need to know if it is a node which is not in the gnode
		 * where we are (external_rnode).
		 */
		if ((uintptr_t) rq->node == RADQ_EXT_RNODE) {
			external_node = 1;
			total_levels = rq->quadg.levels;
		} else {
			external_node = 0;
			total_levels = 1;
		}

		/* Walk from the highest level down to level 0 */
		for (level = total_levels - 1; level >= 0; level--) {
			qspn_set_map_vars(level, 0, &root_node, &root_node_pos, 0);
			node_update = devs_update = 0;

			if (!level) {
				node = rq->node;
			} else {
				/* Skip the levels where the ext_rnode belongs
				 * to our same gids */
				if (!quadg_gids_cmp(rq->quadg, me.cur_quadg, level))
					continue;

				/* Update only the gnodes which belongs to
				 * our same gid of the upper level, because
				 * we don't keep the internal info of the
				 * extern gnodes. */
				if ((level < rq->quadg.levels - 1) &&
					quadg_gids_cmp(rq->quadg, me.cur_quadg, level + 1)) {
					rq->quadg.gnode[_EL(level)] = 0;
					continue;
				}

				/* Ehi, we are a bnode */
				root_node->flags |= MAP_BNODE;
				me.cur_node->flags |= MAP_BNODE;

				gnode = rq->quadg.gnode[_EL(level)];
				node = &gnode->g;
			}

			/* Locate the rnode position in the root_node's rnodes:
			 * external level-0 nodes are searched in the ext_rnode
			 * cache, everything else with rnode_find(). */
			if (external_node && !level && me.cur_erc_counter) {
				erc = e_rnode_find(me.cur_erc, &rq->quadg, 0);
				if (!erc)
					rnode_pos = -1;
				else {
					rnode_pos = erc->rnode_pos;
					node = (map_node *) erc->e;
				}
			} else
				rnode_pos = rnode_find(root_node, node);

			if (rnode_pos == -1) {	/* W00t, we've found a new rnode! */
				node_update = 1;
				rnode_pos = root_node->links;

				ntop = inet_to_str(rq->quadg.ipstart[level]);
				if (server_opt.dbg_lvl || !level)
					loginfo
						("Radar: New node found: %s, ext: %d, level: %d",
						 ntop, external_node, level);

				if (external_node && !level) {
					/*
					 * If this node we are processing is external, at level 0,
					 * in the root_node's rnodes we add a rnode which point
					 * to a ext_rnode struct.
					 */
					setzero(&rnn, sizeof(map_rnode));

					e_rnode = xzalloc(sizeof(ext_rnode));
					memcpy(&e_rnode->quadg, &rq->quadg,
						   sizeof(quadro_group));
					e_rnode->node.flags =
						MAP_BNODE | MAP_GNODE | MAP_RNODE | MAP_ERNODE;

					rnn.r_node = (int *) e_rnode;
					node = rq->node = &e_rnode->node;
					new_root_rnode = &rnn;

					/* Update the external_rnode_cache list */
					e_rnode_add(&me.cur_erc, e_rnode, rnode_pos,
								&me.cur_erc_counter);
				} else {
					/*We purge all the node's rnodes. */
					rnode_destroy(node);

					/*
					 * This node has only one rnode,
					 * and that is the root_node.
					 */
					setzero(&rnn, sizeof(map_rnode));
					rnn.r_node = (int *) root_node;
					rnode_add(node, &rnn);

					/* It is a border node */
					if (level)
						node->flags |= MAP_BNODE | MAP_GNODE;
					node->flags |= MAP_RNODE;

					/*
					 * Fill the rnode to be added in the
					 * root_node.
					 */
					setzero(&rnn, sizeof(map_rnode));
					rnn.r_node = (int *) node;
					new_root_rnode = &rnn;
				}

				/*
				 * The new node is added in the root_node's
				 * rnodes.
				 */
				rnode_add(root_node, new_root_rnode);

				/* Update the qspn_buffer */
				if (!external_node || level) {
					qb = xzalloc(sizeof(struct qspn_buffer));
					qb->rnode = node;
					qspn_b[level] = list_add(qspn_b[level], qb);

					send_qspn_now[level] = 1;
				}

				/* If the new rnode wasn't present in the map,
				 * then it is also a new node in the map, so
				 * update the seeds counter too */
				if (!level && !external_node && (node->flags & MAP_VOID)) {
					gnode_inc_seeds(&me.cur_quadg, level);
					qspn_inc_gcount(qspn_gnode_count, level + 1, 1);
				}

				SET_BIT(rnode_added, level);
			} else {
				/*
				 * Nah, We have the node in the map. Let's see if
				 * its rtt is changed
				 */
				if (!send_qspn_now[level] && node->links) {
					diff = abs(root_node->r_node[rnode_pos].trtt -
							   MILLISEC(rq->final_rtt));
					if (diff >= RTT_DELTA) {
						node_update = 1;
						send_qspn_now[level] = 1;
						debug(DBG_NOISE, "node %s rtt changed, diff: %d",
							  inet_to_str(rq->ip), diff);
					}
				}
			}

			/* Restore the flags: the combined mask clears MAP_VOID,
			 * MAP_UPDATE and QSPN_OLD in a single AND. */
			if (level)
				gnode->flags &= ~GMAP_VOID;
			node->flags &= ~MAP_VOID & ~MAP_UPDATE & ~QSPN_OLD;

			/*
			 * Update the devices list of the rnode
			 */
			if (!level) {
				devs_update = rnl_update_devs(&rlist, &rlist_counter,
											  node, rq->dev, rq->dev_n);
				if (devs_update)
					routes_update++;
			}

			/* Nothing is really changed */
			if (!node_update)
				continue;

			/* Update the rtt */
			root_node->r_node[rnode_pos].trtt = MILLISEC(rq->final_rtt);

			/* Bnode map stuff */
			if (external_node && level) {
				/*
				 * All the root_node bnodes which are in the
				 * bmaps of level smaller than `level' points to
				 * the same gnode which is rq->quadg.gnode[_EL(level-1+1)].
				 * This is because the inferior levels cannot
				 * have knowledge about the bordering gnode
				 * which is in an upper level, but it's necessary that
				 * they know which who the root_node borders on,
				 * so the get_route algorithm can descend to
				 * the inferior levels and it will still know
				 * what is the border node which is linked
				 * to the target gnode.
				 */
				for (i = 0; i < level; i++)
					radar_update_bmap(rq, i, level - 1);
				send_qspn_now[level - 1] = 1;
			}

			if (node_update || devs_update)
				node->flags |= MAP_UPDATE;

		}						/*for(level=0, ...) */

		updated_rnodes++;
	}							/*list_for(rq) */

	/* Burn the deads */
	if (updated_rnodes < me.cur_node->links)
		radar_remove_old_rnodes((char *) rnode_deleted);

	/* <<keep your room tidy... order, ORDER>> */
	if (!is_bufzero(rnode_added, sizeof(rnode_added)) ||
		!is_bufzero(rnode_deleted, sizeof(rnode_deleted))) {

		/***
		 * qsort the rnodes of me.cur_node and me.cur_quadg comparing
		 * their trtt */
		rnode_trtt_order(me.cur_node);
		for (i = 1; i < me.cur_quadg.levels; i++)
			if (TEST_BIT(rnode_added, i) || TEST_BIT(rnode_deleted, i))
				rnode_trtt_order(&me.cur_quadg.gnode[_EL(i)]->g);
		/**/
		/* adjust the rnode_pos variables in the ext_rnode_cache list */
		erc_reorder_rnodepos(&me.cur_erc, &me.cur_erc_counter,
							 me.cur_node);
	}

	/* Give a refresh to the kernel */
	if ((!is_bufzero(rnode_added, sizeof(rnode_added)) ||
		 routes_update) && !(me.cur_node->flags & MAP_HNODE))
		rt_rnodes_update(1);
}
  990. /*
  991. * add_radar_q
  992. *
  993. * It returns the radar_q struct which handles the pkt.from node.
  994. * If the node is not present in the radar_q, it is added, and the
  995. * relative struct will be returned.
  996. */
struct
radar_queue *
add_radar_q(PACKET pkt)
{
	map_node *rnode;
	quadro_group quadg;
	struct radar_queue *rq;
	u_int ret = 0;
	int dev_pos;

	if (me.cur_node->flags & MAP_HNODE) {
		/*
		 * We are hooking, we haven't yet an int_map, an ext_map,
		 * a stable ip, so we create fake nodes that will be delete after
		 * the hook.
		 */
		if (!(rq = find_ip_radar_q(&pkt.from))) {
			map_rnode rnn;

			/* Fake node: its single rnode points back at our own
			 * root node. */
			rnode = xmalloc(sizeof(map_node));
			setzero(rnode, sizeof(map_node));
			setzero(&rnn, sizeof(map_rnode));
			rnn.r_node = (int *) me.cur_node;
			rnode_add(rnode, &rnn);
		} else
			rnode = rq->node;
	}

	iptoquadg(pkt.from, me.ext_map, &quadg,
			  QUADG_GID | QUADG_GNODE | QUADG_IPSTART);

	if (!(me.cur_node->flags & MAP_HNODE)) {
		/* Not hooking: resolve pkt.from in the real int_map.
		 * `ret' != 0 means pkt.from lies outside our gnode
		 * (it is an external rnode). */
		iptomap((uintptr_t) me.int_map, pkt.from, me.cur_quadg.ipstart[1],
				&rnode);
		ret = quadg_gids_cmp(me.cur_quadg, quadg, 1);
	}
	/* Internal nodes are looked up by map node, external ones by IP. */
	if (!ret)
		rq = find_node_radar_q(rnode);
	else
		rq = find_ip_radar_q(&pkt.from);

	if (!rq) {
		/*
		 * If pkt.from isn't already in the queue, add it.
		 */
		rq = xzalloc(sizeof(struct radar_queue));

		if (ret)
			rq->node = (map_node *) RADQ_EXT_RNODE;
		else {
			rq->node = rnode;
			/* This pkt has been sent from another hooking
			 * node, let's remember this. */
			if (pkt.hdr.flags & HOOK_PKT)
				rq->node->flags |= MAP_HNODE;
		}

		if (pkt.hdr.flags & HOOK_PKT)
			rq->flags |= MAP_HNODE;

		inet_copy(&rq->ip, &pkt.from);
		memcpy(&rq->quadg, &quadg, sizeof(quadro_group));
		rq->dev[0] = pkt.dev;
		rq->dev_n++;

		clist_add(&radar_q, &radar_q_counter, rq);
	} else {
		/*
		 * Check if the input device is in the rq->dev array,
		 * if not add it.
		 */
		if (rq->dev_n < MAX_INTERFACES) {
			dev_pos = FIND_PTR(pkt.dev, rq->dev, rq->dev_n);
			if (dev_pos < 0)
				rq->dev[rq->dev_n++] = pkt.dev;
		}
	}

	return rq;
}
  1067. /*
  1068. * radar_exec_reply
  1069. *
  1070. * It reads the received ECHO_REPLY pkt and updates the radar
  1071. * queue, storing the calculated rtt and the other infos relative to the sender
  1072. * node.
  1073. */
  1074. int
  1075. radar_exec_reply(PACKET pkt)
  1076. {
  1077. struct timeval t;
  1078. struct radar_queue *rq;
  1079. u_int rtt_ms = 0;
  1080. int dev_pos;
  1081. gettimeofday(&t, 0);
  1082. /*
  1083. * Get the radar_queue struct relative to pkt.from
  1084. */
  1085. rq = add_radar_q(pkt);
  1086. dev_pos = ifs_get_pos(me.cur_ifs, me.cur_ifs_n, pkt.dev);
  1087. if (dev_pos < 0)
  1088. debug(DBG_NORMAL, "The 0x%x ECHO_REPLY pkt was received by a non "
  1089. "existent interface", pkt.hdr.id);
  1090. if (me.cur_node->flags & MAP_HNODE) {
  1091. if (pkt.hdr.flags & HOOK_PKT) {
  1092. u_char scanning;
  1093. memcpy(&scanning, pkt.msg, sizeof(u_char));
  1094. /*
  1095. * If the pkt.from node has finished his scan, and we
  1096. * never received one of its ECHO_ME pkts, and we are
  1097. * still scanning, set the hook_retry.
  1098. */
  1099. if (!scanning && !rq->pings &&
  1100. (radar_scan_mutex ||
  1101. radar_scans[dev_pos] <= MAX_RADAR_SCANS)) {
  1102. hook_retry = 1;
  1103. }
  1104. }
  1105. }
  1106. if (rq->pongs < radar_scans[dev_pos]) {
  1107. timersub(&t, &scan_start, &rq->rtt[(int) rq->pongs]);
  1108. /*
  1109. * Now we divide the rtt, because (t - scan_start) is the time
  1110. * the pkt used to reach B from A and to return to A from B
  1111. */
  1112. rtt_ms = MILLISEC(rq->rtt[(int) rq->pongs]) / 2;
  1113. MILLISEC_TO_TV(rtt_ms, rq->rtt[(int) rq->pongs]);
  1114. rq->pongs++;
  1115. }
  1116. return 0;
  1117. }
  1118. /*
  1119. * radar_recv_reply
  1120. *
  1121. * It handles the ECHO_REPLY pkts
  1122. */
  1123. int
  1124. radar_recv_reply(PACKET pkt)
  1125. {
  1126. if (!my_echo_id || !radar_scan_mutex || !total_radar_scans)
  1127. return -1;
  1128. if (pkt.hdr.id != my_echo_id) {
  1129. debug(DBG_NORMAL, "I received an ECHO_REPLY with id: 0x%x, but "
  1130. "my current ECHO_ME is 0x%x", pkt.hdr.id, my_echo_id);
  1131. return -1;
  1132. }
  1133. /*
  1134. * If the `alwd_rnodes_counter' counter isn't zero, verify that
  1135. * `pkt.from' is an allowed rnode, otherwise drop this pkt
  1136. */
  1137. if (alwd_rnodes_counter && !is_rnode_allowed(pkt.from, alwd_rnodes)) {
  1138. debug(DBG_INSANE, "Filtering 0x%x ECHO_REPLY", pkt.hdr.id);
  1139. return -1;
  1140. }
  1141. /*
  1142. * If the rnode is in restricted mode and we are not, drop the pkt.
  1143. * If we are in restricted mode and the rnode isn't, drop the pkt
  1144. */
  1145. if ((pkt.hdr.flags & RESTRICTED_PKT && !restricted_mode) ||
  1146. (!(pkt.hdr.flags & RESTRICTED_PKT) && restricted_mode))
  1147. return -1;
  1148. return radar_exec_reply(pkt);
  1149. }
  1150. /*
  1151. * radar_qspn_send_t
  1152. *
  1153. * This function is used only by radar_scan().
  1154. * It just call the qspn_send() function. We use a thread
  1155. * because the qspn_send() may sleep, and we don't want to halt the
  1156. * radar_scan().
  1157. */
  1158. void *
  1159. radar_qspn_send_t(void *level)
  1160. {
  1161. int *p;
  1162. u_char i;
  1163. p = (int *) level;
  1164. i = (u_char) * p;
  1165. xfree(p);
  1166. qspn_send(i);
  1167. return NULL;
  1168. }
  1169. /*
  1170. * radar_scan
  1171. *
  1172. * It starts the scan of the local area.
  1173. *
  1174. * It sends MAX_RADAR_SCANS packets in broadcast then it waits MAX_RADAR_WAIT
  1175. * and in the while the echo replies are gathered. After MAX_RADAR_WAIT it
  1176. * stops to receive echo replies and it does a statistical analysis of the
  1177. * gathered echo replies, it updates the r_nodes in the map and sends a qspn
  1178. * round if something is changed in the map and if the `activate_qspn' argument
  1179. * is non zero.
  1180. *
  1181. * It returns 1 if another radar_scan is in progress, -1 if something went
  1182. * wrong, 0 on success.
  1183. */
  1184. int
  1185. radar_scan(int activate_qspn)
  1186. {
  1187. pthread_t thread;
  1188. PACKET pkt;
  1189. int i, d, *p;
  1190. ssize_t err;
  1191. u_char echo_scan;
  1192. /* We are already doing a radar scan, that's not good */
  1193. if (radar_scan_mutex)
  1194. return 1;
  1195. radar_scan_mutex = 1;
  1196. /*
  1197. * We create the PACKET
  1198. */
  1199. setzero(&pkt, sizeof(PACKET));
  1200. inet_setip_bcast(&pkt.to, my_family);
  1201. my_echo_id = rand();
  1202. gettimeofday(&scan_start, 0);
  1203. /*
  1204. * Send a bouquet of ECHO_ME pkts
  1205. */
  1206. if (me.cur_node->flags & MAP_HNODE) {
  1207. pkt.hdr.sz = sizeof(u_char);
  1208. pkt.hdr.flags |= HOOK_PKT | BCAST_PKT;
  1209. pkt.msg = xmalloc(pkt.hdr.sz);
  1210. debug(DBG_INSANE, "Radar scan 0x%x activated", my_echo_id);
  1211. } else
  1212. total_radars++;
  1213. if (restricted_mode)
  1214. pkt.hdr.flags |= RESTRICTED_PKT;
  1215. /* Loop through the me.cur_ifs array, sending the bouquet using all the
  1216. * interfaces we have */
  1217. for (d = 0; d < me.cur_ifs_n; d++) {
  1218. pkt_add_dev(&pkt, &me.cur_ifs[d], 1);
  1219. pkt.sk = 0; /* Create a new socket */
  1220. /* Send MAX_RADAR_SCANS# packets using me.cur_ifs[d] as
  1221. * outgoing interface */
  1222. for (i = 0, echo_scan = 0; i < MAX_RADAR_SCANS; i++, echo_scan++) {
  1223. if (me.cur_node->flags & MAP_HNODE)
  1224. memcpy(pkt.msg, &echo_scan, sizeof(u_char));
  1225. err = send_rq(&pkt, 0, ECHO_ME, my_echo_id, 0, 0, 0);
  1226. if (err < 0) {
  1227. if (errno == ENODEV) {
  1228. /*
  1229. * The me.cur_ifs[d] device doesn't
  1230. * exist anymore. Delete it.
  1231. */
  1232. fatal("The device \"%s\" has been removed",
  1233. me.cur_ifs[d].dev_name);
  1234. ifs_del(me.cur_ifs, &me.cur_ifs_n, d);
  1235. d--;
  1236. } else
  1237. error(ERROR_MSG "Error while sending the"
  1238. " scan 0x%x... skipping",
  1239. ERROR_FUNC, my_echo_id);
  1240. break;
  1241. }
  1242. radar_scans[d]++;
  1243. total_radar_scans++;
  1244. }
  1245. if (!radar_scans[d])
  1246. error("radar_scan(): The scan 0x%x on the %s interface failed."
  1247. " Not a single scan was sent", my_echo_id,
  1248. pkt.dev->dev_name);
  1249. if (pkt.sk > 0)
  1250. inet_close(&pkt.sk);
  1251. }
  1252. pkt_free(&pkt, 1);
  1253. if (!total_radar_scans) {
  1254. error("radar_scan(): The scan 0x%x failed. It wasn't possible "
  1255. "to send a single scan", my_echo_id);
  1256. return -1;
  1257. }
  1258. xtimer(max_radar_wait, max_radar_wait << 1, &radar_wait_counter);
  1259. final_radar_queue();
  1260. radar_update_map();
  1261. if (activate_qspn)
  1262. for (i = 0; i < me.cur_quadg.levels; i++)
  1263. if (send_qspn_now[i]) {
  1264. p = xmalloc(sizeof(int));
  1265. *p = i;
  1266. /* We start a new qspn_round in the `i'-th level */
  1267. pthread_create(&thread, &radar_qspn_send_t_attr,
  1268. radar_qspn_send_t, (void *) p);
  1269. }
  1270. if (!(me.cur_node->flags & MAP_HNODE))
  1271. reset_radar();
  1272. radar_scan_mutex = 0;
  1273. return 0;
  1274. }
  1275. /*
  1276. * radard
  1277. *
  1278. * It sends back to rpkt.from the ECHO_REPLY pkt in reply to the ECHO_ME
  1279. * pkt received.
  1280. */
  1281. int
  1282. radard(PACKET rpkt)
  1283. {
  1284. PACKET pkt;
  1285. struct radar_queue *rq;
  1286. ssize_t err;
  1287. const char *ntop = 0;
  1288. int dev_pos;
  1289. u_char echo_scans_count;
  1290. if (alwd_rnodes_counter && !is_rnode_allowed(rpkt.from, alwd_rnodes)) {
  1291. debug(DBG_INSANE, "Filtering 0x%x ECHO_ME", rpkt.hdr.id);
  1292. return -1;
  1293. }
  1294. if ((rpkt.hdr.flags & RESTRICTED_PKT && !restricted_mode) ||
  1295. (!(rpkt.hdr.flags & RESTRICTED_PKT) && restricted_mode))
  1296. return -1;
  1297. dev_pos = ifs_get_pos(me.cur_ifs, me.cur_ifs_n, rpkt.dev);
  1298. if (dev_pos < 0)
  1299. debug(DBG_NORMAL, "The 0x%x ECHO_ME pkt was received by a non "
  1300. "existent interface", rpkt.hdr.id);
  1301. /* If we are hooking we reply only to others hooking nodes */
  1302. if (me.cur_node->flags & MAP_HNODE) {
  1303. if (rpkt.hdr.flags & HOOK_PKT) {
  1304. memcpy(&echo_scans_count, rpkt.msg, sizeof(u_char));
  1305. /*
  1306. * So, we are hooking, but we haven't yet started the
  1307. * first scan or we have done less scans than rpkt.from,
  1308. * this means that this node, who is hooking
  1309. * too and sent us this rpkt, has started the hook
  1310. * before us. If we are in a black zone, this flag
  1311. * will be used to decide which of the hooking nodes
  1312. * have to create the new gnode: if it is set we'll wait,
  1313. * the other hooking node will create the gnode, then we
  1314. * restart the hook. Clear?
  1315. */
  1316. if (!radar_scan_mutex
  1317. || echo_scans_count >= radar_scans[dev_pos])
  1318. hook_retry = 1;
  1319. } else {
  1320. /*debug(DBG_NOISE, "ECHO_ME pkt dropped: We are hooking"); */
  1321. return 0;
  1322. }
  1323. }
  1324. /* We create the ECHO_REPLY pkt */
  1325. setzero(&pkt, sizeof(PACKET));
  1326. pkt_addto(&pkt, &rpkt.from);
  1327. pkt_addsk(&pkt, rpkt.from.family, rpkt.sk, SKT_UDP);
  1328. if (me.cur_node->flags & MAP_HNODE) {
  1329. /*
  1330. * We attach in the ECHO_REPLY a flag that indicates if we have
  1331. * finished our radar_scan or not. This is usefull if we already
  1332. * sent all the ECHO_ME pkts of our radar scan and while we are
  1333. * waiting the MAX_RADAR_WAIT another node start the hooking:
  1334. * with this flag it can know if we came before him.
  1335. */
  1336. u_char scanning = 1;
  1337. pkt.hdr.sz = sizeof(u_char);
  1338. pkt.hdr.flags |= HOOK_PKT;
  1339. pkt.msg = xmalloc(pkt.hdr.sz);
  1340. if (radar_scans[dev_pos] == MAX_RADAR_SCANS)
  1341. scanning = 0;
  1342. memcpy(pkt.msg, &scanning, sizeof(u_char));
  1343. /*
  1344. * W Poetry Palazzolo, the enlightening holy garden.
  1345. * Sat Mar 12 20:41:36 CET 2005
  1346. */
  1347. }
  1348. if (restricted_mode)
  1349. pkt.hdr.flags |= RESTRICTED_PKT;
  1350. /* We send it */
  1351. err = send_rq(&pkt, 0, ECHO_REPLY, rpkt.hdr.id, 0, 0, 0);
  1352. pkt_free(&pkt, 0);
  1353. if (err < 0) {
  1354. error("radard(): Cannot send back the ECHO_REPLY to %s.", ntop);
  1355. return -1;
  1356. }
  1357. /*
  1358. * Ok, we have sent the reply, now we can update the radar_queue with
  1359. * calm.
  1360. */
  1361. if (radar_q) {
  1362. rq = add_radar_q(rpkt);
  1363. rq->pings++;
  1364. #ifdef DEBUG
  1365. if (server_opt.dbg_lvl && rq->pings == 1 &&
  1366. me.cur_node->flags & MAP_HNODE) {
  1367. ntop = inet_to_str(pkt.to);
  1368. debug(DBG_INSANE, "%s(0x%x) to %s", rq_to_str(ECHO_REPLY),
  1369. rpkt.hdr.id, ntop);
  1370. }
  1371. #endif
  1372. }
  1373. return 0;
  1374. }
  1375. /*
  1376. * refresh_hook_root_node
  1377. *
  1378. * At hooking the radar_scan doesn't have an int_map, so
  1379. * all the nodes it found are stored in fake nodes. When we finish the hook,
  1380. * instead, we have an int_map, so we convert all this fake nodes into real
  1381. * nodes. To do this we modify each rq->node of the radar_queue and recall the
  1382. * radar_update_map() func.
  1383. * rnode_list and qspn_b are also updated.
  1384. * Note: the me.cur_node must be deleted prior the call of this function.
  1385. */
  1386. int
  1387. refresh_hook_root_node(void)
  1388. {
  1389. struct radar_queue *rq;
  1390. map_node *rnode;
  1391. int ret;
  1392. rq = radar_q;
  1393. list_for(rq) {
  1394. ret = iptomap((uintptr_t) me.int_map, rq->ip, me.cur_quadg.ipstart[1],
  1395. &rnode);
  1396. if (ret)
  1397. rq->node = (map_node *) RADQ_EXT_RNODE;
  1398. else
  1399. rq->node = rnode;
  1400. }
  1401. radar_update_map();
  1402. /*
  1403. * Remove all the rnode_list structs which refers to the fake
  1404. * rnodes.
  1405. */
  1406. rnl_del_dead_rnode(&rlist, &rlist_counter, me.cur_node);
  1407. /* Do the same for the qspn_b buffer */
  1408. qspn_b_del_all_dead_rnodes();
  1409. return 0;
  1410. }
  1411. /*
  1412. * radar_daemon
  1413. *
  1414. * keeps the radar up until the end of the universe.
  1415. */
  1416. void *
  1417. radar_daemon(void *null)
  1418. {
  1419. /* If `radar_daemon_ctl' is set to 0 the radar_daemon will stop.
  1420. * It will restart when it becomes again 1 */
  1421. radar_daemon_ctl = 1;
  1422. debug(DBG_NORMAL, "Radar daemon up & running");
  1423. for (;;) {
  1424. while (!radar_daemon_ctl)
  1425. sleep(1);
  1426. radar_scan(1);
  1427. }
  1428. }
  1429. /*
  1430. * radar_wait_new_scan
  1431. *
  1432. * It sleeps until a new radar scan is sent
  1433. */
  1434. void
  1435. radar_wait_new_scan(void)
  1436. {
  1437. int old_echo_id, old_radar_wait_counter;
  1438. old_echo_id = my_echo_id;
  1439. old_radar_wait_counter = radar_wait_counter;
  1440. for (; old_echo_id == my_echo_id;) {
  1441. usleep(505050);
  1442. /* If the radar_wait_counter doesn't change, that means that
  1443. * the radar isn't active */
  1444. if (radar_wait_counter == old_radar_wait_counter)
  1445. break;
  1446. }
  1447. }
  1448. /*EoW*/