lw_AFTR: IP checksum required on generated packets.
samplevnf.git: VNFs/DPPD-PROX/handle_ipv6_tunnel.c
/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_table_hash.h>
#include <rte_ether.h>
#include <rte_version.h>
#include <rte_byteorder.h>

#include "prox_lua.h"
#include "prox_lua_types.h"

#include "tx_pkt.h"
#include "task_init.h"
#include "task_base.h"
#include "prox_port_cfg.h"
#include "prefetch.h"
#include "lconf.h"
#include "hash_utils.h"
#include "etypes.h"
#include "prox_cksum.h"
#include "defines.h"
#include "log.h"
#include "quit.h"
#include "prox_cfg.h"
#include "parse_utils.h"
#include "cfgfile.h"
#include "prox_shared.h"

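/* Compatibility shim: with DPDK releases older than 1.8, IPPROTO_IPIP is aliased
   to IPPROTO_IPV4 below. Both names denote IP protocol number 4 (an IPv4 packet
   carried inside another IP header), which is what the tunnel payload checks in
   this file test for. */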
#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define IPPROTO_IPIP IPPROTO_IPV4
#endif

struct ipv6_tun_dest {
        struct ipv6_addr  dst_addr;
        struct ether_addr dst_mac;
};

typedef enum ipv6_tun_dir_t {
        TUNNEL_DIR_ENCAP = 0,
        TUNNEL_DIR_DECAP = 1,
} ipv6_tun_dir_t;

struct task_ipv6_tun_base {
        struct task_base        base;
        struct ether_addr       src_mac;
        uint8_t                 core_nb;
        uint64_t                keys[64];
        struct rte_mbuf*        fake_packets[64];
        uint16_t                lookup_port_mask;  // Mask used before looking up the port
        void*                   lookup_table;      // Fast lookup table for bindings
        uint32_t                runtime_flags;
        int                     offload_crc;
};

struct task_ipv6_decap {
        struct task_ipv6_tun_base   base;
        struct ether_addr           dst_mac;
};

struct task_ipv6_encap {
        struct task_ipv6_tun_base   base;
        uint32_t                    ipaddr;
        struct ipv6_addr            local_endpoint_addr;
        uint8_t                     tunnel_hop_limit;
};

#define IPv6_VERSION 6
#ifndef IPPROTO_IPV4
#define IPPROTO_IPV4    4
#endif

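/* Build the 64-bit lookup key: the public IPv4 address (network byte order) in
   bits 16-47 and the masked public port in bits 0-15. */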
#define MAKE_KEY_FROM_FIELDS(ipv4_addr, port, port_mask) ( ((uint64_t)ipv4_addr << 16) | (port & port_mask) )

static int handle_ipv6_decap_bulk(struct task_base* tbase, struct rte_mbuf** rx_mbuf, const uint16_t n_pkts);
static int handle_ipv6_encap_bulk(struct task_base* tbase, struct rte_mbuf** rx_mbuf, const uint16_t n_pkts);

static void init_lookup_table(struct task_ipv6_tun_base* ptask, struct task_args *targ)
{
        const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

        /* The lookup table is a per-core data structure to reduce the
           memory footprint and improve cache utilization. Since
           operations on the hash table are not thread-safe, the data
           structure cannot be shared on a per-socket or system-wide
           basis. */
        ptask->lookup_table = prox_sh_find_core(targ->lconf->id, "ipv6_binding_table");
        if (NULL == ptask->lookup_table) {
                struct ipv6_tun_binding_table *table;
                PROX_PANIC(!strcmp(targ->tun_bindings, ""), "No tun bindings specified\n");
                int ret = lua_to_ip6_tun_binding(prox_lua(), GLOBAL, targ->tun_bindings, socket_id, &table);
                PROX_PANIC(ret, "Failed to read tun_bindings config:\n %s\n", get_lua_to_errors());

                struct rte_table_hash_key8_ext_params table_hash_params = {
                        .n_entries = (table->num_binding_entries * 4),
                        .n_entries_ext = (table->num_binding_entries * 2) >> 1,
                        .f_hash = hash_crc32,
                        .seed = 0,
                        .signature_offset = HASH_METADATA_OFFSET(8),  // Ignored for dosig tables
                        .key_offset = HASH_METADATA_OFFSET(0),
                };
                plogx_info("IPv6 Tunnel allocating lookup table on socket %d\n", socket_id);
                ptask->lookup_table = rte_table_hash_key8_ext_dosig_ops.
                                f_create(&table_hash_params, socket_id, sizeof(struct ipv6_tun_dest));
                PROX_PANIC(ptask->lookup_table == NULL, "Error creating IPv6 Tunnel lookup table");

                for (unsigned idx = 0; idx < table->num_binding_entries; idx++) {
                        int key_found = 0;
                        void* entry_in_hash = NULL;
                        struct ipv6_tun_dest data;
                        struct ipv6_tun_binding_entry* entry = &table->entry[idx];
                        uint64_t key = MAKE_KEY_FROM_FIELDS(rte_cpu_to_be_32(entry->public_ipv4), entry->public_port, ptask->lookup_port_mask);
                        rte_memcpy(&data.dst_addr, &entry->endpoint_addr, sizeof(struct ipv6_addr));
                        rte_memcpy(&data.dst_mac, &entry->next_hop_mac, sizeof(struct ether_addr));

                        int ret = rte_table_hash_key8_ext_dosig_ops.f_add(ptask->lookup_table, &key, &data, &key_found, &entry_in_hash);
                        PROX_PANIC(ret, "Error adding entry (%d) to binding lookup table", idx);
                        PROX_PANIC(key_found, "key_found!!! for idx=%d\n", idx);

#ifdef DBG_IPV6_TUN_BINDING
                        plog_info("Bind: %x:0x%x (port_mask 0x%x) key=0x%"PRIx64"\n", entry->public_ipv4, entry->public_port, ptask->lookup_port_mask, key);
                        plog_info("  -> "IPv6_BYTES_FMT" ("MAC_BYTES_FMT")\n", IPv6_BYTES(entry->endpoint_addr.bytes), MAC_BYTES(entry->next_hop_mac.addr_bytes));
                        plog_info("  -> "IPv6_BYTES_FMT" ("MAC_BYTES_FMT")\n", IPv6_BYTES(data.dst_addr.bytes), MAC_BYTES(data.dst_mac.addr_bytes));
                        plog_info("  -> entry_in_hash=%p\n", entry_in_hash);
#endif
                }
                plogx_info("IPv6 Tunnel created %d lookup table entries\n", table->num_binding_entries);

                prox_sh_add_core(targ->lconf->id, "ipv6_binding_table", ptask->lookup_table);
        }
}

static void init_task_ipv6_tun_base(struct task_ipv6_tun_base* tun_base, struct task_args* targ)
{
        memcpy(&tun_base->src_mac, find_reachable_port(targ), sizeof(tun_base->src_mac));

        tun_base->lookup_port_mask = targ->lookup_port_mask;  // Mask used before looking up the port

        init_lookup_table(tun_base, targ);

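        /* Each precomputed key is reached through a fake mbuf pointer placed
           sizeof(struct rte_mbuf) bytes before it, so that the hash table lookup,
           which reads the key at key_offset = HASH_METADATA_OFFSET(0) from each
           "packet", picks up keys[i] instead of real mbuf metadata. */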
        for (uint32_t i = 0; i < 64; ++i) {
                tun_base->fake_packets[i] = (struct rte_mbuf*)((uint8_t*)&tun_base->keys[i] - sizeof(struct rte_mbuf));
        }

        plogx_info("IPv6 Tunnel MAC="MAC_BYTES_FMT" port_mask=0x%x\n",
                  MAC_BYTES(tun_base->src_mac.addr_bytes), tun_base->lookup_port_mask);

        struct prox_port_cfg *port = find_reachable_port(targ);
        if (port) {
                tun_base->offload_crc = port->capabilities.tx_offload_cksum;
        }
}

static void init_task_ipv6_decap(struct task_base* tbase, struct task_args* targ)
{
        struct task_ipv6_decap* tun_task = (struct task_ipv6_decap*)tbase;
        struct task_ipv6_tun_base* tun_base = (struct task_ipv6_tun_base*)tun_task;

        init_task_ipv6_tun_base(tun_base, targ);
        tun_base->runtime_flags = targ->runtime_flags;

        memcpy(&tun_task->dst_mac, &targ->edaddr, sizeof(tun_task->dst_mac));
}

static void init_task_ipv6_encap(struct task_base* tbase, struct task_args* targ)
{
        struct task_ipv6_encap* tun_task = (struct task_ipv6_encap*)tbase;
        struct task_ipv6_tun_base *tun_base = (struct task_ipv6_tun_base*)tun_task;

        init_task_ipv6_tun_base(tun_base, targ);

        rte_memcpy(&tun_task->local_endpoint_addr, &targ->local_ipv6, sizeof(tun_task->local_endpoint_addr));
        tun_task->tunnel_hop_limit = targ->tunnel_hop_limit;
        tun_base->runtime_flags = targ->runtime_flags;
}

static struct task_init task_init_ipv6_decap = {
        .mode_str = "ipv6_decap",
        .init = init_task_ipv6_decap,
        .handle = handle_ipv6_decap_bulk,
        .size = sizeof(struct task_ipv6_decap)
};

static struct task_init task_init_ipv6_encap = {
        .mode_str = "ipv6_encap",
        .init = init_task_ipv6_encap,
        .handle = handle_ipv6_encap_bulk,
        .size = sizeof(struct task_ipv6_encap)
};

__attribute__((constructor)) static void reg_task_ipv6_decap(void)
{
        reg_task(&task_init_ipv6_decap);
}

__attribute__((constructor)) static void reg_task_ipv6_encap(void)
{
        reg_task(&task_init_ipv6_encap);
}

static inline uint8_t handle_ipv6_decap(struct task_ipv6_decap* ptask, struct rte_mbuf* rx_mbuf, struct ipv6_tun_dest* tun_dest);
static inline uint8_t handle_ipv6_encap(struct task_ipv6_encap* ptask, struct rte_mbuf* rx_mbuf, struct ipv6_tun_dest* tun_dest);

static inline int extract_key_fields( __attribute__((unused)) struct task_ipv6_tun_base* ptask, struct ipv4_hdr* pip4, ipv6_tun_dir_t dir, uint32_t* pAddr, uint16_t* pPort)
{
        *pAddr = (dir == TUNNEL_DIR_DECAP) ? pip4->src_addr : pip4->dst_addr;

        if (pip4->next_proto_id == IPPROTO_UDP) {
                struct udp_hdr* pudp = (struct udp_hdr *)(pip4 + 1);
                *pPort = rte_be_to_cpu_16((dir == TUNNEL_DIR_DECAP) ? pudp->src_port : pudp->dst_port);
        }
        else if (pip4->next_proto_id == IPPROTO_TCP) {
                struct tcp_hdr* ptcp = (struct tcp_hdr *)(pip4 + 1);
                *pPort = rte_be_to_cpu_16((dir == TUNNEL_DIR_DECAP) ? ptcp->src_port : ptcp->dst_port);
        }
        else {
                plog_warn("IPv6 Tunnel: IPv4 packet of unexpected type proto_id=0x%x\n", pip4->next_proto_id);
                *pPort = 0xffff;
                return -1;
        }

        return 0;
}

static inline void extract_key(struct task_ipv6_tun_base* ptask, struct ipv4_hdr* pip4, ipv6_tun_dir_t dir, uint64_t* pkey)
{
        uint32_t lookup_addr;
        uint16_t lookup_port;

        if (unlikely(extract_key_fields(ptask, pip4, dir, &lookup_addr, &lookup_port))) {
                plog_warn("IPv6 Tunnel: Unable to extract fields from packet\n");
                *pkey = 0xffffffffL;
                return;
        }

        *pkey = MAKE_KEY_FROM_FIELDS(lookup_addr, lookup_port, ptask->lookup_port_mask);
}

static inline struct ipv4_hdr* get_ipv4_decap(struct rte_mbuf *mbuf)
{
        struct ether_hdr* peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
        struct ipv6_hdr* pip6 = (struct ipv6_hdr *)(peth + 1);
        struct ipv4_hdr* pip4 = (struct ipv4_hdr*) (pip6 + 1);  // TODO - Skip Option headers

        return pip4;
}

static inline struct ipv4_hdr* get_ipv4_encap(struct rte_mbuf *mbuf)
{
        struct ether_hdr* peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
        struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);

        return pip4;
}

static inline void extract_key_decap(struct task_ipv6_tun_base* ptask, struct rte_mbuf *mbuf, uint64_t* pkey)
{
        extract_key(ptask, get_ipv4_decap(mbuf), TUNNEL_DIR_DECAP, pkey);
}

static inline void extract_key_decap_bulk(struct task_ipv6_tun_base* ptask, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
        for (uint16_t j = 0; j < n_pkts; ++j) {
                extract_key_decap(ptask, mbufs[j], &ptask->keys[j]);
        }
}

static inline void extract_key_encap(struct task_ipv6_tun_base* ptask, struct rte_mbuf *mbuf, uint64_t* pkey)
{
        extract_key(ptask, get_ipv4_encap(mbuf), TUNNEL_DIR_ENCAP, pkey);
}

static inline void extract_key_encap_bulk(struct task_ipv6_tun_base* ptask, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
        for (uint16_t j = 0; j < n_pkts; ++j) {
                extract_key_encap(ptask, mbufs[j], &ptask->keys[j]);
        }
}

__attribute__((cold)) static void handle_error(struct task_ipv6_tun_base* ptask, struct rte_mbuf* mbuf, ipv6_tun_dir_t dir)
{
        uint32_t lookup_addr;
        uint16_t lookup_port;
        uint64_t key;

        struct ipv4_hdr* pip4 = (dir == TUNNEL_DIR_DECAP) ? get_ipv4_decap(mbuf) : get_ipv4_encap(mbuf);
        extract_key_fields(ptask, pip4, dir, &lookup_addr, &lookup_port);
        extract_key(ptask, pip4, dir, &key);

        plog_warn("IPv6 Tunnel (%s) lookup failed for "IPv4_BYTES_FMT":%d [key=0x%"PRIx64"]\n",
                        (dir == TUNNEL_DIR_DECAP) ? "decap" : "encap",
                        IPv4_BYTES(((unsigned char*)&lookup_addr)), lookup_port, key);
}

static int handle_ipv6_decap_bulk(struct task_base* tbase, struct rte_mbuf** mbufs, const uint16_t n_pkts)
{
        struct task_ipv6_decap* task = (struct task_ipv6_decap *)tbase;
        uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
        struct ipv6_tun_dest* entries[64];
        uint8_t out[MAX_PKT_BURST];
        uint64_t lookup_hit_mask;

        prefetch_pkts(mbufs, n_pkts);

        // Lookup to verify packets are valid for their respective tunnels (their sending lwB4)
        extract_key_decap_bulk(&task->base, mbufs, n_pkts);
        rte_table_hash_key8_ext_dosig_ops.f_lookup(task->base.lookup_table, task->base.fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries);

        if (likely(lookup_hit_mask == pkts_mask)) {
                for (uint16_t j = 0; j < n_pkts; ++j) {
                        out[j] = handle_ipv6_decap(task, mbufs[j], entries[j]);
                }
        }
        else {
                for (uint16_t j = 0; j < n_pkts; ++j) {
                        if (unlikely(!((lookup_hit_mask >> j) & 0x1))) {
                                handle_error(&task->base, mbufs[j], TUNNEL_DIR_DECAP);
                                out[j] = OUT_DISCARD;
                                continue;
                        }
                        out[j] = handle_ipv6_decap(task, mbufs[j], entries[j]);
                }
        }

        return task->base.base.tx_pkt(tbase, mbufs, n_pkts, out);
}

static int handle_ipv6_encap_bulk(struct task_base* tbase, struct rte_mbuf** mbufs, const uint16_t n_pkts)
{
        struct task_ipv6_encap* task = (struct task_ipv6_encap *)tbase;
        uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
        struct ipv6_tun_dest* entries[64];
        uint64_t lookup_hit_mask;
        uint8_t out[MAX_PKT_BURST];

        prefetch_first(mbufs, n_pkts);

        extract_key_encap_bulk(&task->base, mbufs, n_pkts);
        rte_table_hash_key8_ext_dosig_ops.f_lookup(task->base.lookup_table, task->base.fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries);

        if (likely(lookup_hit_mask == pkts_mask)) {
                for (uint16_t j = 0; j < n_pkts; ++j) {
                        out[j] = handle_ipv6_encap(task, mbufs[j], entries[j]);
                }
        }
        else {
                for (uint16_t j = 0; j < n_pkts; ++j) {
                        if (unlikely(!((lookup_hit_mask >> j) & 0x1))) {
                                handle_error(&task->base, mbufs[j], TUNNEL_DIR_ENCAP);
                                out[j] = OUT_DISCARD;
                                continue;
                        }
                        out[j] = handle_ipv6_encap(task, mbufs[j], entries[j]);
                }
        }

        return task->base.base.tx_pkt(tbase, mbufs, n_pkts, out);
}

static inline uint8_t handle_ipv6_decap(struct task_ipv6_decap* ptask, struct rte_mbuf* rx_mbuf, __attribute__((unused)) struct ipv6_tun_dest* tun_dest)
{
        struct ether_hdr* peth = rte_pktmbuf_mtod(rx_mbuf, struct ether_hdr *);
        struct task_ipv6_tun_base* tun_base = (struct task_ipv6_tun_base*)ptask;
        struct ipv4_hdr* pip4 = NULL;

        if (unlikely(peth->ether_type != ETYPE_IPv6)) {
                plog_warn("Received non IPv6 packet on ipv6 tunnel port\n");
                // Drop packet
                return OUT_DISCARD;
        }

        struct ipv6_hdr* pip6 = (struct ipv6_hdr *)(peth + 1);
        int ipv6_hdr_len = sizeof(struct ipv6_hdr);

        // TODO - Skip over any IPv6 extension headers:
        //      While pip6->proto is one of (0, 43, 44, 50, 51, 60, 135), advance past that
        //      extension header using its own length field and add its size to ipv6_hdr_len.
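        /* Illustrative sketch only (not part of the original code, left disabled):
           a simplified extension-header walk matching the TODO above. It assumes
           the generic layout where byte 0 is the next-header value and byte 1 the
           header length in 8-byte units; AH and the Fragment header would need
           special-casing in real code. */
#if 0
        uint8_t next_hdr = pip6->proto;
        const uint8_t *ext = (const uint8_t *)(pip6 + 1);
        while (next_hdr == 0 || next_hdr == 43 || next_hdr == 44 || next_hdr == 50 ||
               next_hdr == 51 || next_hdr == 60 || next_hdr == 135) {
                int ext_len = 8 + ext[1] * 8;   // generic ext header: (hdr_ext_len + 1) * 8 bytes
                next_hdr = ext[0];              // next-header value is the first byte of every ext header
                ipv6_hdr_len += ext_len;
                ext += ext_len;
        }
        // The inner-protocol check below would then test next_hdr instead of pip6->proto.
#endif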

        if (unlikely(pip6->proto != IPPROTO_IPIP)) {
                plog_warn("Received non IPv4 content within IPv6 tunnel packet\n");
                // Drop packet
                return OUT_DISCARD;
        }

        // Discard IPv6 encapsulation
        rte_pktmbuf_adj(rx_mbuf, ipv6_hdr_len);
        peth = rte_pktmbuf_mtod(rx_mbuf, struct ether_hdr *);
        pip4 = (struct ipv4_hdr *)(peth + 1);

        // Restore Ethernet header
        ether_addr_copy(&ptask->base.src_mac, &peth->s_addr);
        ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
        peth->ether_type = ETYPE_IPv4;

#ifdef GEN_DECAP_IPV6_TO_IPV4_CKSUM
        // generate an IP checksum for ipv4 packet
        if (tun_base->runtime_flags & TASK_TX_CRC) {
                prox_ip_cksum(rx_mbuf, pip4, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), ptask->base.offload_crc);
        }
#endif

        return 0;
}

static inline uint8_t handle_ipv6_encap(struct task_ipv6_encap* ptask, struct rte_mbuf* rx_mbuf, __attribute__((unused)) struct ipv6_tun_dest* tun_dest)
{
        //plog_info("Found tunnel endpoint:"IPv6_BYTES_FMT" ("MAC_BYTES_FMT")\n", IPv6_BYTES(tun_dest->dst_addr), MAC_BYTES(tun_dest->dst_mac.addr_bytes));

        struct ether_hdr* peth = (struct ether_hdr *)(rte_pktmbuf_mtod(rx_mbuf, struct ether_hdr *));
        struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
        uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
        struct task_ipv6_tun_base* tun_base = (struct task_ipv6_tun_base*)ptask;

        if (unlikely((pip4->version_ihl >> 4) != 4)) {
                plog_warn("Received non IPv4 packet at ipv6 tunnel input\n");
                // Drop packet
                return OUT_DISCARD;
        }

        if (pip4->time_to_live) {
                pip4->time_to_live--;
        }
        else {
                plog_info("TTL = 0 => Dropping\n");
                return OUT_DISCARD;
        }
        pip4->hdr_checksum = 0;

        // Remove padding if any (we don't want to encapsulate garbage at end of IPv4 packet)
        int padding = rte_pktmbuf_pkt_len(rx_mbuf) - (ipv4_length + sizeof(struct ether_hdr));
        if (unlikely(padding > 0)) {
                rte_pktmbuf_trim(rx_mbuf, padding);
        }

        // Encapsulate
        const int extra_space = sizeof(struct ipv6_hdr);
        peth = (struct ether_hdr *)rte_pktmbuf_prepend(rx_mbuf, extra_space);
        if (unlikely(peth == NULL)) {
                plog_warn("Not enough headroom to prepend IPv6 header => Dropping\n");
                return OUT_DISCARD;
        }

        // Ethernet Header
        ether_addr_copy(&ptask->base.src_mac, &peth->s_addr);
        ether_addr_copy(&tun_dest->dst_mac, &peth->d_addr);
        peth->ether_type = ETYPE_IPv6;

        // Set up IPv6 Header
        struct ipv6_hdr* pip6 = (struct ipv6_hdr *)(peth + 1);
        pip6->vtc_flow = rte_cpu_to_be_32(IPv6_VERSION << 28);
        pip6->proto = IPPROTO_IPIP;
        pip6->payload_len = rte_cpu_to_be_16(ipv4_length);
        pip6->hop_limits = ptask->tunnel_hop_limit;
        rte_memcpy(pip6->dst_addr, &tun_dest->dst_addr, sizeof(pip6->dst_addr));
        rte_memcpy(pip6->src_addr, &ptask->local_endpoint_addr, sizeof(pip6->src_addr));

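        /* offload_crc was taken from the TX port's checksum-offload capability at
           init time; prox_ip_cksum() uses it to choose between leaving the IPv4
           header checksum to the NIC and recomputing it in software. */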
        if (tun_base->runtime_flags & TASK_TX_CRC) {
                // We modified the TTL in the IPv4 header, so the IPv4 checksum has to be recomputed
#define TUNNEL_L2_LEN (sizeof(struct ether_hdr) + sizeof(struct ipv6_hdr))
                prox_ip_cksum(rx_mbuf, pip4, TUNNEL_L2_LEN, sizeof(struct ipv4_hdr), ptask->base.offload_crc);
        }
        return 0;
}