Vincent Tech Blog

Thursday, March 9, 2017

Intrusion Prevention System - Snort

A typical process flow:


 221         /* Not a completely ideal place for this since any entries added on the
 222          * PacketCallback -> ProcessPacket -> Preprocess trail will get
 223          * obliterated - right now there isn't anything adding entries there.
 224          * Really need it here for stream5 clean exit, since all of the
 225          * flushed, reassembled packets are going to be injected directly into
 226          * this function and there may be enough that the obfuscation entry
 227          * table will overflow if we don't reset it.  Putting it here does
 228          * have the advantage of fewer entries per logging cycle */




SnortMain -> PacketLoop -> PacketCallback -> ProcessPacket -> Preprocess ->Detect ->fpEvalPacket ->fpEvalHeaderTcp

Wednesday, May 18, 2016

Patch to make mTCP running in a VM environment send packets between a VM and a physical server across switches

When running mTCP in a VM environment such as VMware ESXi or KVM, the source MAC is zero (see https://github.com/eunyoung14/mtcp/issues/51), which can result in packets being dropped. The following patch avoids having to add a dedicated DPDK PMD driver for mTCP, saving the porting effort.




 diff --git a/mtcp/src/io_module.c b/mtcp/src/io_module.c
 index ad3e01d..83e0893 100644
 --- a/mtcp/src/io_module.c
 +++ b/mtcp/src/io_module.c
 @@ -63,6 +63,22 @@ GetNumQueues()
     return queue_cnt;
  }
  /*----------------------------------------------------------------------------*/
 +
 +static int GetPortIndex(char *dev_name)
 +{
 +    char *p = dev_name;
 +    long val = -1;
 +    while (*p) { // While there are more characters to process...
 +        if (isdigit(*p)) { // Upon finding a digit, ...
 +            val = strtol(p, &p, 10); // Read a number, ...
 +        } else {
 +            p++;
 +        }
 +    }
 +    return (int)val;
 +}
 +
 +
  int
  SetInterfaceInfo(char* dev_name_list)
  {
 @@ -243,9 +259,10 @@ SetInterfaceInfo(char* dev_name_list)
                     CONFIG.eths[eidx].ip_addr = *(uint32_t *)&sin;
                 }
 -                if (ioctl(sock, SIOCGIFHWADDR, &ifr) == 0 ) {
 +                if(strstr(iter_if->ifa_name, "dpdk") != NULL) {
 +                    ret = GetPortIndex(iter_if->ifa_name);
                     for (j = 0; j < ETH_ALEN; j ++) {
 -                        CONFIG.eths[eidx].haddr[j] = ifr.ifr_addr.sa_data[j];
 +                        CONFIG.eths[eidx].haddr[j] = ports_eth_addr[ret].addr_bytes[j];
                     }
                 }

DPDK pktgen to generate SYN flood

hack patch to make pktgen to do syn flood:



diff --git a/app/cmd-functions.c b/app/cmd-functions.c
 index b2fda7c..c348e73 100644
 --- a/app/cmd-functions.c
 +++ b/app/cmd-functions.c
 @@ -303,6 +303,8 @@ const char *help_info[] = {
     "pkt.size max <portlist> value   - Set pkt size maximum address",
     "pkt.size inc <portlist> value   - Set pkt size increment address",
     "range <portlist> <state>      - Enable or Disable the given portlist for sending a range of packets",
 +    "range.proto <portlist> [tcp|udp|icmp]",
 +    "                  - Set ip proto for sending a range of packets",
     "",
     "<<PageBreak>>",
     "    Flags: P---------------- - Promiscuous mode enabled",
 diff --git a/app/pktgen-tcp.c b/app/pktgen-tcp.c
 index 3c8a853..9d12a88 100644
 --- a/app/pktgen-tcp.c
 +++ b/app/pktgen-tcp.c
 @@ -69,6 +69,26 @@
  #include "pktgen-tcp.h"
 +uint64_t xor_seed[ 2 ];
 +
 +static inline uint64_t
 +xor_next(void) {
 +    uint64_t s1 = xor_seed[ 0 ];
 +    const uint64_t s0 = xor_seed[ 1 ];
 +
 +    xor_seed[ 0 ] = s0;
 +    s1 ^= s1 << 23;                 /* a */
 +    return ( xor_seed[ 1 ] = ( s1 ^ s0 ^ ( s1 >> 17 ) ^ ( s0 >> 26 ) ) ) +
 +        s0;               /* b, c */
 +}
 +
 +static __inline__ uint32_t
 +pktgen_default_rnd_func(void)
 +{
 +    return xor_next();
 +}
 +
 +
  /**************************************************************************//**
  *
  * pktgen_tcp_hdr_ctor - TCP header constructor routine.
 @@ -100,10 +120,10 @@ pktgen_tcp_hdr_ctor(pkt_seq_t *pkt, tcpip_t *tip, int type __rte_unused)
     tip->tcp.sport   = htons(pkt->sport);
     tip->tcp.dport   = htons(pkt->dport);
 -    tip->tcp.seq    = htonl(DEFAULT_PKT_NUMBER);
 -    tip->tcp.ack    = htonl(DEFAULT_ACK_NUMBER);
 +    tip->tcp.seq    = htonl(pktgen_default_rnd_func());
 +    tip->tcp.ack    = 0;
     tip->tcp.offset   = ((sizeof(tcpHdr_t) / sizeof(uint32_t)) << 4);   /* Offset in words */
 -    tip->tcp.flags   = ACK_FLAG;                     /* ACK */
 +    tip->tcp.flags   = SYN_FLAG;                     /* ACK */
     tip->tcp.window   = htons(DEFAULT_WND_SIZE);
     tip->tcp.urgent   = 0;


root@pktgen-template:/home/admin/pktgen-dpdk/dpdk/examples/pktgen-dpdk# ./app/app/x86_64-native-linuxapp-gcc/pktgen -c ff   -- -P -m "[0:0-7].0 "
 Copyright (c) <2010-2016>, Intel Corporation. All rights reserved.
   Pktgen created by: Keith Wiles -- >>> Powered by Intel® DPDK <<<

Lua 5.3.2  Copyright (C) 1994-2015 Lua.org, PUC-Rio
>>> Packet Burst 32, RX Desc 512, TX Desc 512, mbufs/port 4096, mbuf cache 512

=== port to lcore mapping table (# lcores 8) ===
   lcore:     0     1     2     3     4     5     6     7
port   0:  D: T  0: 1  0: 1  0: 1  0: 1  0: 1  0: 1  0: 1 =  1: 8
Total   :  1: 1  0: 1  0: 1  0: 1  0: 1  0: 1  0: 1  0: 1
    Display and Timer on lcore 0, rx:tx counts per port/lcore

Configuring 1 ports, MBUF Size 1920, MBUF Cache Size 512
Lcore:
    0, RX-TX
                RX( 1): ( 0: 0)
                TX( 1): ( 0: 0)
    1, TX-Only
                TX( 1): ( 0: 1)
    2, TX-Only
                TX( 1): ( 0: 2)
    3, TX-Only
                TX( 1): ( 0: 3)
    4, TX-Only
                TX( 1): ( 0: 4)
    5, TX-Only
                TX( 1): ( 0: 5)
    6, TX-Only
                TX( 1): ( 0: 6)
    7, TX-Only
                TX( 1): ( 0: 7)

Port :
    0, nb_lcores  8, private 0x8f0690, lcores:  0  1  2  3  4  5  6  7



** Dev Info (rte_vmxnet3_pmd:0) **
   max_vfs        :   0 min_rx_bufsize    :1646 max_rx_pktlen : 16384 max_rx_queues         :  16 max_tx_queues:   8
   max_mac_addrs  :   1 max_hash_mac_addrs:   0 max_vmdq_pools:     0
   rx_offload_capa:  13 tx_offload_capa   :  45 reta_size     :     0 flow_type_rss_offloads:0000000000000514
   vmdq_queue_base:   0 vmdq_queue_num    :   0 vmdq_pool_base:     0
** RX Conf **
   pthreash       :   0 hthresh          :   0 wthresh        :     0
   Free Thresh    :   0 Drop Enable      :   0 Deferred Start :     0
** TX Conf **
   pthreash       :   0 hthresh          :   0 wthresh        :     0
   Free Thresh    :   0 RS Thresh        :   0 Deferred Start :     0 TXQ Flags:00000200

Initialize Port 0 -- TxQ 8, RxQ 1,  Src MAC 00:50:56:86:10:76
Pktgen > load tcp.txt
Pktgen> start 0
Pktgen> stop 0
root@pktgen-template:/home/admin/pktgen-dpdk/dpdk/examples/pktgen-dpdk# cat tcp.txt
#
# Pktgen - Ver: 2.9.17 (DPDK 16.04.0-rc2)
# Copyright (c) <2010-2016>, Intel Corporation. All rights reserved., Powered by Intel® DPDK

# Command line arguments: (DPDK args are defaults)
# ./app/app/x86_64-native-linuxapp-gcc/pktgen -c ff -n 3 -m 512 --proc-type primary -- -P -m [0:1-7].0

#######################################################################
# Pktgen Configuration script information:
#   GUI socket is Not Enabled
#   Flags 00040004
#   Number of ports: 1
#   Number ports per page: 4
#   Number descriptors: RX 512 TX: 512
#   Promiscuous mode is Enabled


#######################################################################
# Global configuration:
geometry 132x44
mac_from_arp disable

######################### Port  0 ##################################
#
# Port:  0, Burst: 32, Rate:100%, Flags:c0000010, TX Count:Forever
#           SeqCnt:0, Prime:1 VLAN ID:0001, Link:
#
# Set up the primary port information:
set 0 count 0
set 0 size 64
set 0 rate 100
set 0 burst 32
set 0 sport 1234
set 0 dport 5678
set 0 prime 1
type ipv4 0
proto tcp 0
set ip dst 0 10.1.72.17
#set ip dst 0 10.1.72.8
set ip src 0 10.1.72.154/24
set mac 0 00:23:E9:63:5B:83
#set mac 0 00:50:56:86:84:90
vlanid 0 1

pattern 0 zero
user.pattern 0 0123456789abcdef

latency 0 disable
mpls 0 disable
mpls_entry 0 0
qinq 0 disable
qinqids 0 0 0
gre 0 disable
gre_eth 0 disable
gre_key 0 0
#
# Port flag values:
icmp.echo 0 disable
pcap 0 disable
range 0 enable
process 0 disable
capture 0 disable
rxtap 0 disable
txtap 0 disable
vlan 0 disable

#
# Range packet information:
src.mac start 0 00:50:56:86:10:76
src.mac min 0 00:00:00:00:00:00
src.mac max 0 00:00:00:00:00:00
src.mac inc 0 00:00:00:00:00:00
dst.mac start 0 00:23:E9:63:5B:83
#dst.mac start 0 00:50:56:86:84:90
dst.mac min 0 00:00:00:00:00:00
dst.mac max 0 00:00:00:00:00:00
dst.mac inc 0 00:00:00:00:00:00

src.ip start 0 10.1.72.154
src.ip min 0 10.1.72.154
src.ip max 0 10.1.72.254
src.ip inc 0 0.0.0.1

dst.ip start 0 10.1.72.17
dst.ip min 0 10.1.72.17
dst.ip max 0 10.1.72.17
dst.ip inc 0 0.0.0.1

#dst.ip start 0 10.1.72.8
#dst.ip min 0 10.1.72.8
#dst.ip max 0 10.1.72.8
#dst.ip inc 0 0.0.0.1

src.port start 0 1025
src.port min 0 1025
src.port max 0 65512
src.port inc 0 1

dst.port start 0 80
dst.port min 0 0
dst.port max 0 0
dst.port inc 0 0

vlan.id start 0 1
vlan.id min 0 1
vlan.id max 0 4095
vlan.id inc 0 0

pkt.size start 0 64
pkt.size min 0 64
pkt.size max 0 1518
pkt.size inc 0 0

#
# Set up the sequence data for the port.
set 0 seqCnt 0

################################ Done #################################

Wednesday, March 23, 2016

Patch draft to make mTCP+DPDK work in vlan tagged network

Here is a patch idea draft to make mTCP + DPDK work in vlan tagged environment,  the next thing is to figure out how to run mTCP + DPDK in vlan tagged VMware ESXi environment, which would be great to run mTCP + DPDK in VMware ESXi VM and easy to clone the VM for everybody need it

 diff --git a/mtcp/src/dpdk_module.c b/mtcp/src/dpdk_module.c  

 index 33d349e..3c08e25 100644
 --- a/mtcp/src/dpdk_module.c
 +++ b/mtcp/src/dpdk_module.c
 @@ -66,7 +66,7 @@ static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
  /* packet memory pools for storing packet bufs */
  static struct rte_mempool *pktmbuf_pool[MAX_CPUS] = {NULL};
 -//#define DEBUG                1
 +#define DEBUG             1
  #ifdef DEBUG
  /* ethernet addresses of ports */
  static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
 @@ -79,7 +79,8 @@ static struct rte_eth_conf port_conf = {
         .split_hdr_size =    0,
         .header_split  =    0, /**< Header Split disabled */
         .hw_ip_checksum =    1, /**< IP checksum offload enabled */
 -        .hw_vlan_filter =    0, /**< VLAN filtering disabled */
 +        .hw_vlan_filter =    1, /**< VLAN filtering disabled */
 +        .hw_vlan_strip =    1, /**< VLAN strip enabled */
         .jumbo_frame  =    0, /**< Jumbo Frame Support disabled */
         .hw_strip_crc  =    1, /**< CRC stripped by hardware */
     },
 @@ -127,6 +128,7 @@ static const struct rte_eth_txconf tx_conf = {
     .txq_flags =          0x0,
  };
 +
  struct mbuf_table {
     unsigned len; /* length of queued packets */
     struct rte_mbuf *m_table[MAX_PKT_BURST];
 @@ -266,6 +268,8 @@ dpdk_send_pkts(struct mtcp_thread_context *ctxt, int nif)
                       ctxt->cpu, i, nif);
                 exit(EXIT_FAILURE);
             }
 +            dpc->wmbufs[nif].m_table[i]->ol_flags = PKT_TX_VLAN_PKT;
 +            dpc->wmbufs[nif].m_table[i]->vlan_tci = 4094;
         }
         /* reset the len of mbufs var after flushing of packets */
         dpc->wmbufs[nif].len = 0;
 @@ -534,6 +538,12 @@ dpdk_load_module(void)
             if (ret < 0)
                 rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
                      ret, (unsigned) portid);
 +
 +            ret = rte_eth_dev_vlan_filter(portid, 4094, 1);
 +
 +            if (ret < 0)
 +                rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
 +                    ret, (unsigned) portid);
             /* init one RX queue per CPU */
             fflush(stdout);

Friday, March 18, 2016

Patch to make lighttpd run properly on multiple cores with mTCP, using the configuration below

diff --git a/apps/lighttpd-1.4.32/src/server.c b/apps/lighttpd-1.4.32/src/server.c
index 7c76fd7..f0dde58 100644
--- a/apps/lighttpd-1.4.32/src/server.c
+++ b/apps/lighttpd-1.4.32/src/server.c
@@ -1213,7 +1213,8 @@ int
 main(int argc, char **argv) {
 #ifdef MULTI_THREADED
        server **srv_states = NULL;
-       char *conf_file = NULL;
+       //char *conf_file = NULL;
+       char *conf_file = "/etc/mtcp/config/m-lighttpd.conf";
 #ifdef USE_MTCP
        struct mtcp_conf mcfg;
 #endif
@@ -1594,7 +1595,7 @@ main(int argc, char **argv) {
        mcfg.num_cores = cpus;
        mtcp_setconf(&mcfg);
        /* initialize the mtcp context */
-       if (mtcp_init("mtcp.conf")) {
+       if (mtcp_init("/etc/mtcp/config/lighttpd-mtcp.conf")) {
                fprintf(stderr, "Failed to initialize mtcp\n");
                goto clean_up;
        }

diff --git a/mtcp/src/config.c b/mtcp/src/config.c
index c4faea5..b4e24d0 100644
--- a/mtcp/src/config.c
+++ b/mtcp/src/config.c
@@ -23,8 +23,8 @@
 #define MAX_OPTLINE_LEN 1024
 #define ALL_STRING "all"

-static const char *route_file = "config/route.conf";
-static const char *arp_file = "config/arp.conf";
+static const char *route_file = "/etc/mtcp/config/route.conf";
+static const char *arp_file = "/etc/mtcp/config/arp.conf";


the configuration directory looks like:

root@pktgen:/home/pktgen/mtcp# ls -l /etc/mtcp/config/
total 48
-rw-r--r-- 1 root root   530 Mar  4 14:18 arp.conf
-rw-r--r-- 1 root root  1360 Nov 13 10:34 brute-shake.conf
drwxr-xr-x 2 root root  4096 Mar  4 14:43 conf.d
-rw-r--r-- 1 root root  1370 Nov 13 10:32 epwget.conf
-rw-r--r-- 1 root root  1237 Mar  4 14:15 lighttpd-mtcp.conf
-rw-r--r-- 1 root root 11857 Mar  4 14:40 m-lighttpd.conf
-rw-r--r-- 1 root root  3235 Mar  4 14:42 modules.conf
-rw-r--r-- 1 root root   646 Nov 12 20:18 mtcp.conf
-rw-r--r-- 1 root root   352 Mar  4 14:19 route.conf
-rw-r--r-- 1 root root  1366 Nov 13 10:38 synflood.conf


top output:


top - 14:14:15 up 18 days, 35 min,  4 users,  load average: 7.98, 5.51, 2.53
Threads: 304 total,   9 running, 295 sleeping,   0 stopped,   0 zombie

  PID USER      PR  NI    VIRT    RES    SHR S %CPU %MEM     TIME+ COMMAND                                                                                                                                                                  P
15707 root      20   0 14.071g 0.010t   9680 R 99.9 14.9   5:44.92 lighttpd -n 8 -f /etc/mtcp/config/m-lighttpd.conf                                                                                                                        1
15730 root      20   0 14.071g 0.010t   9680 R 99.9 14.9   5:44.93 lighttpd -n 8 -f /etc/mtcp/config/m-lighttpd.conf                                                                                                                        0
15708 root      20   0 14.071g 0.010t   9680 R 99.7 14.9   5:44.95 lighttpd -n 8 -f /etc/mtcp/config/m-lighttpd.conf                                                                                                                        2
15709 root      20   0 14.071g 0.010t   9680 R 99.7 14.9   5:45.08 lighttpd -n 8 -f /etc/mtcp/config/m-lighttpd.conf                                                                                                                        3
15710 root      20   0 14.071g 0.010t   9680 R 99.7 14.9   5:44.99 lighttpd -n 8 -f /etc/mtcp/config/m-lighttpd.conf                                                                                                                        4
15711 root      20   0 14.071g 0.010t   9680 R 99.7 14.9   5:44.94 lighttpd -n 8 -f /etc/mtcp/config/m-lighttpd.conf                                                                                                                        5
15712 root      20   0 14.071g 0.010t   9680 R 99.7 14.9   5:44.89 lighttpd -n 8 -f /etc/mtcp/config/m-lighttpd.conf                                                                                                                        6
15713 root      20   0 14.071g 0.010t   9680 R 99.7 14.9   5:44.96 lighttpd -n 8 -f /etc/mtcp/config/m-lighttpd.conf                

How to re-injecting TCP segment after TCP 4-way close with scapy

At work, I do a lot of in-house reproduction of customer issues with many different tools; scapy is one I use often to simulate odd TCP behavior. For example, a customer has a PoS terminal that establishes TCP connections to our load balancer. The terminal would FIN+ACK the TCP connection after sending a transaction request (PSH+ACK), our load balancer would FIN+ACK, and the PoS terminal would send the final ACK to close the TCP connection. This is all well and good.

Here is an interesting problem, though: intermittently, some customers' PoS terminals would re-send the transaction request (PSH+ACK) about ~1 second after the TCP 4-way close. This, of course, will be dropped by the load balancer with an RST, since the TCP connection has been closed and its flow state in memory has been cleared. I think this is a good example of using scapy to simulate the PoS terminal client behavior, and it shows how flexible scapy is at simulating odd TCP behavior; see the script on my github:




https://github.com/vincentmli/bash_scripts/blob/master/scapy-http-responder/pshack-after-tcp4wayclose.py

Tuesday, January 19, 2016

Syn flood network virtual server in line rate by mTCP and DPDK

There is a need at my work to come up with a solution to TCP SYN flood a subnet of network destination addresses (a network virtual server), like 10.1.1.0/24 or even a wildcard virtual server 0.0.0.0/0, at line-rate speed in order to validate the FPGA hardware acceleration of SYN cookie handling for network virtual servers.

hping3 is well-known open source software that can be used for TCP SYN flooding via a kernel raw socket underneath, but only one destination IP address can be given to hping3 to flood at a time, and the kernel raw socket still uses the Linux kernel driver, which is not as fast as a DPDK poll mode driver.

And here mTCP and DPDK come to the rescue: with them I could SYN flood a network virtual server at line-rate speed. Here is the mTCP patch idea:

1, add libcidr and link example synflood code with libcidr to parse IP CIDR format at command line
2, modify mTCP mtcp_init_rss to add number of destination addresses as one of parameter
3, comment out the receive side scaling (RSS) for CPU affinity check, otherwise perf top shows GetRSSHash high cpu and low packet rate, as mtcp_connect->FetchAddress->GetRSSCPUCore->GetRSSHash
4, modify mtcp/src/ip_out.c to generate random source ip, be aware not to use rand() as it is not thread-safe function which could cause spin lock contention(_raw_spin_lock high cpu cycles from perf top and low packet rate also see make your program slower with threads  )


detail patch below:



diff --git a/apps/example/synflood.c b/apps/example/synflood.c
index be5129b..9394a0b 100644
--- a/apps/example/synflood.c
+++ b/apps/example/synflood.c
@@ -18,6 +18,7 @@
 #include
 #include
+#include <libcidr.h>
 #include "cpu.h"
 #include "rss.h"
 #include "http_parsing.h"
@@ -31,6 +32,7 @@

 #define IP_RANGE 16
 #define MAX_IP_STR_LEN 16
+#define MAX_DADDR 254

 #define BUF_SIZE (8*1024)

@@ -71,6 +73,16 @@ static char url[MAX_URL_LEN + 1];
 static in_addr_t daddr;
 static in_port_t dport;
 static in_addr_t saddr;
+
+static struct in_addr minip, ip_index, dest[MAX_DADDR];
+static int a, b, c, d;
+static int ret;
+static int num_daddr;
+static CIDR *addr, *addr2;
+static char *astr;
+static char *szIP;
+static const char *cstr;
+
 /*----------------------------------------------------------------------------*/
 static int total_flows;
 static int flows[MAX_CPUS];
@@ -165,6 +177,8 @@ DestroyContext(thread_context_t ctx)
        mtcp_destroy_context(ctx->mctx);
        free(ctx);
 }
+
+
 /*----------------------------------------------------------------------------*/
 inline int
 CreateConnection(thread_context_t ctx)
@@ -174,6 +188,7 @@ CreateConnection(thread_context_t ctx)
        struct sockaddr_in addr;
        int sockid;
        int ret;
+       int off = 0;
        sockid = mtcp_socket(mctx, AF_INET, SOCK_STREAM, 0);
        if (sockid < 0) {
@@ -187,6 +202,9 @@ CreateConnection(thread_context_t ctx)
                exit(-1);
        }

+       off = ctx->started++ % num_daddr;
+       daddr = dest[off].s_addr;
+
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = daddr;
        addr.sin_port = dport;
@@ -201,7 +219,6 @@ CreateConnection(thread_context_t ctx)
                }
        }

-       ctx->started++;
        ctx->pending++;
        ctx->stat.connects++;

@@ -514,7 +531,7 @@ RunWgetMain(void *arg)
        g_stat[core] = &ctx->stat;
        srand(time(NULL));

-       mtcp_init_rss(mctx, saddr, num_ip, daddr, dport);
+       mtcp_init_rss(mctx, saddr, num_ip, daddr, num_daddr, dport);

        n = flows[core];
        if (n == 0) {
@@ -674,6 +691,38 @@ main(int argc, char **argv)
                return FALSE;
        }

+       astr = NULL;
+       addr = cidr_from_str(argv[1]);
+       if(addr ==NULL) {
+               TRACE_CONFIG("Error: Couldn't parse address %s\n", argv[1]);
+               return FALSE;
+       }
+
+        addr2 = cidr_addr_hostmin(addr);
+        astr = cidr_to_str(addr2, CIDR_ONLYADDR);
+
+        ret = sscanf(astr, "%i.%i.%i.%i", &a, &b, &c, &d);
+        if (ret != 4 ) {
+               fprintf(stderr, "Error: Invalid syntax.\n");
+        }
+        minip.s_addr = a << 24 | b << 16 | c << 8 | d;
+
+        /* Num of hosts */
+        cstr = cidr_numhost(addr);
+        num_daddr = atoi(cstr);
+       TRACE_CONFIG("%s: %d\n", "NumOfDestinationHosts", num_daddr);
+
+        for( i=0; i<num_daddr; i++)
+        {
+               ip_index = minip;
+               ip_index.s_addr += i;
+               ip_index.s_addr = htonl(ip_index.s_addr);
+               dest[i] = ip_index;
+               //szIP = inet_ntoa(ip_index);
+               //szIP = inet_ntoa(dest[i]);
+               //TRACE_CONFIG("IP: %s\n", szIP);
+        }
+
        char* slash_p = strchr(argv[1], '/');
        if (slash_p) {
                strncpy(host, argv[1], slash_p - argv[1]);
@@ -683,8 +732,13 @@ main(int argc, char **argv)
                strncpy(url, "/", 1);
        }

-       daddr = inet_addr(host);
+       daddr = inet_addr(astr);
        dport = htons(80);
+
+       free(astr);
+       cidr_free(addr);
+       cidr_free(addr2);
+
        saddr = INADDR_ANY;

        total_flows = atoi(argv[2]);
diff --git a/mtcp/src/addr_pool.c b/mtcp/src/addr_pool.c
index 2bc4944..5482cb0 100644
--- a/mtcp/src/addr_pool.c
+++ b/mtcp/src/addr_pool.c
@@ -7,6 +7,7 @@

 #define MIN_PORT (1025)
 #define MAX_PORT (65535 + 1)
+#define MAX_NUM_DADDR  254

 /*----------------------------------------------------------------------------*/
 struct addr_entry
@@ -23,10 +24,12 @@ struct addr_map
 struct addr_pool
 {
        struct addr_entry *pool;                /* address pool */
-       struct addr_map *mapper;                /* address map  */
+       struct addr_map *mapper[MAX_NUM_DADDR];         /* address map  */

        uint32_t addr_base;                             /* in host order */
+       uint32_t daddr_base;                            /* in host order */
        int num_addr;                                   /* number of addresses in use */
+       int num_daddr;                                  /* number of addresses in use */

        int num_entry;
        int num_free;
@@ -38,11 +41,11 @@ struct addr_pool
 };
 /*----------------------------------------------------------------------------*/
 addr_pool_t
-CreateAddressPool(in_addr_t addr_base, int num_addr)
+CreateAddressPool(in_addr_t addr_base, int num_addr,  in_addr_t daddr_base, int num_daddr)
 {
        struct addr_pool *ap;
        int num_entry;
-       int i, j, cnt;
+       int i, j, k, cnt;
        in_addr_t addr;
        uint32_t addr_h;

@@ -51,7 +54,7 @@ CreateAddressPool(in_addr_t addr_base, int num_addr)
                return NULL;

        /* initialize address pool */
-       num_entry = num_addr * (MAX_PORT - MIN_PORT);
+       num_entry = num_addr * num_daddr * (MAX_PORT - MIN_PORT);
        ap->pool = (struct addr_entry *)calloc(num_entry, sizeof(struct addr_entry));
        if (!ap->pool) {
                free(ap);
@@ -59,11 +62,13 @@ CreateAddressPool(in_addr_t addr_base, int num_addr)
        }

        /* initialize address map */
-       ap->mapper = (struct addr_map *)calloc(num_addr, sizeof(struct addr_map));
-       if (!ap->mapper) {
-               free(ap->pool);
-               free(ap);
-               return NULL;
+       for ( i = 0; i < num_daddr; i++) {
+               ap->mapper[i] = (struct addr_map *)calloc(num_addr, sizeof(struct addr_map));
+               if (!ap->mapper[i]) {
+                       free(ap->pool);
+                       free(ap);
+                       return NULL;
+               }
        }

        TAILQ_INIT(&ap->free_list);
@@ -78,21 +83,25 @@ CreateAddressPool(in_addr_t addr_base, int num_addr)
        pthread_mutex_lock(&ap->lock);

        ap->addr_base = ntohl(addr_base);
+       ap->daddr_base = ntohl(daddr_base);
        ap->num_addr = num_addr;
+       ap->num_daddr = num_daddr;

        cnt = 0;
-       for (i = 0; i < num_addr; i++) {
-               addr_h = ap->addr_base + i;
-               addr = htonl(addr_h);
-               for (j = MIN_PORT; j < MAX_PORT; j++) {
-                       ap->pool[cnt].addr.sin_addr.s_addr = addr;
-                       ap->pool[cnt].addr.sin_port = htons(j);
-                       ap->mapper[i].addrmap[j] = &ap->pool[cnt];
+       for (k = 0; k < num_daddr; k++) {
+               for (i = 0; i < num_addr; i++) {
+                       addr_h = ap->addr_base + i;
+                       addr = htonl(addr_h);
+                       for (j = MIN_PORT; j < MAX_PORT; j++) {
+                               ap->pool[cnt].addr.sin_addr.s_addr = addr;
+                               ap->pool[cnt].addr.sin_port = htons(j);
+                               ap->mapper[k][i].addrmap[j] = &ap->pool[cnt];

-                       TAILQ_INSERT_TAIL(&ap->free_list, &ap->pool[cnt], addr_link);
+                               TAILQ_INSERT_TAIL(&ap->free_list, &ap->pool[cnt], addr_link);

-                       if ((++cnt) >= num_entry)
-                               break;
+                               if ((++cnt) >= num_entry)
+                                       break;
+                       }
                }
        }
        ap->num_entry = cnt;
@@ -106,11 +115,11 @@ CreateAddressPool(in_addr_t addr_base, int num_addr)
 /*----------------------------------------------------------------------------*/
 addr_pool_t
 CreateAddressPoolPerCore(int core, int num_queues,
-               in_addr_t saddr_base, int num_addr, in_addr_t daddr, in_port_t dport)
+               in_addr_t saddr_base, int num_addr, in_addr_t daddr_base, int num_daddr, in_port_t dport)
 {
        struct addr_pool *ap;
        int num_entry;
-       int i, j, cnt;
+       int i, j, k, cnt;
        in_addr_t saddr;
        uint32_t saddr_h, daddr_h;
        uint16_t sport_h, dport_h;
@@ -123,7 +132,7 @@ CreateAddressPoolPerCore(int core, int num_queues,
                return NULL;

        /* initialize address pool */
-       num_entry = (num_addr * (MAX_PORT - MIN_PORT)) / num_queues;
+       num_entry = (num_addr * num_daddr * (MAX_PORT - MIN_PORT)) / num_queues;
        ap->pool = (struct addr_entry *)calloc(num_entry, sizeof(struct addr_entry));
        if (!ap->pool) {
                free(ap);
@@ -131,11 +140,13 @@ CreateAddressPoolPerCore(int core, int num_queues,
        }

        /* initialize address map */
-       ap->mapper = (struct addr_map *)calloc(num_addr, sizeof(struct addr_map));
-       if (!ap->mapper) {
-               free(ap->pool);
-               free(ap);
-               return NULL;
+       for ( i = 0; i < num_daddr; i++) {
+               ap->mapper[i] = (struct addr_map *)calloc(num_addr, sizeof(struct addr_map));
+               if (!ap->mapper[i]) {
+                       free(ap->pool);
+                       free(ap);
+                       return NULL;
+               }
        }

        TAILQ_INIT(&ap->free_list);
@@ -150,29 +161,36 @@ CreateAddressPoolPerCore(int core, int num_queues,
        pthread_mutex_lock(&ap->lock);

        ap->addr_base = ntohl(saddr_base);
+       ap->daddr_base = ntohl(daddr_base);
        ap->num_addr = num_addr;
-       daddr_h = ntohl(daddr);
+       ap->num_daddr = num_daddr;
+       daddr_h = ntohl(daddr_base);
        dport_h = ntohs(dport);

        /* search address space to get RSS-friendly addresses */
        cnt = 0;
-       for (i = 0; i < num_addr; i++) {
-               saddr_h = ap->addr_base + i;
-               saddr = htonl(saddr_h);
-               for (j = MIN_PORT; j < MAX_PORT; j++) {
-                       if (cnt >= num_entry)
-                               break;
-
-                       sport_h = j;
-                       rss_core = GetRSSCPUCore(daddr_h, saddr_h, dport_h, sport_h, num_queues, endian_check);
-                       if (rss_core != core)
-                               continue;
-
-                       ap->pool[cnt].addr.sin_addr.s_addr = saddr;
-                       ap->pool[cnt].addr.sin_port = htons(sport_h);
-                       ap->mapper[i].addrmap[j] = &ap->pool[cnt];
-                       TAILQ_INSERT_TAIL(&ap->free_list, &ap->pool[cnt], addr_link);
-                       cnt++;
+       for (k = 0; k < num_daddr; k++) {
+               daddr_h = ap->daddr_base + k;
+               for (i = 0; i < num_addr; i++) {
+                       saddr_h = ap->addr_base + i;
+                       saddr = htonl(saddr_h);
+                       for (j = MIN_PORT; j < MAX_PORT; j++) {
+                               if (cnt >= num_entry)
+                                       break;
+
+                               sport_h = j;
+#if 0
+                               rss_core = GetRSSCPUCore(daddr_h, saddr_h, dport_h, sport_h, num_queues, endian_check);
+                               if (rss_core != core)
+                                       continue;
+
+#endif
+                               ap->pool[cnt].addr.sin_addr.s_addr = saddr;
+                               ap->pool[cnt].addr.sin_port = htons(sport_h);
+                               ap->mapper[k][i].addrmap[j] = &ap->pool[cnt];
+                               TAILQ_INSERT_TAIL(&ap->free_list, &ap->pool[cnt], addr_link);
+                               cnt++;
+                       }
                }
        }

@@ -194,6 +212,8 @@ CreateAddressPoolPerCore(int core, int num_queues,
 void
 DestroyAddressPool(addr_pool_t ap)
 {
+       int i;
+
        if (!ap)
                return;

@@ -201,10 +221,13 @@ DestroyAddressPool(addr_pool_t ap)
                free(ap->pool);
                ap->pool = NULL;
        }
+
+       for ( i = 0; i < ap->num_daddr; i++) {

-       if (ap->mapper) {
-               free(ap->mapper);
-               ap->mapper = NULL;
+               if (ap->mapper[i]) {
+                       free(ap->mapper[i]);
+                       ap->mapper[i] = NULL;
+               }
        }

        pthread_mutex_destroy(&ap->lock);
@@ -228,6 +251,7 @@ FetchAddress(addr_pool_t ap, int core, int num_queues,
        pthread_mutex_lock(&ap->lock);

        walk = TAILQ_FIRST(&ap->free_list);
+#if 0
        while (walk) {
                next = TAILQ_NEXT(walk, addr_link);

@@ -240,6 +264,7 @@ FetchAddress(addr_pool_t ap, int core, int num_queues,

                walk = next;
        }
+#endif

        if (walk) {
                *saddr = walk->addr;
@@ -260,35 +285,38 @@ FreeAddress(addr_pool_t ap, const struct sockaddr_in *addr)
 {
        struct addr_entry *walk, *next;
        int ret = -1;
+       int i;

        if (!ap || !addr)
                return -1;

        pthread_mutex_lock(&ap->lock);

-       if (ap->mapper) {
-               uint32_t addr_h = ntohl(addr->sin_addr.s_addr);
-               uint16_t port_h = ntohs(addr->sin_port);
-               int index = addr_h - ap->addr_base;
+       for (i = 0; i < ap->num_daddr; i++) {
+               if (ap->mapper[i]) {
+                       uint32_t addr_h = ntohl(addr->sin_addr.s_addr);
+                       uint16_t port_h = ntohs(addr->sin_port);
+                       int index = addr_h - ap->addr_base;

-               if (index >= 0 || index < ap->num_addr) {
-                       walk = ap->mapper[addr_h - ap->addr_base].addrmap[port_h];
-               } else {
-                       walk = NULL;
-               }
+                       if (index >= 0 && index < ap->num_addr) {
+                               walk = ap->mapper[i][addr_h - ap->addr_base].addrmap[port_h];
+                       } else {
+                               walk = NULL;
+                       }

-       } else {
-               walk = TAILQ_FIRST(&ap->used_list);
-               while (walk) {
-                       next = TAILQ_NEXT(walk, addr_link);
-                       if (addr->sin_port == walk->addr.sin_port &&
-                                       addr->sin_addr.s_addr == walk->addr.sin_addr.s_addr) {
-                               break;
+               } else {
+                       walk = TAILQ_FIRST(&ap->used_list);
+                       while (walk) {
+                               next = TAILQ_NEXT(walk, addr_link);
+                               if (addr->sin_port == walk->addr.sin_port &&
+                                               addr->sin_addr.s_addr == walk->addr.sin_addr.s_addr) {
+                                       break;
+                               }
+
+                               walk = next;
                        }

-                       walk = next;
                }
-
        }

        if (walk) {
diff --git a/mtcp/src/api.c b/mtcp/src/api.c
index 8e25b32..d8aee5b 100644
--- a/mtcp/src/api.c
+++ b/mtcp/src/api.c
@@ -496,7 +496,7 @@ mtcp_accept(mctx_t mctx, int sockid, struct sockaddr *addr, socklen_t *addrlen)
 /*----------------------------------------------------------------------------*/
 int
 mtcp_init_rss(mctx_t mctx, in_addr_t saddr_base, int num_addr,
-               in_addr_t daddr, in_addr_t dport)
+               in_addr_t daddr_base, int num_daddr, in_addr_t dport)
 {
        mtcp_manager_t mtcp;
        addr_pool_t ap;
@@ -511,12 +511,12 @@ mtcp_init_rss(mctx_t mctx, in_addr_t saddr_base, int num_addr,
                /* for the INADDR_ANY, find the output interface for the destination
                   and set the saddr_base as the ip address of the output interface */
-               nif_out = GetOutputInterface(daddr);
+               nif_out = GetOutputInterface(daddr_base);
                saddr_base = CONFIG.eths[nif_out].ip_addr;
        }

        ap = CreateAddressPoolPerCore(mctx->cpu, num_cpus,
-                       saddr_base, num_addr, daddr, dport);
+                       saddr_base, num_addr, daddr_base, num_daddr, dport);
        if (!ap) {
                errno = ENOMEM;
                return -1;
diff --git a/mtcp/src/core.c b/mtcp/src/core.c
index 82f6fc6..ece4ae0 100644
--- a/mtcp/src/core.c
+++ b/mtcp/src/core.c
@@ -1396,11 +1396,13 @@ mtcp_init(char *config_file)
        PrintConfiguration();

        /* TODO: this should be fixed */
+#if 0
        ap = CreateAddressPool(CONFIG.eths[0].ip_addr, 1);
        if (!ap) {
                TRACE_CONFIG("Error occured while creating address pool.\n");
                return -1;
        }
+#endif

        PrintInterfaceInfo();

diff --git a/mtcp/src/include/addr_pool.h b/mtcp/src/include/addr_pool.h
index 7447513..452f934 100644
--- a/mtcp/src/include/addr_pool.h
+++ b/mtcp/src/include/addr_pool.h
@@ -13,7 +13,7 @@ typedef struct addr_pool *addr_pool_t;
 /* num_addr: number of addresses to use as source IP                          */
 /*----------------------------------------------------------------------------*/
 addr_pool_t
-CreateAddressPool(in_addr_t addr_base, int num_addr);
+CreateAddressPool(in_addr_t addr_base, int num_addr, in_addr_t daddr_base, int num_daddr);
 /*----------------------------------------------------------------------------*/
 /* CreateAddressPoolPerCore()                                                 */
 /* Create address pool only for the given core number.                        */
@@ -21,7 +21,7 @@ CreateAddressPool(in_addr_t addr_base, int num_addr);
 /*----------------------------------------------------------------------------*/
 addr_pool_t
 CreateAddressPoolPerCore(int core, int num_queues,
-               in_addr_t saddr_base, int num_addr, in_addr_t daddr, in_port_t dport);
+               in_addr_t saddr_base, int num_addr, in_addr_t daddr_base, int num_daddr, in_port_t dport);
 /*----------------------------------------------------------------------------*/
 void
 DestroyAddressPool(addr_pool_t ap);
diff --git a/mtcp/src/include/mtcp_api.h b/mtcp/src/include/mtcp_api.h
index 84cbfc5..719fff4 100644
--- a/mtcp/src/include/mtcp_api.h
+++ b/mtcp/src/include/mtcp_api.h
@@ -97,7 +97,7 @@ mtcp_accept(mctx_t mctx, int sockid, struct sockaddr *addr, socklen_t *addrlen);

 int
 mtcp_init_rss(mctx_t mctx, in_addr_t saddr_base, int num_addr,
-               in_addr_t daddr, in_addr_t dport);
+               in_addr_t daddr_base, int num_daddr, in_addr_t dport);

 int
 mtcp_connect(mctx_t mctx, int sockid,

diff --git a/mtcp/src/ip_out.c b/mtcp/src/ip_out.c
index 2473112..6873df6 100644
--- a/mtcp/src/ip_out.c
+++ b/mtcp/src/ip_out.c
@@ -3,6 +3,38 @@
 #include "eth_out.h"
 #include "arp.h"
 #include "debug.h"
+#include <rte_ethdev.h>  /* NOTE(review): the two header names were stripped by the blog's */
+#include <rte_ether.h>   /* HTML rendering; best-guess reconstruction — verify against the original patch */

Followers