SDN

SDN Learning

SDN/VNF Satellite Experiment

Environment Set Up

Ubuntu network bug

1
sudo ip link set enp0s1 up
1
2
3
sudo dhclient enp0s1

sudo ip addr add [Your-IPv4-Address]/[Subnet-Mask] dev enp0s1
1
2
3
4
5
sudo systemctl restart NetworkManager



sudo systemctl restart networking

Controller (OpenDaylight)

  1. Prepare the operating system
  2. Install the Java JRE
  3. Set JAVA_HOME
  4. Download the OpenDaylight Zip
  5. Unzip OpenDaylight
  6. Start OpenDaylight
  7. Bonus: Where did DLUX go?
1. Prepare Operating System
1
2
sudo apt-get -y update
sudo apt-get -y install unzip
2. Install the Java JRE

OpenDaylight runs on Java platform

1
sudo apt-get -y install openjdk-11-jre
1
sudo update-alternatives --config java

Please select Java 11 as your default java environment

3. Set JAVA_HOME
1
ls -l /etc/alternatives/java
1
echo 'export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-arm64' >> ~/.bashrc
1
2
source ~/.bashrc
echo $JAVA_HOME

echo 'export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-arm64' >> ~/.bashrc

4. Download the OpenDaylight Zip Archive
1
curl -XGET -O https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/karaf/0.16.1/karaf-0.16.1.zip
5. Install OpenDaylight
1
$ unzip karaf-0.16.1.zip 
6. Start OpenDaylight
1
./bin/karaf 
1
opendaylight-user@root>feature:list
7. Bonus: What happened to DLUX?
1
curl -O https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/karaf/0.8.4/karaf-0.8.4.zip
1
unzip karaf-0.8.4.zip 
1
./bin/karaf 
1
sudo apt-get install openjdk-8-jre
1
sudo update-alternatives --config java

select Java 8

RYU controller

1
ryu-manager simple_switch_13.py
1
sudo mn --custom topology.py --topo mytopo --controller=remote,ip=127.0.0.1,port=6653
1
mn --topo=single,3 --mac --controller=remote,ip=127.0.0.1,port=6653 --switch ovsk,protocols=OpenFlow13

s1 = net.addSwitch('s1', cls = OVSKernelSwitch, dpid= '0000000000000001')

sh ovs-vsctl show

dpctl dump-flows

dpctl del-flows

dpctl add-flow in_port=1,actions=output:2

dpctl add-flow in_port=2,actions=output:1

1
2
3
4
5
h1 cat /tmp/iperf_client_1.log

h1 cat /tmp/iperf.log

h1 scp /tmp/iperf_client_h1.log limingwei@192.168.64.5:/home/limingwei/Desktop/mydir/tmp
1
2
3
4
h49 hping3 --flood --syn --rand-source h3

cp /tmp/iperf*.log /home/limingwei/Desktop/mydir/tmp_flooding/

![image-20240110101757590](/Users/joshua/Library/Application Support/typora-user-images/image-20240110101757590.png)

1
sudo mn --link tc,bw=10,delay=10ms
1
2
3
h1 iperf -s

h2 iperf -c 10.0.0.1 -i 1 -t 10

Add flow rules for based l2 addresses

1
2
3
4
sudo ovs-ofctl add-flow s1 "dl_dst=02:89:c4:04:33:36, action=1"
sudo ovs-ofctl add-flow s1 "dl_dst=3a:f4:85:86:30:c3, action=2"
sudo ovs-ofctl add-flow s1 "dl_dst=d2:5e:c6:63:ef:af, action=3"
sudo ovs-ofctl add-flow s1 "dl_dst=ff:ff:ff:ff:ff:ff, action=flood"

SDN course

ToR Switch:

Top-of-Rack Switching (ToR) is a type of network infrastructure that uses network switches to connect servers and other devices in the same rack. This type of switching allows for faster data transfer between devices and improved performance.

Create a topology with three hosts:

1
mn --topo single,3 --mac --controller default
1
sudo ovs-ofctl dump-flows s1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
mn --topo single,3 --mac --controller remote

#1
sudo ovs-ofctl add-flow s1 "in_port=2,ip, nw_dst=192.168.1.0/24,action=output:3"
sudo ovs-ofctl add-flow s1 "in_port=3,ip, nw_dst=192.168.1.0/24,action=output:2"

#2
sudo ovs-ofctl add-flow s1 arp,actions=flood

for i in {1..3}; do sudo ovs-ofctl add-flow s1 ip,nw_dst=10.0.0.$i,actions=output:$i;done

#3
h1 ifconfig |grep -i ether
h2 ifconfig |grep -i ether
h3 ifconfig |grep -i ether

sudo ovs-ofctl add-flow s1 "dl_dst=00:00:00:00:00:01, action=1"
sudo ovs-ofctl add-flow s1 "dl_dst=00:00:00:00:00:02, action=2"
sudo ovs-ofctl add-flow s1 "dl_dst=00:00:00:00:00:03, action=3"
sudo ovs-ofctl add-flow s1 "dl_dst=ff:ff:ff:ff:ff:ff, action=flood"


Lab 4

Create the topology

1
sudo mn --topo=single,3 --mac --controller=remote,ip=127.0.0.1,port=6653 --switch ovsk,protocols=OpenFlow13

specify for the ovs-ofctl as well which OpenFlow version to use, for instance.

1
2
3
4
mininet> sh sudo ovs-ofctl -O OpenFlow13 dump-flows s1

sh sudo watch -n1 'ovs-ofctl -O OpenFlow13 dump-flows s1'

Learning_switch.py

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
from ryu.base import app_manager
from ryu.controller import ofp_event,dpset
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet, ethernet, ether_types
import logging as log

class SimpleSwitch13(app_manager.RyuApp):
    """OpenFlow 1.3 learning switch.

    Learns source-MAC-to-ingress-port mappings from Packet-In events and
    installs matching flow rules so known unicast traffic is forwarded by
    the switch itself; frames to unknown destinations are flooded.
    """

    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        log.basicConfig(format='%(levelname)s:%(message)s', level=log.DEBUG)
        log.info("Controller is up and running")
        # dpid (16-digit zero-padded string) -> {mac: port}
        self.mac_to_port = {}

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Install the table-miss entry when a switch connects.

        The priority-0 wildcard rule sends every unmatched packet to the
        controller (unbuffered), which drives the learning logic below.
        """
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        log.info("Switch connected with datapath ID: {}".format(datapath.id))
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions)

    def add_flow(self, datapath, priority, match, actions, buffer_id=None):
        """Send a FlowMod installing match -> apply-actions on `datapath`.

        :param datapath: switch to program
        :param priority: flow rule priority
        :param match: OFPMatch describing the traffic to match
        :param actions: list of OFPAction to apply
        :param buffer_id: optional id of the packet buffered on the switch,
            so the FlowMod also releases that packet
        """
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Learn the source MAC, then forward (and possibly install a rule)."""
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]

        self.logger.info("PacketIn received: \n"
                         "Switch: {}\n"
                         "in_port: {}\n".format(datapath.id, in_port))

        dst = eth.dst
        src = eth.src

        dpid = format(datapath.id, "d").zfill(16)
        self.mac_to_port.setdefault(dpid, {})

        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)

        # learn the source MAC address to avoid flooding it next time
        self.mac_to_port[dpid][src] = in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        actions = [parser.OFPActionOutput(out_port)]

        # BUGFIX: only install a flow rule when the destination is known.
        # The original compared out_port against OFP_NO_BUFFER (a buffer-id
        # constant, not a port), which also installed rules for flooded
        # packets with unknown destinations.
        if out_port != ofproto.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)

            if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                # switch buffered the packet: FlowMod installs the rule and
                # releases the buffered packet in one step
                self.add_flow(datapath, 1, match, actions, msg.buffer_id)
                return
            else:
                self.add_flow(datapath, 1, match, actions)
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data
        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)



Lab 5

LLDP:

Features: Vendor independent/agnostic

LLDP messages are periodically sent by switches by default. A flow rule should be installed into all switches’ flow tables to match on these specific LLDP packets and send them towards the controller as a Packet-In message.

Insert a matching following flow rule into each switch’s flow table:

1
sudo ovs-ofctl -O OpenFlow13 add-flow <switch_name> priority=65535, dl_type=0x88cc, actions=CONTROLLER:65535

Ryu can also do the job for us by starting our controller application by extending the command with the following argument:

1
sudo ryu-manager learning_switch.py --observe-links

Topology discovery

Ryu controller provides ryu.topology, which is a switch and link discovery module providing API calls for accessing host, switch, and link data. In order to access it, import the following libraries into our controller application.

1
2
from ryu.topology import event
from ryu.topology.api import get_switch, get_link, get_host
1
self.topology_api_app = self # put this into the constructor of the ryu controller

Whenever we want to get the latest topology information, we can use the 3 imported functions:

1
2
3
4
5
6
7
8
# get switch list
switch_list = get_switch(self.topology_api_app, None)

#get links and their endpoints
link_list = get_link(self.topology_api_app, None)

#get hosts if there is any
hosts_list = get_host(self.topology_api_app, None)

In each case, the topology.api returns lists of objects with a variable number of accessible properties:

Link object: it describes a connection between a given pair of switches. The source and target of a link is a Port object that can be accessed by link.src and link.dst, respectively. A Port object further contains relevant information such as the datapath identifier (dpid), name of the interface (name), MAC address of that interface (hw_addr), port identifier (port_no), etc. For example, the source data path identifier of a link and the port id, where it is connected to, is accessible by link.src.dpid and link.src.port_no, respectively.

Switch object: it contains the datapath object per se, and as many Port objects as it has.

Host object: it contains its MAC address (host.mac), IPv4 and IPv6 addresses (host.ipv4 and host.ipv6) and a Port object, again, describing which switch it is connected to (e.g., host.port.dpid, host.port.port_no)

Lab 5 Task 1

Modify the controller application (learning_switch) as follows:

  • Create a function called update_topology_data() that prints out the topology information managed by the topology.api (prettify the output for a more comprehensive view)

![image-20240214160149193](/Users/joshua/Library/Application Support/typora-user-images/image-20240214160149193.png)

  • Each time a new switch connects to the controller, or a host is discovered call update_topology_data().
  • As LLDP messages are also sent to the controller and they also contain source and destination MAC addresses, modify your Packet-In handler to ignore these LLDP packets!

​ Hint: To easily match on LLDP packets import the following libraries

1
2
3
#for LLDP packet types
from ryu.ofproto.ether import ETH_TYPE_CFM
from ryu.ofproto.ether import ETH_TYPE_LLDP
  • At least two switches need to exist in the topology in order to obtain link information from Ryu’s API.

Solution

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
from ryu.base import app_manager
from ryu.lib.packet import lldp
from ryu.controller import ofp_event, dpset
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.topology import event
from ryu.topology.api import get_switch, get_link, get_host
from ryu.lib.packet import packet, ethernet, ether_types
from ryu.ofproto.ether import ETH_TYPE_CFM
from ryu.ofproto.ether import ETH_TYPE_LLDP
import logging as log


class SimpleSwitch13(app_manager.RyuApp):
    """OpenFlow 1.3 learning switch with topology discovery (Lab 5).

    Extends the basic learning switch with ryu.topology: LLDP/CFM packets
    are ignored by the forwarding logic but trigger a topology refresh, and
    the discovered switches, links, and hosts are logged.
    """

    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        log.basicConfig(format='%(levelname)s:%(message)s', level=log.DEBUG)
        log.info("Controller is up and running")
        # required by ryu.topology.api: the app instance is the query handle
        self.topology_api_app = self
        # dpid (16-digit zero-padded string) -> {mac: port}
        self.mac_to_port = {}

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Install the table-miss entry and refresh the topology view."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        log.info("Switch connected with datapath ID: {}".format(datapath.id))
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions)
        self.update_topology_data()

    def add_flow(self, datapath, priority, match, actions, buffer_id=None):
        """Send a FlowMod installing match -> apply-actions on `datapath`.

        :param buffer_id: optional id of the packet buffered on the switch,
            so the FlowMod also releases that packet
        """
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst)
        datapath.send_msg(mod)

    def update_topology_data(self):
        """Query ryu.topology.api and pretty-print switches, links, hosts."""
        # get switch list
        switch_list = get_switch(self.topology_api_app, None)
        # get links and their endpoints
        links_list = get_link(self.topology_api_app, None)
        # get hosts if there is any
        hosts_list = get_host(self.topology_api_app, None)
        self.logger.info("Switches:")
        for switch in switch_list:
            self.logger.info("s{}".format(switch.dp.id))

        self.logger.info("\nLinks:")
        for link in links_list:
            source = "s{}".format(link.src.dpid)
            source_port = link.src.port_no
            target = "s{}".format(link.dst.dpid)
            target_port = link.dst.port_no
            self.logger.info("{}(port:{})<---->(port:{}){}".format(source, source_port, target_port, target))

        self.logger.info("Hosts:")
        if hosts_list:
            for host in hosts_list:
                name = "h{}".format(host.mac.replace(':', '')[-6:])  # Using part of MAC for host identifier
                self.logger.info("{}:\n\tip4:{}"
                                 "\n\tip6:{}"
                                 "\n\tmac={}"
                                 "\n\tconnected to:s{}"
                                 "\n\tport_no:{}".format(name,
                                                         host.ipv4 if host.ipv4 else "None",
                                                         host.ipv6 if host.ipv6 else "None",
                                                         host.mac,
                                                         host.port.dpid,
                                                         host.port.port_no))

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Learn the source MAC and forward; skip LLDP/CFM discovery frames."""
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]

        if eth.ethertype in (ETH_TYPE_LLDP, ETH_TYPE_CFM):
            # Ignore LLDP and CFM packets but update the topology
            self.update_topology_data()
            return

        self.logger.info("PacketIn received: \n"
                         "Switch: {}\n"
                         "in_port: {}\n".format(datapath.id, in_port))

        dst = eth.dst
        src = eth.src

        dpid = format(datapath.id, "d").zfill(16)
        self.mac_to_port.setdefault(dpid, {})

        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)

        # learn the source MAC address to avoid flooding it next time
        self.mac_to_port[dpid][src] = in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        actions = [parser.OFPActionOutput(out_port)]

        # BUGFIX: only install a flow rule when the destination is known.
        # The original compared out_port against OFP_NO_BUFFER (a buffer-id
        # constant, not a port), which also installed rules for flooded
        # packets with unknown destinations.
        if out_port != ofproto.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)

            if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                self.add_flow(datapath, 1, match, actions, msg.buffer_id)
                return
            else:
                self.add_flow(datapath, 1, match, actions)
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data
        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)
        self.update_topology_data()


  • Call this function in the connection and the Packet-In handlers.

  • In our Packet-In handler we should only execute the learning switch-related code when the packets are not LLDP packets:

    1
    2
    if not pkt_eth.ethertype in (ETH_TYPE_LLDP,ETH_TYPE_CFM):
    # LLDP packets will not be processed, we just update the topology

Find paths, run algorithms, optimize routing

By means of the topology.api, we have become able to query topology-related information, such as switches, hosts and links among them. In order to run graph algorithms, e.g., find shortest paths among nodes, recalculate path in case of failure, we need to create a network graph. The good thing about graphs is that the concepts and terminology are naturally intuitive, basically graphs describe structures that map relations between objects. Objects are called nodes and the connections among them are termed as edges. A graph can be directed or undirected based on the orientation of the edges. Making an edge undirected also implies that it is bi-directional, i.e., A <-> B = B <-> A. If a graph is directed, then the order and direction of edges do matter, i.e., A -> B != B -> A.

The edges can also have weights indicating, for instance, the distance between two nodes.

NetworkX - Basics

NetworkX is the most popular Python library to create, manipulate and analyse graphs. Some of its features are:

  • Data structures for graphs, digraphs, and multigraphs
  • Many standard graph algorithms
  • Nodes can be “anything” (e.g., text, images, XML records)
  • Edges can hold arbitrary data (e.g., weights, time-series)
  • Additional benefits from Python include fast prototyping, easy to teach, and multi-platform

The provided Mininet VM already has networkx installed, however in order to reinstall it issue the following command:

1
$ sudo pip install networkx

Exploring NetworkX library and creating a simple Graph object can be done as follows:

1
2
import networkx as nx
G = nx.Graph()

In order to add 2 nodes (identified by Integer numbers) to the graph and connect them with an edge, try the following:

1
2
3
G.add_node(1)
G.add_node(2)
G.add_edge(1,2)

The following non-comprehensive list summarizes some fundamental function calls:

  • G.add_nodes_from(list_of_nodes): Add a list of nodes to the graph
  • G.remove_node(node): Remove node node from the graph
  • G.node[node]: Get node node from the graph
  • G.add_edges_from(lists_of_edges): Add a list of edges (defined by source-destination pairs) to the graph
  • G.remove_edge(edge): Remove edge edge from the graph
  • G.edge[src] [dst]: Get an edge between nodes src and dst from the graph

You might notice that nodes and edges are not specified as NetworkX objects. This leaves you free to use meaningful items as nodes and edges. The most common choices are numbers or strings, but a node can be any hashable object (except None). Furthermore, we can also add attributes to graphs, nodes and edges, such as weights, labels, colours or whatever Python object you like. For instance, we can define bandwidth and latency attributes for the links:

1
G.add_edge(1,2, bandwidth=25,latency=5)

NetworkX - Algorithms

NetworkX provides many graph algorithms, e.g., connectivity, cliques, clustering, independent set, average node/edge degree, however from all of them we will use the simple and shortest path algorithms. The nx.shortest_path(G, src, dst) function computes the shortest path in graph G between source node src and destination node dst. Additionally, we can set a fourth parameter weight, where we can indicate to use that edge attribute as the edge weight. The following example prints out the shortest path as a list from node 0 towards node 4 in graph G.

1
print(nx.shortest_path(G, source = 0, target = 4))

Lab 6

1
sudo mn --custom my_topo.py --topo mytopo --controller=remote,ip=127.0.0.1,port=6653 --switch ovsk,protocols=OpenFlow13
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
import logging as log

from ryu.ofproto.ether import ETH_TYPE_CFM
from ryu.ofproto.ether import ETH_TYPE_LLDP

# Graph manipulation library
import networkx as nx

# Fetch topology information
from ryu.topology import event
from ryu.topology.api import get_switch, get_link, get_host


class SimpleSwitch13(app_manager.RyuApp):
    # Shortest-path forwarding controller (Lab 6): builds a NetworkX graph
    # from the topology discovered by ryu.topology and proactively installs
    # IPv4 flow rules along every shortest path between each pair of hosts.
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        self.mac_to_port = {}
        # undirected topology graph; nodes are "sN"/"hN" name strings
        self.G = nx.Graph()
        # {src_host_name: {dst_host_name: [[path], [path], ...]}}
        self.shortest_paths = dict()

        # necessary to enable the topology monitoring functionality
        self.topology_api_app = self

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        # Install the table-miss rule (send unmatched packets to the
        # controller), then refresh the topology view and flow rules.
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions)
        self.update_topology_data()

    def add_flow(self, datapath, priority, match, actions, buffer_id=None):
        # Send a FlowMod installing match -> apply-actions on `datapath`;
        # when buffer_id is given the FlowMod also releases that buffered
        # packet on the switch.
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        # Packet-In is used only as a trigger to refresh topology data;
        # forwarding itself is done by the proactively installed rules.
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]

        if eth.ethertype in (ETH_TYPE_LLDP, ETH_TYPE_CFM):
            # ignore lldp packet
            return

        self.update_topology_data()

    def update_topology_data(self):
        # Rebuild the graph from the current topology view, then recompute
        # all-pairs shortest paths and (re)install the matching flow rules.
        # Get lists for switches, links, and hosts
        switch_list = get_switch(self.topology_api_app, None)
        links_list = get_link(self.topology_api_app, None)
        hosts_list = get_host(self.topology_api_app, None)

        self.logger.info("Switches:")
        for switch in switch_list:
            print(switch)
            switch_name = "s{}".format(switch.dp.id)
            self.logger.info(switch_name)

            # add switches to graph with preset attribute names; the `dp`
            # attribute is later used to send FlowMods to this switch
            self.G.add_node(
                switch_name,
                name=switch_name,
                dp=switch.dp,
                port=switch.ports
            )

        self.logger.info("\nLinks:")
        for link in links_list:
            print(link)
            source = "s{}".format(link.src.dpid)
            source_port = link.src.port_no
            target = "s{}".format(link.dst.dpid)
            target_port = link.dst.port_no
            self.logger.info("{}(port:{}) <---> (port:{}){}".format(source, source_port, target_port, target))
            # networkx links in Graph() are not differentiated by source and
            # destination, so a link and its data become updated when
            # add_edge is called with the source and destination swapped
            self.G.add_edge(source, target,
                            src_dpid=source, src_port=link.src.port_no,
                            dst_dpid=target, dst_port=link.dst.port_no)

        self.logger.info("Hosts:\n")
        if hosts_list:
            for host in hosts_list:
                # assemble name according to mac: second hex digit of the
                # last MAC byte (matches Mininet's --mac for small topologies)
                host_name = "h{}".format(host.mac.split(":")[5][1])
                print(host_name)
                last_byte_hex = host.mac.split(":")[5]
                last_byte_int = int(last_byte_hex, 16)

                # clamp to a valid host part for the synthesized 10.0.0.x IP
                if last_byte_int == 0:
                    last_byte_int = 1
                elif last_byte_int > 254:
                    last_byte_int = 254

                # NOTE(review): ryu's Host.ipv4 is normally a list of learned
                # addresses; here it is overwritten with a string derived
                # from the MAC — confirm this matches the lab's addressing.
                host.ipv4 = "10.0.0.{}".format(last_byte_int)
                self.logger.info("{}:\n\tip4:{}"
                                 "\n\tip6:{}"
                                 "\n\tmac:{}"
                                 "\n\tconnected to:s{}"
                                 "\n\tport_no:{}".format(
                                     host_name,
                                     host.ipv4,
                                     host.ipv6,
                                     host.mac,
                                     host.port.dpid,
                                     host.port.port_no
                                 )
                                 )

                self.G.add_node(
                    host_name,
                    name=host_name,
                    ipv4=host.ipv4,
                    ipv6=host.ipv6,
                    mac=host.mac,
                    connected_to="s{}".format(host.port.dpid),
                    port_no=host.port.port_no)
                # add corresponding links to the graph; only dst_* attributes
                # are set, so the host side is always the edge's "dst"
                self.G.add_edge(host_name,
                                "s{}".format(host.port.dpid),
                                dst_port=host.port.port_no,
                                dst_dpid="s{}".format(host.port.dpid))

        # update shortest paths
        self.calculate_all_pair_shortest_paths()
        print("Shortest Paths:\n{}".format(self.shortest_paths))
        self.install_shortest_paths_flow_rules()

    def calculate_shortest_paths(self, src, dst):
        '''
        This function returns all shortest paths between the given source and destination node
        :param src: String - the source node's name
        :param dst: String - the destination node's name
        :return: list of lists
        '''
        if src not in self.G.nodes() or dst not in self.G.nodes():
            return None
        paths = list()
        try:
            # nx.all_shortest_paths returns a generator; materialize it
            all_sp = nx.all_shortest_paths(self.G, src, dst)
            for path in all_sp:
                paths.append(path)

        except nx.NetworkXNoPath:  # no path between src and dst
            log.info("No path between {} and {}".format(src, dst))
            return None

        return paths

    def calculate_all_pair_shortest_paths(self):
        '''
        This function calculates all shortest paths for all source and destinations
        Note: NetworkX also have similar function (all_pairs_shortest_path(G[, cutoff])),
        however that only gives one shortest path for a given (source,destination) pair
        :return: dictionary of dictionary of list of lists, e.g., h1:{h2:[[h1,s1,h2],[h1,s2,h2]]}
        '''
        all_paths = dict()
        for n in self.G.nodes():
            if n.startswith('h'):  # only hosts are relevant
                all_paths[n] = dict()
                for m in self.G.nodes():
                    if m.startswith('h'):
                        if n == m:
                            continue
                        all_paths[n][m] = self.calculate_shortest_paths(n, m)

        self.shortest_paths = all_paths

    def install_flow_rule_for_chain_link(self, chain_link, chain_prev, chain_next, source_ip, destination_ip):
        '''
        This function installs matching flow rules on source_ip and destination_ip in switch
        chain_link and outputs packets on ports that are connected to its upstream (chain_prev)
        and downstream (chain_next) nodes, respectively.
        According to the chain_prev and chain_next, it gets the link/port number information
        from the graph that stores them
        :param chain_link: String - the name of the chain_link
        :param chain_prev: String - the name of the previous switch
        :param chain_next: String - the name of the next switch
        :param source_ip: tuple(String,String) - source host IP address and netmask for the upstream
        :param destination_ip: tuple(String,String) - the destination IP address and netmask for the downstream
        :return:
        '''

        datapath = self.G.nodes[chain_link]['dp']
        ofp = datapath.ofproto
        ofp_parser = datapath.ofproto_parser
        # eth_type=0x0800 restricts both matches to IPv4
        match_source_ip = ofp_parser.OFPMatch(eth_type=0x0800, ipv4_dst=source_ip)
        match_destination_ip = ofp_parser.OFPMatch(eth_type=0x0800, ipv4_dst=destination_ip)

        # --- upstream
        # get edge_data
        edge = self.G[chain_link][chain_prev]
        print ("upstream edge: ", edge)
        if edge['dst_dpid'] == chain_link:
            # if prev is a host, then it is always the case that edge['dst_port'] stores the port number
            out_port = edge['dst_port']
        else:
            # if prev is a switch, then it might be the src_dpid
            out_port = edge['src_port']
        actions = [ofp_parser.OFPActionOutput(out_port, 0)]
        inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]
        print("install flow rule for SIP {} - DIP {} at {} to forward packet on port {}".
              format(source_ip, destination_ip, chain_link, out_port))
        self.send_flow_mod(datapath, None, match=match_source_ip, inst=inst)

        # --- downstream
        # get edge_data
        edge = self.G[chain_link][chain_next]
        print("downstream edge: ", edge)
        if edge['dst_dpid'] == chain_link:
            # if next is a host, then it is always the case that edge['dst_port'] stores the port number
            out_port = edge['dst_port']
        else:
            # if next is a switch, then it might be the src_dpid
            out_port = edge['src_port']
        actions = [ofp_parser.OFPActionOutput(out_port, 0)]
        inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]
        log.info("install flow rule for SIP {} - DIP {} at {} to forward packet on port {}".
                 format(source_ip, destination_ip, chain_link, out_port))
        self.send_flow_mod(datapath, None, match=match_destination_ip, inst=inst)

    def install_shortest_paths_flow_rules(self):
        '''
        This function will install flow rules according to the shortest paths
        :return:
        '''
        paths = self.shortest_paths
        # paths looks like {h1:{h2:[[path1],[path2]]}} <- EXAMPLE
        # NOTE(review): with multiple equal-cost paths, rules for later paths
        # replace earlier ones (identical match and priority) — confirm this
        # "last path wins" behavior is intended.
        if paths:
            for source in paths:  # source = h1
                source_host = self.G.nodes[source]
                print("Source host: {}".format(source_host))
                source_ip = (source_host['ipv4'], '255.255.255.255')
                print("Source ip: {}".format(source_ip))

                # self.log.info(paths[source]) # paths[source] = {h2: [[path1],[path2]]
                for p in paths[source]:  # p = h2
                    destination_host = self.G.nodes[p]
                    destination_ip = (destination_host['ipv4'], '255.255.255.255')
                    if paths[source][p]:
                        for path_num, j in enumerate(
                                paths[source][p]):  # paths[source][p] = [[path1],[path2]], j = one path from paths
                            # install the first rule always!
                            individual_path = j
                            if individual_path:
                                for num, sw in enumerate(individual_path):
                                    # print sw
                                    if sw.startswith('h'):
                                        # it's a host, skip (this will also prevent running out of indexes in both direction (see below))
                                        continue

                                    # safe: path endpoints are hosts, which
                                    # are skipped above, so num-1/num+1 exist
                                    # (`next` shadows the builtin here)
                                    prev = individual_path[num - 1]
                                    current = individual_path[num]
                                    next = individual_path[num + 1]
                                    self.install_flow_rule_for_chain_link(current, prev, next, source_ip, destination_ip)
                                    # break

    def send_flow_mod(self, datapath, msg, **args):
        '''
        Sending a flow_mod to the given switch
        :param datapath: Datapath - datapath of the switch
        :param msg: PacketIn message
        :param args: cookie=0, table=0, cookie_mask=0,idle_timeout=0,hard_timeout=0,priority=100,buffer_id=OFP_NO_BUFFER,
        mod_type= OFPFC_ADD, match=OFPMatch(in_port=1,broadcast_eth_dst),
        inst=OFPInstructionActions(apply action,OFPActionOutput(2)),
        :return: nothing
        '''
        ofp = datapath.ofproto
        ofp_parser = datapath.ofproto_parser
        # all parameters fall back to sensible defaults when not supplied
        table_id = args.get('table', 0)
        cookie = args.get('cookie', 0)
        cookie_mask = args.get('cookie_mask', 0)
        idle_timeout = args.get('idle_timeout', 0)
        hard_timeout = args.get('hard_timeout', 0)
        priority = args.get('priority', 100)
        if msg:
            buffer_id = args.get('buffer_id', msg.buffer_id)
        else:
            buffer_id = ofp.OFP_NO_BUFFER

        mod_type = args.get('mod_type', ofp.OFPFC_ADD)

        match = args.get('match', ofp_parser.OFPMatch(in_port=1, eth_dst='ff:ff:ff:ff:ff:ff'))
        inst = args.get('inst',
                        [ofp_parser.OFPInstructionActions(
                            ofp.OFPIT_APPLY_ACTIONS,
                            [ofp_parser.OFPActionOutput(2)])])

        # positional OFPFlowMod: out_port/out_group are wildcarded (ANY) and
        # OFPFF_SEND_FLOW_REM asks the switch to report rule removal
        flowmod = ofp_parser.OFPFlowMod(datapath, cookie, cookie_mask,
                                        table_id, mod_type,
                                        idle_timeout, hard_timeout,
                                        priority, buffer_id,
                                        ofp.OFPP_ANY, ofp.OFPG_ANY,
                                        ofp.OFPFF_SEND_FLOW_REM,
                                        match, inst)

        # log.info("Sending flowmod:\n {}".format(flowmod))
        datapath.send_msg(flowmod)

Lab 7

1
2
3
4
5
6
7
8
9
python tcp_exp.py -b 1000 -d 7.5ms

mininet> xterm h1 h2

h2> iperf -s &

mininet> h2 wireshark

h1> iperf -c 10.0.0.2 -i 1 -n 1M
Donate
  • Copyright: Copyright is owned by the author. For commercial reprints, please contact the author for authorization. For non-commercial reprints, please indicate the source.
  • Copyrights © 2021-2024 Mingwei Li
  • Visitors: | Views:

Buy me a bottle of beer please~

支付宝
微信