1# Copyright 2016-present Ciena Corporation
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14#
15import unittest
16from nose.tools import *
17from scapy.all import *
18from OnosCtrl import OnosCtrl, get_mac
19from OltConfig import OltConfig
20from socket import socket
21from OnosFlowCtrl import OnosFlowCtrl
22from nose.twistedtools import reactor, deferred
23from twisted.internet import defer
24from onosclidriver import OnosCliDriver
25from CordContainer import Container, Onos, Quagga
26from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart
27from portmaps import g_subscriber_port_map
28from scapy.all import *
29import time, monotonic
30import threading
31from threading import current_thread
32from Cluster import *
33from EapTLS import TLSAuthTest
34from ACL import ACLTest
35from OnosLog import OnosLog
36from CordLogger import CordLogger
37import os
38import json
39import random
40import collections
41log.setLevel('INFO')
42
43class cluster_exchange(CordLogger):
44    test_path = os.path.dirname(os.path.realpath(__file__))
45 onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
46 mac = RandMAC()._fix()
47 flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
48 igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
49 igmp_ip = IP(dst = '224.0.0.22')
50 ONOS_INSTANCES = 3
51 V_INF1 = 'veth0'
52 TLS_TIMEOUT = 100
53 device_id = 'of:' + get_mac()
54 igmp = cluster_igmp()
55 igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
56 igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
57 tls = cluster_tls()
58 flows = cluster_flows()
59 proxyarp = cluster_proxyarp()
60 vrouter = cluster_vrouter()
61 acl = cluster_acl()
62 dhcprelay = cluster_dhcprelay()
63 subscriber = cluster_subscriber()
64    testcaseLoggers = ('test_cluster_controller_restarts',)
65
66 def setUp(self):
67 if self._testMethodName not in self.testcaseLoggers:
68 super(cluster_exchange, self).setUp()
69
70 def tearDown(self):
71 if self._testMethodName not in self.testcaseLoggers:
72 super(cluster_exchange, self).tearDown()
73
74 def get_controller(self):
75 controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
76 controller = controller.split(',')[0]
77 return controller
78
79    @classmethod
80 def get_controllers(cls):
81 controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
82 return controllers.split(',')
83
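    # Note (illustrative, not executed): ONOS_CONTROLLER_IP is expected to hold a comma separated
    # list of controller IPs, e.g. ONOS_CONTROLLER_IP=172.17.0.2,172.17.0.3,172.17.0.4 (the addresses
    # here are made up). get_controller() picks the first entry, get_controllers() returns them all.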
84    def cliEnter(self,controller = None):
85 retries = 0
86 while retries < 3:
87 self.cli = OnosCliDriver(controller = controller,connect = True)
88 if self.cli.handle:
89 break
90 else:
91 retries += 1
92 time.sleep(2)
93
94 def cliExit(self):
95 self.cli.disconnect()
96
97    def get_leader(self, controller = None):
98 self.cliEnter(controller = controller)
99        try:
100 result = json.loads(self.cli.leaders(jsonFormat = True))
101 except:
102 result = None
103
104        if result is None:
105 log.info('Leaders command failure for controller %s' %controller)
106 else:
107 log.info('Leaders returned: %s' %result)
108 self.cliExit()
109 return result
110
111 def get_leaders(self, controller = None):
112        result_map = {}
113 if controller is None:
114 controller = self.get_controller()
115        if type(controller) in [ list, tuple ]:
116 for c in controller:
117 leaders = self.get_leader(controller = c)
118                result_map[c] = leaders
119        else:
120 leaders = self.get_leader(controller = controller)
121            result_map[controller] = leaders
122 return result_map
123
124    def verify_leaders(self, controller = None):
125        leaders_map = self.get_leaders(controller = controller)
126 failed = [ k for k,v in leaders_map.items() if v == None ]
127        return failed
128
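    # verify_leaders() returns the controllers whose 'leaders' query came back empty, so an empty
    # list means every queried node answered. Minimal usage sketch (IPs are hypothetical):
    #   failed = self.verify_leaders(controller = ['172.17.0.2', '172.17.0.3'])
    #   assert_equal(len(failed), 0)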
129    def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
130 tries = 0
131 try:
132 self.cliEnter(controller = controller)
133 while tries <= 10:
134 cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
135 if cluster_summary:
136 log.info("cluster 'summary' command output is %s"%cluster_summary)
137 nodes = cluster_summary['nodes']
138 if verify:
139 if nodes == onos_instances:
140 self.cliExit()
141 return True
142 else:
143 tries += 1
144 time.sleep(1)
145 else:
146 if nodes >= onos_instances:
147 self.cliExit()
148 return True
149 else:
150 tries += 1
151 time.sleep(1)
152 else:
153 tries += 1
154 time.sleep(1)
155 self.cliExit()
156 return False
157 except:
158            raise Exception('Failed to get cluster members')
159 return False
160
161    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
162        tries = 0
163 cluster_ips = []
164 try:
165 self.cliEnter(controller = controller)
166 while tries <= 10:
167 cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
168 if cluster_nodes:
169 log.info("cluster 'nodes' output is %s"%cluster_nodes)
170                    if nodes_filter:
171 cluster_nodes = nodes_filter(cluster_nodes)
172                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
173 self.cliExit()
174 cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
175 return cluster_ips
176 else:
177 tries += 1
178 self.cliExit()
179 return cluster_ips
180 except:
181 raise Exception('Failed to get cluster members')
182 return cluster_ips
183
184    def get_cluster_container_names_ips(self,controller=None):
185        onos_names_ips = {}
186 onos_ips = self.get_cluster_current_member_ips(controller=controller)
187 onos_names_ips[onos_ips[0]] = Onos.NAME
188 onos_names_ips[Onos.NAME] = onos_ips[0]
189 for i in range(1,len(onos_ips)):
190 name = '{0}-{1}'.format(Onos.NAME,i+1)
191 onos_names_ips[onos_ips[i]] = name
192 onos_names_ips[name] = onos_ips[i]
193
194 return onos_names_ips
195
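    # Sketch of the two-way map built above (names/IPs are hypothetical): assuming Onos.NAME is 'onos'
    # and a three node cluster, get_cluster_container_names_ips() would return something like
    #   {'172.17.0.2': 'onos',   'onos': '172.17.0.2',
    #    '172.17.0.3': 'onos-2', 'onos-2': '172.17.0.3',
    #    '172.17.0.4': 'onos-3', 'onos-3': '172.17.0.4'}
    # so a test can resolve a member IP to its container name before a shutdown/restart call.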
196 #identifying current master of a connected device, not tested
197 def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
198 master = None
199 standbys = []
200 tries = 0
201 try:
202 cli = self.cliEnter(controller = controller)
203 while tries <= 10:
204 roles = json.loads(self.cli.roles(jsonFormat = True))
205 log.info("cluster 'roles' command output is %s"%roles)
206 if roles:
207 for device in roles:
208 log.info('Verifying device info in line %s'%device)
209 if device['id'] == device_id:
210 master = str(device['master'])
211 standbys = map(lambda d: str(d), device['standbys'])
212 log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
213 self.cliExit()
214 return master, standbys
215 self.cliExit()
216 return master, standbys
217 else:
218 tries += 1
219                    time.sleep(1)
220 self.cliExit()
221 return master,standbys
222 except:
223 raise Exception('Failed to get cluster members')
224 return master,standbys
225
226 def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
227 ''' returns master and standbys of all the connected devices to ONOS cluster instance'''
228 device_dict = {}
229 tries = 0
230 try:
231 cli = self.cliEnter(controller = controller)
232 while tries <= 10:
233 device_dict = {}
234 roles = json.loads(self.cli.roles(jsonFormat = True))
235 log.info("cluster 'roles' command output is %s"%roles)
236 if roles:
237 for device in roles:
238 device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
239 for i in range(len(device_dict[device['id']]['standbys'])):
240 device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
241 log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
242 self.cliExit()
243 return device_dict
244 else:
245 tries += 1
246                    time.sleep(1)
247 self.cliExit()
248            return device_dict
249 except:
250 raise Exception('Failed to get cluster members')
251 return device_dict
252
253    #identify devices connected to the ONOS cluster, not tested
254 def get_cluster_connected_devices(self,controller=None):
255 '''returns all the devices connected to ONOS cluster'''
256 device_list = []
257 tries = 0
258 try:
259 cli = self.cliEnter(controller = controller)
260 while tries <= 10:
261 device_list = []
262 devices = json.loads(self.cli.devices(jsonFormat = True))
263 log.info("cluster 'devices' command output is %s"%devices)
264 if devices:
265 for device in devices:
266 log.info('device id is %s'%device['id'])
267 device_list.append(str(device['id']))
268 self.cliExit()
269 return device_list
270 else:
271 tries += 1
272 time.sleep(1)
273 self.cliExit()
274 return device_list
275 except:
276 raise Exception('Failed to get cluster members')
277 return device_list
278
279 def get_number_of_devices_of_master(self,controller=None):
280        '''returns master-device pairs, i.e. which master owns which devices'''
281 master_count = {}
282 try:
283 cli = self.cliEnter(controller = controller)
284 masters = json.loads(self.cli.masters(jsonFormat = True))
285 if masters:
286 for master in masters:
287 master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
288 return master_count
289 else:
290 return master_count
291        except:
292            raise Exception('Failed to get cluster members')
293 return master_count
294
295 def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
296 if new_master is None: return False
297        self.cliEnter(controller=controller)
298        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
299 command = self.cli.command(cmd = cmd, jsonFormat = False)
300 self.cliExit()
301 time.sleep(60)
302 master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
303 assert_equal(master,new_master)
304 log.info('Cluster master changed to %s successfully'%new_master)
305
306    def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
307        '''current master loses its mastership and hence a new master will be elected'''
308 self.cliEnter(controller=controller)
309 cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
310 command = self.cli.command(cmd = cmd, jsonFormat = False)
311 self.cliExit()
312 time.sleep(60)
313 new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
314 assert_not_equal(new_master_ip,master_ip)
315 log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
316 log.info('Cluster new master is %s'%new_master_ip)
317 return True
318
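    # For reference, the two helpers above drive the ONOS 'device-role' CLI command; a minimal sketch of
    # the commands issued (device id and node IP below are hypothetical):
    #   device-role of:0000000000000001 172.17.0.2 master   -> force mastership to 172.17.0.2
    #   device-role of:0000000000000001 172.17.0.2 none     -> withdraw mastership so a new master is elected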
319    def test_cluster_controller_restarts(self):
320        '''Test the cluster by repeatedly killing the controllers'''
321 controllers = self.get_controllers()
322 ctlr_len = len(controllers)
323 if ctlr_len <= 1:
324 log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
325 assert_greater(ctlr_len, 1)
326
327 #this call would verify the cluster for once
328 onos_map = self.get_cluster_container_names_ips()
329
330        def check_exception(controller = None):
331            adjacent_controller = None
332 adjacent_controllers = None
333 if controller:
334                adjacent_controllers = list(set(controllers) - set([controller]))
335 adjacent_controller = adjacent_controllers[0]
336            for node in controllers:
337 onosLog = OnosLog(host = node)
338 ##check the logs for storage exception
339 _, output = onosLog.get_log(('ERROR', 'Exception',))
340                if output and output.find('StorageException$Timeout') >= 0:
341 log.info('\nStorage Exception Timeout found on node: %s\n' %node)
342 log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
343 log.info('\n' + '-' * 50 + '\n')
344                    log.info('%s' %output)
345                    log.info('\n' + '-' * 50 + '\n')
346 failed = self.verify_leaders(controllers)
347 if failed:
348                        log.info('Leaders command failed on nodes: %s' %failed)
349                    assert_equal(len(failed), 0)
350                    return controller
351
352 try:
353                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
354                log.info('ONOS cluster formed with controllers: %s' %ips)
355                st = True
356 except:
357 st = False
358
359            failed = self.verify_leaders(controllers)
360            assert_equal(len(failed), 0)
361            if st is False:
362 log.info('No storage exception and ONOS cluster was not formed successfully')
363 else:
364 controller = None
365
366 return controller
367
368 next_controller = None
369 tries = 10
370 for num in range(tries):
371 index = num % ctlr_len
372 #index = random.randrange(0, ctlr_len)
373            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
374 controller = onos_map[controller_name]
375 log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
376            try:
377                cord_test_onos_restart(node = controller_name, timeout = 0)
378                time.sleep(60)
379            except:
380 time.sleep(5)
381 continue
382            next_controller = check_exception(controller = controller)
383
384    def test_cluster_single_controller_restarts(self):
385 '''Test the cluster by repeatedly restarting the same controller'''
386 controllers = self.get_controllers()
387 ctlr_len = len(controllers)
388 if ctlr_len <= 1:
389 log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
390 assert_greater(ctlr_len, 1)
391
392 #this call would verify the cluster for once
393 onos_map = self.get_cluster_container_names_ips()
394
395 def check_exception(controller, inclusive = False):
396 adjacent_controllers = list(set(controllers) - set([controller]))
397 adjacent_controller = adjacent_controllers[0]
398 controller_list = adjacent_controllers if inclusive == False else controllers
399 storage_exceptions = []
400 for node in controller_list:
401 onosLog = OnosLog(host = node)
402 ##check the logs for storage exception
403 _, output = onosLog.get_log(('ERROR', 'Exception',))
404 if output and output.find('StorageException$Timeout') >= 0:
405 log.info('\nStorage Exception Timeout found on node: %s\n' %node)
406 log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
407 log.info('\n' + '-' * 50 + '\n')
408 log.info('%s' %output)
409 log.info('\n' + '-' * 50 + '\n')
410 storage_exceptions.append(node)
411
412 failed = self.verify_leaders(controller_list)
413 if failed:
414 log.info('Leaders command failed on nodes: %s' %failed)
415 if storage_exceptions:
416 log.info('Storage exception seen on nodes: %s' %storage_exceptions)
417 assert_equal(len(failed), 0)
418 return controller
419
420 for ctlr in controller_list:
421 ips = self.get_cluster_current_member_ips(controller = ctlr,
422 nodes_filter = \
423 lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
424 log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
425 if controller in ips and inclusive is False:
426 log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
427 if controller not in ips and inclusive is True:
428                    log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))
429
430 return controller
431
432 tries = 10
433        #choose a random controller for shutdown/restarts
434 controller = controllers[random.randrange(0, ctlr_len)]
435 controller_name = onos_map[controller]
436 for num in range(tries):
437 index = num % ctlr_len
438 log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
439 try:
440 cord_test_onos_shutdown(node = controller_name)
441 time.sleep(20)
442 except:
443 time.sleep(5)
444 continue
445 #check for exceptions on the adjacent nodes
446 check_exception(controller)
447 #Now restart the controller back
448 log.info('Restarting back the controller %s' %controller_name)
449 cord_test_onos_restart(node = controller_name)
450 time.sleep(60)
451 check_exception(controller, inclusive = True)
452
453    #pass
454    def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
455 status = self.verify_cluster_status(onos_instances = onos_instances)
456 assert_equal(status, True)
457 log.info('Cluster exists with %d ONOS instances'%onos_instances)
458
459 #nottest cluster not coming up properly if member goes down
460    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
461        status = self.verify_cluster_status(onos_instances = onos_instances)
462 assert_equal(status, True)
463 onos_ips = self.get_cluster_current_member_ips()
464 onos_instances = len(onos_ips)+add
465 log.info('Adding %d nodes to the ONOS cluster' %add)
466 cord_test_onos_add_cluster(count = add)
467 status = self.verify_cluster_status(onos_instances=onos_instances)
468 assert_equal(status, True)
469
470    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
471        status = self.verify_cluster_status(onos_instances = onos_instances)
472 assert_equal(status, True)
473 master, standbys = self.get_cluster_current_master_standbys()
474 assert_equal(len(standbys),(onos_instances-1))
475 onos_names_ips = self.get_cluster_container_names_ips()
476 master_onos_name = onos_names_ips[master]
477 log.info('Removing cluster current master %s'%(master))
478 cord_test_onos_shutdown(node = master_onos_name)
479 time.sleep(60)
480 onos_instances -= 1
481 status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
482 assert_equal(status, True)
483 new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
484 assert_not_equal(master,new_master)
485        log.info('Successfully removed cluster master instance')
486
487    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
488        status = self.verify_cluster_status(onos_instances = onos_instances)
489 assert_equal(status, True)
490 master, standbys = self.get_cluster_current_master_standbys()
491 assert_equal(len(standbys),(onos_instances-1))
492 onos_names_ips = self.get_cluster_container_names_ips()
493 member_onos_name = onos_names_ips[standbys[0]]
494 log.info('Removing cluster member %s'%standbys[0])
495 cord_test_onos_shutdown(node = member_onos_name)
496 time.sleep(60)
497 onos_instances -= 1
498 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
499 assert_equal(status, True)
500
501    def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
502        status = self.verify_cluster_status(onos_instances = onos_instances)
503 assert_equal(status, True)
504 master, standbys = self.get_cluster_current_master_standbys()
505 assert_equal(len(standbys),(onos_instances-1))
506 onos_names_ips = self.get_cluster_container_names_ips()
507 member1_onos_name = onos_names_ips[standbys[0]]
508 member2_onos_name = onos_names_ips[standbys[1]]
509 log.info('Removing cluster member %s'%standbys[0])
510 cord_test_onos_shutdown(node = member1_onos_name)
511 log.info('Removing cluster member %s'%standbys[1])
512 cord_test_onos_shutdown(node = member2_onos_name)
513 time.sleep(60)
514 onos_instances = onos_instances - 2
515 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
516 assert_equal(status, True)
517
518    def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
519        status = self.verify_cluster_status(onos_instances = onos_instances)
520 assert_equal(status, True)
521 master, standbys = self.get_cluster_current_master_standbys()
522 assert_equal(len(standbys),(onos_instances-1))
523 onos_names_ips = self.get_cluster_container_names_ips()
524 for i in range(remove):
525 member_onos_name = onos_names_ips[standbys[i]]
526 log.info('Removing onos container with name %s'%standbys[i])
527 cord_test_onos_shutdown(node = member_onos_name)
528 time.sleep(60)
529 onos_instances = onos_instances - remove
530 status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
531 assert_equal(status, True)
532
533 #nottest test cluster not coming up properly if member goes down
534    def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
535        status = self.verify_cluster_status(onos_instances = onos_instances)
536 assert_equal(status, True)
537 onos_ips = self.get_cluster_current_member_ips()
538 onos_instances = len(onos_ips)+add
539 log.info('Adding %d ONOS instances to the cluster'%add)
540 cord_test_onos_add_cluster(count = add)
541 status = self.verify_cluster_status(onos_instances=onos_instances)
542 assert_equal(status, True)
543 log.info('Removing %d ONOS instances from the cluster'%remove)
544 for i in range(remove):
545 name = '{}-{}'.format(Onos.NAME, onos_instances - i)
546 log.info('Removing onos container with name %s'%name)
547 cord_test_onos_shutdown(node = name)
548 time.sleep(60)
549 onos_instances = onos_instances-remove
550 status = self.verify_cluster_status(onos_instances=onos_instances)
551 assert_equal(status, True)
552
553 #nottest cluster not coming up properly if member goes down
554    def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
555        status = self.verify_cluster_status(onos_instances = onos_instances)
556 assert_equal(status, True)
557 onos_ips = self.get_cluster_current_member_ips()
558 onos_instances = onos_instances-remove
559 log.info('Removing %d ONOS instances from the cluster'%remove)
560 for i in range(remove):
561 name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
562 log.info('Removing onos container with name %s'%name)
563 cord_test_onos_shutdown(node = name)
564 time.sleep(60)
565 status = self.verify_cluster_status(onos_instances=onos_instances)
566 assert_equal(status, True)
567 log.info('Adding %d ONOS instances to the cluster'%add)
568 cord_test_onos_add_cluster(count = add)
569 onos_instances = onos_instances+add
570 status = self.verify_cluster_status(onos_instances=onos_instances)
571 assert_equal(status, True)
572
573    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
574        status = self.verify_cluster_status(onos_instances = onos_instances)
575 assert_equal(status, True)
576 log.info('Restarting cluster')
577 cord_test_onos_restart()
578 status = self.verify_cluster_status(onos_instances = onos_instances)
579 assert_equal(status, True)
580
581    def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
582        status = self.verify_cluster_status(onos_instances = onos_instances)
583 assert_equal(status, True)
584 master, standbys = self.get_cluster_current_master_standbys()
585 onos_names_ips = self.get_cluster_container_names_ips()
586 master_onos_name = onos_names_ips[master]
587 log.info('Restarting cluster master %s'%master)
588 cord_test_onos_restart(node = master_onos_name)
589 status = self.verify_cluster_status(onos_instances = onos_instances)
590 assert_equal(status, True)
591 log.info('Cluster came up after master restart as expected')
592
593 #test fail. master changing after restart. Need to check correct behavior.
594    def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
595        status = self.verify_cluster_status(onos_instances = onos_instances)
596 assert_equal(status, True)
597 master1, standbys = self.get_cluster_current_master_standbys()
598 onos_names_ips = self.get_cluster_container_names_ips()
599 master_onos_name = onos_names_ips[master1]
600        log.info('Restarting cluster master %s'%master1)
601 cord_test_onos_restart(node = master_onos_name)
602 status = self.verify_cluster_status(onos_instances = onos_instances)
603 assert_equal(status, True)
604 master2, standbys = self.get_cluster_current_master_standbys()
605 assert_equal(master1,master2)
606 log.info('Cluster master is same before and after cluster master restart as expected')
607
608    def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
609        status = self.verify_cluster_status(onos_instances = onos_instances)
610 assert_equal(status, True)
611 master, standbys = self.get_cluster_current_master_standbys()
612 assert_equal(len(standbys),(onos_instances-1))
613 onos_names_ips = self.get_cluster_container_names_ips()
614 member_onos_name = onos_names_ips[standbys[0]]
615 log.info('Restarting cluster member %s'%standbys[0])
616 cord_test_onos_restart(node = member_onos_name)
617 status = self.verify_cluster_status(onos_instances = onos_instances)
618 assert_equal(status, True)
619 log.info('Cluster came up as expected after restarting one member')
620
621    def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
622        status = self.verify_cluster_status(onos_instances = onos_instances)
623 assert_equal(status, True)
624 master, standbys = self.get_cluster_current_master_standbys()
625 assert_equal(len(standbys),(onos_instances-1))
626 onos_names_ips = self.get_cluster_container_names_ips()
627 member1_onos_name = onos_names_ips[standbys[0]]
628 member2_onos_name = onos_names_ips[standbys[1]]
629 log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
630 cord_test_onos_restart(node = member1_onos_name)
631 cord_test_onos_restart(node = member2_onos_name)
632 status = self.verify_cluster_status(onos_instances = onos_instances)
633 assert_equal(status, True)
634 log.info('Cluster came up as expected after restarting two members')
635
636    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
637        status = self.verify_cluster_status(onos_instances = onos_instances)
638 assert_equal(status,True)
639 master, standbys = self.get_cluster_current_master_standbys()
640 assert_equal(len(standbys),(onos_instances-1))
641 onos_names_ips = self.get_cluster_container_names_ips()
642 for i in range(members):
643 member_onos_name = onos_names_ips[standbys[i]]
644 log.info('Restarting cluster member %s'%standbys[i])
645 cord_test_onos_restart(node = member_onos_name)
646
647 status = self.verify_cluster_status(onos_instances = onos_instances)
648 assert_equal(status, True)
649 log.info('Cluster came up as expected after restarting %d members'%members)
650
651    def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
652        status = self.verify_cluster_status(onos_instances=onos_instances)
653 assert_equal(status, True)
654 master, standbys = self.get_cluster_current_master_standbys()
655 assert_equal(len(standbys),(onos_instances-1))
656        log.info('Cluster current master of devices is %s'%master)
657        self.change_master_current_cluster(new_master=standbys[0])
658 log.info('Cluster master changed successfully')
659
660 #tested on single onos setup.
661    def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
662        status = self.verify_cluster_status(onos_instances = onos_instances)
663 assert_equal(status, True)
664 onos_ips = self.get_cluster_current_member_ips()
665 self.vrouter.setUpClass()
666 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
667 assert_equal(res, True)
668 for onos_ip in onos_ips:
669 tries = 0
670 flag = False
671 try:
672 self.cliEnter(controller = onos_ip)
673 while tries <= 5:
674 routes = json.loads(self.cli.routes(jsonFormat = True))
675 if routes:
676 assert_equal(len(routes['routes4']), networks)
677 self.cliExit()
678 flag = True
679 break
680 else:
681 tries += 1
682 time.sleep(1)
683 assert_equal(flag, True)
684 except:
685            log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
686 raise
687
688 #tested on single onos setup.
689    def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
690        status = self.verify_cluster_status(onos_instances = onos_instances)
691 assert_equal(status, True)
692 onos_ips = self.get_cluster_current_member_ips()
693 master, standbys = self.get_cluster_current_master_standbys()
694 onos_names_ips = self.get_cluster_container_names_ips()
695 master_onos_name = onos_names_ips[master]
696 self.vrouter.setUpClass()
697 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
698 assert_equal(res,True)
699 cord_test_onos_shutdown(node = master_onos_name)
700 time.sleep(60)
701        log.info('Verifying vrouter traffic after cluster master is down')
702        self.vrouter.vrouter_traffic_verify()
703
704 #tested on single onos setup.
705    def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
706        status = self.verify_cluster_status(onos_instances = onos_instances)
707 assert_equal(status, True)
708 onos_ips = self.get_cluster_current_member_ips()
709 master, standbys = self.get_cluster_current_master_standbys()
710 onos_names_ips = self.get_cluster_container_names_ips()
711 master_onos_name = onos_names_ips[master]
712 self.vrouter.setUpClass()
713 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
714 assert_equal(res, True)
715 cord_test_onos_restart()
716 self.vrouter.vrouter_traffic_verify()
717
718 #tested on single onos setup.
719    def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
720        status = self.verify_cluster_status(onos_instances = onos_instances)
721 assert_equal(status, True)
722 self.vrouter.setUpClass()
723 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
724 assert_equal(res, True)
725 self.vrouter.vrouter_activate(deactivate=True)
726 time.sleep(15)
727 self.vrouter.vrouter_traffic_verify(positive_test=False)
728 self.vrouter.vrouter_activate(deactivate=False)
729
730 #tested on single onos setup.
731    def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
732        status = self.verify_cluster_status(onos_instances = onos_instances)
733 assert_equal(status, True)
734 master, standbys = self.get_cluster_current_master_standbys()
735 onos_names_ips = self.get_cluster_container_names_ips()
736 master_onos_name = onos_names_ips[master]
737 self.vrouter.setUpClass()
738 log.info('Verifying vrouter before master down')
739 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
740 assert_equal(res, True)
741 self.vrouter.vrouter_activate(deactivate=True)
742 log.info('Verifying vrouter traffic after app deactivated')
743 time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
744 self.vrouter.vrouter_traffic_verify(positive_test=False)
745 log.info('Verifying vrouter traffic after master down')
746 cord_test_onos_shutdown(node = master_onos_name)
747 time.sleep(60)
748 self.vrouter.vrouter_traffic_verify(positive_test=False)
749 self.vrouter.vrouter_activate(deactivate=False)
750
751 #tested on single onos setup.
752    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
753        status = self.verify_cluster_status(onos_instances = onos_instances)
754 assert_equal(status, True)
755 master, standbys = self.get_cluster_current_master_standbys()
756 onos_names_ips = self.get_cluster_container_names_ips()
757 member_onos_name = onos_names_ips[standbys[0]]
758 self.vrouter.setUpClass()
759 log.info('Verifying vrouter before cluster member down')
760 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
761 assert_equal(res, True) # Expecting vrouter should work properly
762 log.info('Verifying vrouter after cluster member down')
763 cord_test_onos_shutdown(node = member_onos_name)
764 time.sleep(60)
765 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down
766
767 #tested on single onos setup.
768    def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
769        status = self.verify_cluster_status(onos_instances = onos_instances)
770 assert_equal(status, True)
771 master, standbys = self.get_cluster_current_master_standbys()
772 onos_names_ips = self.get_cluster_container_names_ips()
773 member_onos_name = onos_names_ips[standbys[1]]
774 self.vrouter.setUpClass()
775 log.info('Verifying vrouter traffic before cluster member restart')
776 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
777 assert_equal(res, True) # Expecting vrouter should work properly
778 cord_test_onos_restart(node = member_onos_name)
779 log.info('Verifying vrouter traffic after cluster member restart')
780 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
781
782 #tested on single onos setup.
783    def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
784        status = self.verify_cluster_status(onos_instances = onos_instances)
785 assert_equal(status, True)
786 self.vrouter.setUpClass()
787 log.info('Verifying vrouter traffic before cluster restart')
788 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
789 assert_equal(res, True) # Expecting vrouter should work properly
790 cord_test_onos_restart()
791 log.info('Verifying vrouter traffic after cluster restart')
792 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
793
794
795 #test fails because flow state is in pending_add in onos
796    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
797        status = self.verify_cluster_status(onos_instances = onos_instances)
798 assert_equal(status, True)
799 master, standbys = self.get_cluster_current_master_standbys()
800 onos_names_ips = self.get_cluster_container_names_ips()
801 master_onos_name = onos_names_ips[master]
802 self.flows.setUpClass()
803 egress = 1
804 ingress = 2
805 egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
806 ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
807 flow = OnosFlowCtrl(deviceId = self.device_id,
808 egressPort = egress,
809 ingressPort = ingress,
810 udpSrc = ingress_map['udp_port'],
811 udpDst = egress_map['udp_port'],
812 controller=master
813 )
814 result = flow.addFlow()
815 assert_equal(result, True)
816 time.sleep(1)
817 self.success = False
818 def mac_recv_task():
819 def recv_cb(pkt):
820 log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
821 self.success = True
822 sniff(timeout=2,
823 lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
824 and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])
825
826 for i in [0,1]:
827 if i == 1:
828 cord_test_onos_shutdown(node = master_onos_name)
829 log.info('Verifying flows traffic after master killed')
830 time.sleep(45)
831 else:
832 log.info('Verifying flows traffic before master killed')
833 t = threading.Thread(target = mac_recv_task)
834 t.start()
835 L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
836 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
837 L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
838 pkt = L2/L3/L4
839 log.info('Sending packets to verify if flows are correct')
840 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
841 t.join()
842 assert_equal(self.success, True)
843
844    def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
845        status = self.verify_cluster_status(onos_instances=onos_instances)
846 assert_equal(status, True)
847 master, standbys = self.get_cluster_current_master_standbys()
848 self.flows.setUpClass()
849 egress = 1
850 ingress = 2
851 egress_map = { 'ip': '192.168.30.1' }
852 ingress_map = { 'ip': '192.168.40.1' }
853 flow = OnosFlowCtrl(deviceId = self.device_id,
854 egressPort = egress,
855 ingressPort = ingress,
856 ecn = 1,
857 controller=master
858 )
859 result = flow.addFlow()
860 assert_equal(result, True)
861 ##wait for flows to be added to ONOS
862 time.sleep(1)
863 self.success = False
864 def mac_recv_task():
865 def recv_cb(pkt):
866 log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
867 self.success = True
868 sniff(count=2, timeout=5,
869 lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
870 and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
871 iface = self.flows.port_map[egress])
872 for i in [0,1]:
873 if i == 1:
874 log.info('Changing cluster master to %s'%standbys[0])
875 self.change_master_current_cluster(new_master=standbys[0])
876                log.info('Verifying flow traffic after cluster master changed')
877 else:
878 log.info('Verifying flow traffic before cluster master changed')
879 t = threading.Thread(target = mac_recv_task)
880 t.start()
881 L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
882 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
883 pkt = L2/L3
884 log.info('Sending a packet to verify if flows are correct')
885 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
886 t.join()
887 assert_equal(self.success, True)
888
889    #pass
890 def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
891 status = self.verify_cluster_status(onos_instances=onos_instances)
892 assert_equal(status, True)
893 master,standbys = self.get_cluster_current_master_standbys()
894 onos_names_ips = self.get_cluster_container_names_ips()
895 master_onos_name = onos_names_ips[master]
896 self.flows.setUpClass()
897 egress = 1
898 ingress = 2
899 egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
900 ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
901 flow = OnosFlowCtrl(deviceId = self.device_id,
902 egressPort = egress,
903 ingressPort = ingress,
904 ipv6_extension = 0,
905 controller=master
906 )
907
908 result = flow.addFlow()
909 assert_equal(result, True)
910 ##wait for flows to be added to ONOS
911 time.sleep(1)
912 self.success = False
913 def mac_recv_task():
914 def recv_cb(pkt):
915 log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
916 self.success = True
917 sniff(timeout=2,count=5,
918 lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
919 for i in [0,1]:
920 if i == 1:
921 log.info('Restart cluster current master %s'%master)
922 Container(master_onos_name,Onos.IMAGE).restart()
923 time.sleep(45)
924 log.info('Verifying flow traffic after master restart')
925 else:
926 log.info('Verifying flow traffic before master restart')
927 t = threading.Thread(target = mac_recv_task)
928 t.start()
929 L2 = self.flows_eth
930 L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
931 pkt = L2/L3
932 log.info('Sending packets to verify if flows are correct')
933 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
934 t.join()
935 assert_equal(self.success, True)
936
937 def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
938 dst_mac = self.igmp.iptomac(group)
939 eth = Ether(dst= dst_mac)
940 ip = IP(dst=group,src=source)
941 data = repr(monotonic.monotonic())
942 sendp(eth/ip/data,count=20, iface = intf)
943 pkt = (eth/ip/data)
944 log.info('multicast traffic packet %s'%pkt.show())
945
946 def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
947 log.info('verifying multicast traffic for group %s from source %s'%(group,source))
948 self.success = False
949 def recv_task():
950 def igmp_recv_cb(pkt):
951 log.info('multicast data received for group %s from source %s'%(group,source))
952 self.success = True
953            sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface=intf)
954 t = threading.Thread(target = recv_task)
955 t.start()
956 self.send_multicast_data_traffic(group,source=source)
957 t.join()
958 return self.success
959
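    # Minimal usage sketch for the two helpers above (group/source values are assumptions): join the group
    # on self.V_INF1, then confirm the multicast data path before exercising a cluster event.
    #   self.igmp.send_igmp_join(groups = ['224.2.3.4'], src_list = ['2.2.2.2'],
    #                            record_type = IGMP_V3_GR_TYPE_INCLUDE, iface = self.V_INF1, delay = 2)
    #   assert_equal(self.verify_igmp_data_traffic('224.2.3.4', intf = self.V_INF1, source = '2.2.2.2'), True)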
960 #pass
961 def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
962 status = self.verify_cluster_status(onos_instances=onos_instances)
963 assert_equal(status, True)
964 master, standbys = self.get_cluster_current_master_standbys()
965 assert_equal(len(standbys), (onos_instances-1))
966 onos_names_ips = self.get_cluster_container_names_ips()
967 master_onos_name = onos_names_ips[master]
968 self.igmp.setUp(controller=master)
969 groups = ['224.2.3.4','230.5.6.7']
970 src_list = ['2.2.2.2','3.3.3.3']
971 self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
972 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
973 iface = self.V_INF1, delay = 2)
974 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
975 iface = self.V_INF1, delay = 2)
976 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
977 assert_equal(status,True)
978 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
979 assert_equal(status,False)
980 log.info('restarting cluster master %s'%master)
981 Container(master_onos_name,Onos.IMAGE).restart()
982 time.sleep(60)
983 log.info('verifying multicast data traffic after master restart')
984 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
985 assert_equal(status,True)
986 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
987 assert_equal(status,False)
988
989 #pass
990 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
991 status = self.verify_cluster_status(onos_instances=onos_instances)
992 assert_equal(status, True)
993 master, standbys = self.get_cluster_current_master_standbys()
994 assert_equal(len(standbys), (onos_instances-1))
995 onos_names_ips = self.get_cluster_container_names_ips()
996 master_onos_name = onos_names_ips[master]
997 self.igmp.setUp(controller=master)
998 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
999 src_list = [self.igmp.randomsourceip()]
1000 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1001 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1002 iface = self.V_INF1, delay = 2)
1003 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1004 iface = self.V_INF1, delay = 2)
1005 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1006 assert_equal(status,True)
1007 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1008 assert_equal(status,False)
1009 log.info('Killing cluster master %s'%master)
1010 Container(master_onos_name,Onos.IMAGE).kill()
1011 time.sleep(60)
1012 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1013 assert_equal(status, True)
1014 log.info('Verifying multicast data traffic after cluster master down')
1015 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1016 assert_equal(status,True)
1017 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1018 assert_equal(status,False)
1019
1020 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
1021 status = self.verify_cluster_status(onos_instances=onos_instances)
1022 assert_equal(status, True)
1023 master, standbys = self.get_cluster_current_master_standbys()
1024 assert_equal(len(standbys), (onos_instances-1))
1025 onos_names_ips = self.get_cluster_container_names_ips()
1026 master_onos_name = onos_names_ips[master]
1027 self.igmp.setUp(controller=master)
1028 groups = [self.igmp.random_mcast_ip()]
1029 src_list = [self.igmp.randomsourceip()]
1030 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1031 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1032 iface = self.V_INF1, delay = 2)
1033 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1034 assert_equal(status,True)
1035        log.info('Killing cluster master %s'%master)
1036 Container(master_onos_name,Onos.IMAGE).kill()
1037 count = 0
1038 for i in range(60):
1039 log.info('Verifying multicast data traffic after cluster master down')
1040 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1041 if status:
1042 break
1043 else:
1044 count += 1
1045 time.sleep(1)
1046 assert_equal(status, True)
1047        log.info('Time taken to recover traffic after cluster master went down is %d seconds'%count)
1048
1049
1050 #pass
1051 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
1052 status = self.verify_cluster_status(onos_instances=onos_instances)
1053 assert_equal(status, True)
1054 master, standbys = self.get_cluster_current_master_standbys()
1055 assert_equal(len(standbys), (onos_instances-1))
1056 self.igmp.setUp(controller=master)
1057 groups = [self.igmp.random_mcast_ip()]
1058 src_list = [self.igmp.randomsourceip()]
1059 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1060 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1061 iface = self.V_INF1, delay = 2)
1062 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1063 assert_equal(status,True)
1064 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1065        self.change_master_current_cluster(new_master=standbys[0])
1066 log.info('Verifying multicast traffic after cluster master change')
1067 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1068 assert_equal(status,True)
1069 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
1070 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
1071 iface = self.V_INF1, delay = 1)
1072 time.sleep(10)
1073 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1074 assert_equal(status,False)
1075
1076 #pass
1077 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1078 status = self.verify_cluster_status(onos_instances=onos_instances)
1079 assert_equal(status, True)
1080 master,standbys = self.get_cluster_current_master_standbys()
1081 assert_equal(len(standbys), (onos_instances-1))
1082 self.igmp.setUp(controller=master)
1083 groups = [self.igmp.random_mcast_ip()]
1084 src_list = [self.igmp.randomsourceip()]
1085 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1086 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1087        self.change_master_current_cluster(new_master = standbys[0])
1088 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1089 iface = self.V_INF1, delay = 2)
1090 time.sleep(1)
1091        self.change_master_current_cluster(new_master = master)
1092 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1093 assert_equal(status,True)
1094
1095 #pass
1096    @deferred(TLS_TIMEOUT)
1097    def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
1098        status = self.verify_cluster_status(onos_instances=onos_instances)
1099 assert_equal(status, True)
1100 master, standbys = self.get_cluster_current_master_standbys()
1101 assert_equal(len(standbys), (onos_instances-1))
1102 self.tls.setUp(controller=master)
1103 df = defer.Deferred()
1104 def eap_tls_verify(df):
1105 tls = TLSAuthTest()
1106 tls.runTest()
1107 df.callback(0)
1108 reactor.callLater(0, eap_tls_verify, df)
1109 return df
1110
1111 @deferred(120)
1112    def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1113        master, standbys = self.get_cluster_current_master_standbys()
1114 assert_equal(len(standbys), (onos_instances-1))
1115 self.tls.setUp()
1116 df = defer.Deferred()
1117        def eap_tls_verify(df):
1118 tls = TLSAuthTest()
1119 tls.runTest()
1120 df.callback(0)
1121 for i in [0,1]:
1122 if i == 1:
1123 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1124 self.change_master_current_cluster(new_master=standbys[0])
1125 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1126 else:
1127 log.info('Verifying tls authentication before cluster master change')
1128 reactor.callLater(0, eap_tls_verify, df)
1129 return df
1130
1131 @deferred(TLS_TIMEOUT)
1132    def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
1133        status = self.verify_cluster_status(onos_instances=onos_instances)
1134 assert_equal(status, True)
1135 master, standbys = self.get_cluster_current_master_standbys()
1136 assert_equal(len(standbys), (onos_instances-1))
1137 onos_names_ips = self.get_cluster_container_names_ips()
1138 master_onos_name = onos_names_ips[master]
1139 self.tls.setUp()
1140 df = defer.Deferred()
1141 def eap_tls_verify(df):
1142 tls = TLSAuthTest()
1143 tls.runTest()
1144 df.callback(0)
1145 for i in [0,1]:
1146 if i == 1:
1147 log.info('Killing cluster current master %s'%master)
1148 cord_test_onos_shutdown(node = master_onos_name)
1149 time.sleep(20)
1150 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1151 assert_equal(status, True)
1152 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1153 log.info('Verifying tls authentication after killing cluster master')
1154 reactor.callLater(0, eap_tls_verify, df)
1155 return df
1156
1157 @deferred(TLS_TIMEOUT)
1158    def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
1159        status = self.verify_cluster_status(onos_instances=onos_instances)
1160 assert_equal(status, True)
1161 master, standbys = self.get_cluster_current_master_standbys()
1162 assert_equal(len(standbys), (onos_instances-1))
1163 onos_names_ips = self.get_cluster_container_names_ips()
1164 member_onos_name = onos_names_ips[standbys[0]]
1165 self.tls.setUp()
1166 df = defer.Deferred()
1167 def eap_tls_no_cert(df):
1168 def tls_no_cert_cb():
1169 log.info('TLS authentication failed with no certificate')
1170 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1171 tls.runTest()
1172 assert_equal(tls.failTest, True)
1173 df.callback(0)
1174 for i in [0,1]:
1175 if i == 1:
1176 log.info('Restart cluster member %s'%standbys[0])
1177 Container(member_onos_name,Onos.IMAGE).restart()
1178 time.sleep(20)
1179 status = self.verify_cluster_status(onos_instances=onos_instances)
1180 assert_equal(status, True)
1181 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1182 log.info('Verifying tls authentication after member restart')
1183 reactor.callLater(0, eap_tls_no_cert, df)
1184 return df
1185
ChetanGaonker689b3862016-10-17 16:25:01 -07001186 #pass
1187 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1188 status = self.verify_cluster_status(onos_instances=onos_instances)
1189 assert_equal(status,True)
1190 master,standbys = self.get_cluster_current_master_standbys()
1191 assert_equal(len(standbys),(onos_instances-1))
1192 self.proxyarp.setUpClass()
1193 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1194 ingress = hosts+1
1195 for hostip, hostmac in hosts_config:
1196 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1197 time.sleep(1)
1198 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1199 self.change_cluster_current_master(new_master=standbys[0])
1200 log.info('verifying proxyarp after master change')
1201 for hostip, hostmac in hosts_config:
1202 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1203 time.sleep(1)
1204 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1205 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1206 time.sleep(3)
1207 for hostip, hostmac in hosts_config:
1208 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1209 time.sleep(1)
1210 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1211 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1212 time.sleep(3)
1213 for hostip, hostmac in hosts_config:
1214 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1215 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001216
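    # The proxyarp tests rely on two helpers from the proxyarp cluster module:
    # proxyarp_config() pushes the host/interface configuration to the given
    # controller and returns (ports_map, egress_map, hosts_config), while
    # proxyarp_arpreply_verify() is expected to send an ARP request on the ingress
    # port and check that ONOS answers (PositiveTest=True) or stays silent
    # (PositiveTest=False), e.g. after the proxyarp app has been deactivated.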
ChetanGaonker689b3862016-10-17 16:25:01 -07001217 #pass
1218 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001219 status = self.verify_cluster_status(onos_instances=onos_instances)
1220 assert_equal(status, True)
1221 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001222 assert_equal(len(standbys), (onos_instances-1))
1223 onos_names_ips = self.get_cluster_container_names_ips()
1224 member_onos_name = onos_names_ips[standbys[1]]
1225 self.proxyarp.setUpClass()
1226 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1227 ingress = hosts+1
1228 for hostip, hostmac in hosts_config:
1229 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1230 time.sleep(1)
1231 log.info('killing cluster member %s'%standbys[1])
1232 Container(member_onos_name,Onos.IMAGE).kill()
1233 time.sleep(20)
1234 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1235 assert_equal(status, True)
1236 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1237 log.info('verifying proxy arp functionality after cluster member down')
1238 for hostip, hostmac in hosts_config:
1239 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1240 time.sleep(1)
1241
1242 #pass
1243 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1244 status = self.verify_cluster_status(onos_instances=onos_instances)
1245 assert_equal(status, True)
1246 self.proxyarp.setUpClass()
1247 master, standbys = self.get_cluster_current_master_standbys()
1248 assert_equal(len(standbys), (onos_instances-1))
1249 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1250 self.success = True
1251 ingress = hosts+1
1252 ports = range(ingress,ingress+10)
1253 hostmac = []
1254 hostip = []
1255 for ip,mac in hosts_config:
1256 hostmac.append(mac)
1257 hostip.append(ip)
1258 success_dir = {}
1259 def verify_proxyarp(*r):
1260 ingress, hostmac, hostip = r[0],r[1],r[2]
1261 def mac_recv_task():
1262 def recv_cb(pkt):
1263                  log.info('ARP reply seen with source MAC %s' %(pkt[ARP].hwsrc))
1264 success_dir[current_thread().name] = True
1265 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1266 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1267 t = threading.Thread(target = mac_recv_task)
1268 t.start()
1269 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1270 log.info('Sending arp request for dest ip %s on interface %s' %
1271 (hostip,self.proxyarp.port_map[ingress]))
1272 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1273 t.join()
1274 t = []
1275 for i in range(10):
1276 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1277 for i in range(10):
1278 t[i].start()
1279 time.sleep(2)
1280 for i in range(10):
1281 t[i].join()
1282 if len(success_dir) != 10:
1283 self.success = False
1284 assert_equal(self.success, True)
1285
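    # The concurrent proxyarp check above spawns one sniffer thread per host: each
    # thread filters for an ARP reply (op == 2) whose hwsrc matches the expected
    # host MAC on its own ingress interface and records success in success_dir keyed
    # by thread name, while sendp() issues the broadcast ARP requests. The test
    # passes only if all ten threads saw a reply.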
1286 #pass
1287 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1288 status = self.verify_cluster_status(onos_instances=onos_instances)
1289 assert_equal(status, True)
1290 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001291 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001292 self.acl.setUp()
1293 acl_rule = ACLTest()
1294 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1295 if status is False:
1296 log.info('JSON request returned status %d' %code)
1297 assert_equal(status, True)
1298 result = acl_rule.get_acl_rules(controller=master)
1299 aclRules1 = result.json()['aclRules']
1300 log.info('Added acl rules is %s'%aclRules1)
1301 acl_Id = map(lambda d: d['id'], aclRules1)
1302 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1303 self.change_cluster_current_master(new_master=standbys[0])
1304 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1305 if status is False:
1306 log.info('JSON request returned status %d' %code)
1307 assert_equal(status, True)
1308
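    # ACL rules are managed through the ACLTest REST helper used above and below:
    # adding_acl_rule() posts a v4 rule (srcIp/dstIp/action) to the given controller,
    # get_acl_rules() returns a response whose JSON carries the rule list under the
    # 'aclRules' key, and remove_acl_rule(id) deletes a rule by its id. These tests
    # assume the ACL store is replicated across the cluster, so a rule added via the
    # old master should remain visible after a master change or failover.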
1309 #pass
1310 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1311 status = self.verify_cluster_status(onos_instances=onos_instances)
1312 assert_equal(status, True)
1313 master,standbys = self.get_cluster_current_master_standbys()
1314 assert_equal(len(standbys),(onos_instances-1))
1315 onos_names_ips = self.get_cluster_container_names_ips()
1316 master_onos_name = onos_names_ips[master]
1317 self.acl.setUp()
1318 acl_rule = ACLTest()
1319 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1320 if status is False:
1321 log.info('JSON request returned status %d' %code)
1322 assert_equal(status, True)
1323 result1 = acl_rule.get_acl_rules(controller=master)
1324 aclRules1 = result1.json()['aclRules']
1325 log.info('Added acl rules is %s'%aclRules1)
1326 acl_Id1 = map(lambda d: d['id'], aclRules1)
1327 log.info('Killing cluster current master %s'%master)
1328 Container(master_onos_name,Onos.IMAGE).kill()
1329 time.sleep(45)
1330        status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1331 assert_equal(status, True)
1332 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1333 assert_equal(len(standbys),(onos_instances-2))
1334 assert_not_equal(new_master,master)
1335 result2 = acl_rule.get_acl_rules(controller=new_master)
1336 aclRules2 = result2.json()['aclRules']
1337 acl_Id2 = map(lambda d: d['id'], aclRules2)
1338 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1339 assert_equal(acl_Id2,acl_Id1)
1340
1341 #acl traffic scenario not working as acl rule is not getting added to onos
1342 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1343 status = self.verify_cluster_status(onos_instances=onos_instances)
1344 assert_equal(status, True)
1345 master,standbys = self.get_cluster_current_master_standbys()
1346 assert_equal(len(standbys),(onos_instances-1))
1347 onos_names_ips = self.get_cluster_container_names_ips()
1348 member1_onos_name = onos_names_ips[standbys[0]]
1349 member2_onos_name = onos_names_ips[standbys[1]]
1350 ingress = self.acl.ingress_iface
1351 egress = self.acl.CURRENT_PORT_NUM
1352 acl_rule = ACLTest()
1353 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1354 self.acl.CURRENT_PORT_NUM += 1
1355 time.sleep(5)
1356 if status is False:
1357 log.info('JSON request returned status %d' %code)
1358 assert_equal(status, True)
1359 srcMac = '00:00:00:00:00:11'
1360 dstMac = host_ip_mac[0][1]
1361 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1362 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1363 time.sleep(10)
1364 if status is False:
1365 log.info('JSON request returned status %d' %code)
1366 assert_equal(status, True)
1367 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1368 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1369 Container(member1_onos_name, Onos.IMAGE).kill()
1370 Container(member2_onos_name, Onos.IMAGE).kill()
1371 time.sleep(40)
1372 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1373 assert_equal(status, True)
1374 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1375 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1376
1377 #pass
1378 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1379 status = self.verify_cluster_status(onos_instances=onos_instances)
1380 assert_equal(status, True)
1381 master,standbys = self.get_cluster_current_master_standbys()
1382 assert_equal(len(standbys),(onos_instances-1))
1383 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001384 mac = self.dhcprelay.get_mac(iface)
1385 self.dhcprelay.host_load(iface)
1386 ##we use the defaults for this test that serves as an example for others
1387 ##You don't need to restart dhcpd server if retaining default config
1388 config = self.dhcprelay.default_config
1389 options = self.dhcprelay.default_options
1390 subnet = self.dhcprelay.default_subnet_config
1391 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1392 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1393 config = config,
1394 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001395 subnet = subnet,
1396 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001397 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1398 cip, sip = self.dhcprelay.send_recv(mac)
1399 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1400 self.change_master_current_cluster(new_master=standbys[0])
1401 log.info('Releasing ip %s to server %s' %(cip, sip))
1402 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1403 log.info('Triggering DHCP discover again after release')
1404 cip2, sip2 = self.dhcprelay.send_recv(mac)
1405 log.info('Verifying released IP was given back on rediscover')
1406 assert_equal(cip, cip2)
1407 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1408 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001409 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001410
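    # DHCP relay expectation exercised above: a release followed by a fresh discover
    # from the same client MAC should hand back the previously allocated address, and
    # this is expected to hold across a cluster master change because the lease state
    # lives on the dhcpd server started via dhcpd_start(), not on the ONOS master.
    # That rationale is an assumption; the test itself only asserts cip == cip2.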
ChetanGaonker689b3862016-10-17 16:25:01 -07001411
1412 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1413 status = self.verify_cluster_status(onos_instances=onos_instances)
1414 assert_equal(status, True)
1415 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001416 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001417 onos_names_ips = self.get_cluster_container_names_ips()
1418 master_onos_name = onos_names_ips[master]
1419 self.dhcprelay.setUpClass(controller=master)
1420 mac = self.dhcprelay.get_mac(iface)
1421 self.dhcprelay.host_load(iface)
1422 ##we use the defaults for this test that serves as an example for others
1423 ##You don't need to restart dhcpd server if retaining default config
1424 config = self.dhcprelay.default_config
1425 options = self.dhcprelay.default_options
1426 subnet = self.dhcprelay.default_subnet_config
1427 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1428 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1429 config = config,
1430 options = options,
1431 subnet = subnet,
1432 controller=master)
1433 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1434 log.info('Initiating dhcp process from client %s'%mac)
1435 cip, sip = self.dhcprelay.send_recv(mac)
1436 log.info('Killing cluster current master %s'%master)
1437 Container(master_onos_name, Onos.IMAGE).kill()
1438 time.sleep(60)
1439 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1440 assert_equal(status, True)
1441 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1442        log.info("Verifying dhcp client gets the same IP after cluster master goes down")
1443 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1444 assert_equal(new_cip, cip)
1445 self.dhcprelay.tearDownClass(controller=standbys[0])
1446
1447 #pass
1448 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1449 status = self.verify_cluster_status(onos_instances=onos_instances)
1450 assert_equal(status, True)
1451 master,standbys = self.get_cluster_current_master_standbys()
1452 assert_equal(len(standbys),(onos_instances-1))
1453 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001454 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1455 self.dhcprelay.host_load(iface)
1456 ##we use the defaults for this test that serves as an example for others
1457 ##You don't need to restart dhcpd server if retaining default config
1458 config = self.dhcprelay.default_config
1459 options = self.dhcprelay.default_options
1460 subnet = self.dhcprelay.default_subnet_config
1461 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1462 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1463 config = config,
1464 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001465 subnet = subnet,
1466 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001467 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1468 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1469 assert_not_equal(cip1,None)
1470 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1471 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1472 self.change_master_current_cluster(new_master=standbys[0])
1473 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1474 assert_not_equal(cip2,None)
1475 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1476        log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1477        self.change_master_current_cluster(new_master=master)
1478 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1479 assert_not_equal(cip3,None)
1480        log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001481 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001482
ChetanGaonker689b3862016-10-17 16:25:01 -07001483 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001484 status = self.verify_cluster_status(onos_instances=onos_instances)
1485 assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001486        self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001487 self.subscriber.num_subscribers = 5
1488 self.subscriber.num_channels = 10
1489 for i in [0,1]:
1490 if i == 1:
1491 cord_test_onos_restart()
1492 time.sleep(45)
1493 status = self.verify_cluster_status(onos_instances=onos_instances)
1494 assert_equal(status, True)
1495 log.info('Verifying cord subscriber functionality after cluster restart')
1496 else:
1497 log.info('Verifying cord subscriber functionality before cluster restart')
1498 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1499 num_channels = self.subscriber.num_channels,
1500 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1501 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1502 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1503 self.subscriber.num_channels))
1504 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001505 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001506
ChetanGaonker689b3862016-10-17 16:25:01 -07001507 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1508 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1509 status = self.verify_cluster_status(onos_instances=onos_instances)
1510 assert_equal(status, True)
1511 master,standbys = self.get_cluster_current_master_standbys()
1512 assert_equal(len(standbys),(onos_instances-1))
1513 self.subscriber.setUpClass(controller=master)
1514 self.subscriber.num_subscribers = 5
1515 self.subscriber.num_channels = 10
1516 for i in [0,1]:
1517 if i == 1:
1518 status=self.withdraw_cluster_current_mastership(master_ip=master)
1519               assert_equal(status, True)
1520 master,standbys = self.get_cluster_current_master_standbys()
1521                  log.info('verifying cord subscriber functionality after current cluster master withdraws mastership')
1522 else:
1523                  log.info('verifying cord subscriber functionality before cluster master withdraws mastership')
1524 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1525 num_channels = self.subscriber.num_channels,
1526 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1527 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1528 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1529 self.subscriber.num_channels),controller=master)
1530 assert_equal(test_status, True)
1531 self.subscriber.tearDownClass(controller=master)
1532
1533 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1534 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001535 status = self.verify_cluster_status(onos_instances=onos_instances)
1536 assert_equal(status, True)
1537 master, standbys = self.get_cluster_current_master_standbys()
1538 assert_equal(len(standbys),(onos_instances-1))
1539 onos_names_ips = self.get_cluster_container_names_ips()
1540 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001541 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001542 num_subscribers = 1
1543 num_channels = 10
1544 for i in [0,1]:
1545 if i == 1:
1546 cord_test_onos_shutdown(node = member_onos_name)
1547 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001548 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001549 assert_equal(status, True)
1550 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1551 else:
1552 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1553 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1554 num_channels = num_channels,
1555 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1556 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1557 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001558 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001559 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001560 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001561
ChetanGaonker689b3862016-10-17 16:25:01 -07001562 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001563 status = self.verify_cluster_status(onos_instances=onos_instances)
1564 assert_equal(status, True)
1565 master, standbys = self.get_cluster_current_master_standbys()
1566 assert_equal(len(standbys),(onos_instances-1))
1567 onos_names_ips = self.get_cluster_container_names_ips()
1568 member1_onos_name = onos_names_ips[standbys[0]]
1569 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001570 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001571 num_subscribers = 1
1572 num_channels = 10
1573 for i in [0,1]:
1574 if i == 1:
1575 cord_test_onos_shutdown(node = member1_onos_name)
1576 cord_test_onos_shutdown(node = member2_onos_name)
1577 time.sleep(60)
1578 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1579 assert_equal(status, True)
1580                   log.info('Verifying cord subscriber functionality after cluster members %s and %s are down'%(standbys[0],standbys[1]))
1581 else:
1582                   log.info('Verifying cord subscriber functionality before cluster members %s and %s are down'%(standbys[0],standbys[1]))
1583 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1584 num_channels = num_channels,
1585 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1586 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1587 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1588 negative_subscriber_auth = 'all')
1589 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001590 self.subscriber.tearDownClass(controller=master)
1591
1592 #pass
1593 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1594 status = self.verify_cluster_status(onos_instances=onos_instances)
1595 assert_equal(status, True)
1596 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1597 for device in device_dict.keys():
1598 log.info("Device is %s"%device_dict[device])
1599 assert_not_equal(device_dict[device]['master'],'none')
1600 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1601 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1602
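    # get_cluster_current_master_standbys_of_connected_devices() is expected to
    # return a dict keyed by device id, where each value carries the 'master'
    # controller IP and a 'standbys' list, e.g. (illustrative shape only):
    #   {'of:0000000000000001': {'master': '172.17.0.2',
    #                            'standbys': ['172.17.0.3', '172.17.0.4']}}
    # The multi-switch tests assert one master plus onos_instances-1 standbys
    # per connected device.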
1603 #pass
1604 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1605 status = self.verify_cluster_status(onos_instances=onos_instances)
1606 assert_equal(status, True)
1607 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1608 cluster_ips = self.get_cluster_current_member_ips()
1609 for ip in cluster_ips:
1610 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1611 assert_equal(len(device_dict.keys()),onos_instances)
1612 for device in device_dict.keys():
1613 log.info("Device is %s"%device_dict[device])
1614 assert_not_equal(device_dict[device]['master'],'none')
1615 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1616 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1617
1618 #pass
1619 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1620 status = self.verify_cluster_status(onos_instances=onos_instances)
1621 assert_equal(status, True)
1622 onos_names_ips = self.get_cluster_container_names_ips()
1623 master_count = self.get_number_of_devices_of_master()
1624 log.info('Master count information is %s'%master_count)
1625 total_devices = 0
1626 for master in master_count.keys():
1627 total_devices += master_count[master]['size']
1628 if master_count[master]['size'] != 0:
1629 restart_ip = master
1630 assert_equal(total_devices,onos_instances)
1631 member_onos_name = onos_names_ips[restart_ip]
1632 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1633 Container(member_onos_name, Onos.IMAGE).restart()
1634 time.sleep(40)
1635 master_count = self.get_number_of_devices_of_master()
1636 log.info('Master count information after restart is %s'%master_count)
1637 total_devices = 0
1638 for master in master_count.keys():
1639 total_devices += master_count[master]['size']
1640 if master == restart_ip:
1641 assert_equal(master_count[master]['size'], 0)
1642 assert_equal(total_devices,onos_instances)
1643
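    # get_number_of_devices_of_master() is assumed to return, per controller IP,
    # a dict with 'size' (number of devices mastered by that node) and 'devices'
    # (their ids). The mastership tests below therefore check that the sizes sum
    # to the total device count and that a restarted, killed, or withdrawn node
    # ends up mastering fewer (or zero) devices afterwards.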
1644 #pass
1645 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1646 status = self.verify_cluster_status(onos_instances=onos_instances)
1647 assert_equal(status, True)
1648 onos_names_ips = self.get_cluster_container_names_ips()
1649 master_count = self.get_number_of_devices_of_master()
1650 log.info('Master count information is %s'%master_count)
1651 total_devices = 0
1652 for master in master_count.keys():
1653 total_devices += master_count[master]['size']
1654 if master_count[master]['size'] != 0:
1655 restart_ip = master
1656 assert_equal(total_devices,onos_instances)
1657 master_onos_name = onos_names_ips[restart_ip]
1658 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1659 Container(master_onos_name, Onos.IMAGE).kill()
1660 time.sleep(40)
1661 for ip in onos_names_ips.keys():
1662 if ip != restart_ip:
1663 controller_ip = ip
1664 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1665 assert_equal(status, True)
1666 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1667 log.info('Master count information after restart is %s'%master_count)
1668 total_devices = 0
1669 for master in master_count.keys():
1670 total_devices += master_count[master]['size']
1671 if master == restart_ip:
1672 assert_equal(master_count[master]['size'], 0)
1673 assert_equal(total_devices,onos_instances)
1674
1675 #pass
1676 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1677 status = self.verify_cluster_status(onos_instances=onos_instances)
1678 assert_equal(status, True)
1679 master_count = self.get_number_of_devices_of_master()
1680 log.info('Master count information is %s'%master_count)
1681 total_devices = 0
1682 for master in master_count.keys():
1683 total_devices += int(master_count[master]['size'])
1684 if master_count[master]['size'] != 0:
1685 master_ip = master
1686                  log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1687 device_id = str(master_count[master]['devices'][0])
1688 device_count = master_count[master]['size']
1689 assert_equal(total_devices,onos_instances)
1690 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1691 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1692 assert_equal(status, True)
1693 master_count = self.get_number_of_devices_of_master()
1694 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1695 total_devices = 0
1696 for master in master_count.keys():
1697 total_devices += int(master_count[master]['size'])
1698 if master == master_ip:
1699 assert_equal(master_count[master]['size'], device_count-1)
1700 assert_equal(total_devices,onos_instances)
1701
1702 #pass
1703 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1704 status = self.verify_cluster_status(onos_instances=onos_instances)
1705 assert_equal(status, True)
1706 master_count = self.get_number_of_devices_of_master()
1707 log.info('Master count information is %s'%master_count)
1708 total_devices = 0
1709 for master in master_count.keys():
1710 total_devices += master_count[master]['size']
1711 assert_equal(total_devices,onos_instances)
1712 log.info('Restarting cluster')
1713 cord_test_onos_restart()
1714 time.sleep(60)
1715 master_count = self.get_number_of_devices_of_master()
1716 log.info('Master count information after restart is %s'%master_count)
1717 total_devices = 0
1718 for master in master_count.keys():
1719 total_devices += master_count[master]['size']
1720 assert_equal(total_devices,onos_instances)