VOL-4217: Update to latest version of protos and voltha-lib-go for
voltha-2.9 release. Tested with BAL3.10

Change-Id: Ibdc5978a1f2df713965a27ce26d0e22c1ffa366a
diff --git a/core/att_workflow.go b/core/att_workflow.go
index d98cbeb..4c11734 100644
--- a/core/att_workflow.go
+++ b/core/att_workflow.go
@@ -21,9 +21,9 @@
 	"strings"
 
 	"github.com/opencord/openolt-scale-tester/config"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	oop "github.com/opencord/voltha-protos/v4/go/openolt"
-	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	oop "github.com/opencord/voltha-protos/v5/go/openolt"
+	tp_pb "github.com/opencord/voltha-protos/v5/go/tech_profile"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -69,36 +69,40 @@
 }
 
 func AddDhcpIPV6Flow(oo oop.OpenoltClient, config *config.OpenOltScaleTesterConfig, rsrMgr *OpenOltResourceMgr) error {
-	var flowID uint64
-	var err error
+	// We do not support DHCPv6 at the moment. Uncomment the below code when it is supported in the future
+	/*
+		var flowID uint64
+		var err error
 
-	if flowID, err = rsrMgr.GetFlowID(context.Background(), uint32(config.NniIntfID)); err != nil {
-		return err
-	}
+		if flowID, err = rsrMgr.GetFlowID(context.Background(), uint32(config.NniIntfID)); err != nil {
+			return err
+		}
 
-	// DHCP IPV6
-	flowClassifier := &oop.Classifier{EthType: 34525, IpProto: 17, SrcPort: 546, DstPort: 547, PktTagType: "double_tag"}
-	actionCmd := &oop.ActionCmd{TrapToHost: true}
-	actionInfo := &oop.Action{Cmd: actionCmd}
+		// DHCP IPV6
+		flowClassifier := &oop.Classifier{EthType: 34525, IpProto: 17, SrcPort: 546, DstPort: 547, PktTagType: "double_tag"}
+		actionCmd := &oop.ActionCmd{TrapToHost: true}
+		actionInfo := &oop.Action{Cmd: actionCmd}
 
-	flow := oop.Flow{AccessIntfId: -1, OnuId: -1, UniId: -1, FlowId: flowID,
-		FlowType: "downstream", AllocId: -1, GemportId: -1,
-		Classifier: flowClassifier, Action: actionInfo,
-		Priority: 1000, PortNo: uint32(config.NniIntfID)}
+		flow := oop.Flow{AccessIntfId: -1, OnuId: -1, UniId: -1, FlowId: flowID,
+			FlowType: "downstream", AllocId: -1, GemportId: -1,
+			Classifier: flowClassifier, Action: actionInfo,
+			Priority: 1000, PortNo: uint32(config.NniIntfID)}
 
-	_, err = oo.FlowAdd(context.Background(), &flow)
+		_, err = oo.FlowAdd(context.Background(), &flow)
 
-	st, _ := status.FromError(err)
-	if st.Code() == codes.AlreadyExists {
-		logger.Debugw(nil, "Flow already exists", log.Fields{"err": err, "deviceFlow": flow})
-		return nil
-	}
+		st, _ := status.FromError(err)
+		if st.Code() == codes.AlreadyExists {
+			logger.Debugw(nil, "Flow already exists", log.Fields{"err": err, "deviceFlow": flow})
+			return nil
+		}
 
-	if err != nil {
-		logger.Errorw(nil, "Failed to Add DHCP IPV6 to device", log.Fields{"err": err, "deviceFlow": flow})
-		return err
-	}
-	logger.Debugw(nil, "DHCP IPV6 added to device successfully ", log.Fields{"flow": flow})
+		if err != nil {
+			logger.Errorw(nil, "Failed to Add DHCP IPV6 to device", log.Fields{"err": err, "deviceFlow": flow})
+			return err
+		}
+		logger.Debugw(nil, "DHCP IPV6 added to device successfully ", log.Fields{"flow": flow})
+
+	*/
 
 	return nil
 }
@@ -195,9 +199,9 @@
 	var gemPortIDs []uint32
 	pbitToGem := make(map[uint32]uint32)
 
-	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
+	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocId
 	for _, gem := range subs.TpInstance[subs.TestConfig.TpIDList[0]].UpstreamGemPortAttributeList {
-		gemPortIDs = append(gemPortIDs, gem.GemportID)
+		gemPortIDs = append(gemPortIDs, gem.GemportId)
 	}
 
 	for idx, gemID := range gemPortIDs {
@@ -227,9 +231,9 @@
 	var gemPortIDs []uint32
 	pbitToGem := make(map[uint32]uint32)
 
-	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
+	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocId
 	for _, gem := range subs.TpInstance[subs.TestConfig.TpIDList[0]].UpstreamGemPortAttributeList {
-		gemPortIDs = append(gemPortIDs, gem.GemportID)
+		gemPortIDs = append(gemPortIDs, gem.GemportId)
 	}
 
 	for idx, gemID := range gemPortIDs {
@@ -254,34 +258,38 @@
 }
 
 func (att AttWorkFlow) ProvisionDhcpIPV6Flow(subs *Subscriber) error {
-	var err error
-	var flowID uint64
-	var gemPortIDs []uint32
-	pbitToGem := make(map[uint32]uint32)
+	// We do not support DHCPv6 at the moment. Uncomment the below code when it is supported in the future
+	/*
+		var err error
+		var flowID uint64
+		var gemPortIDs []uint32
+		pbitToGem := make(map[uint32]uint32)
 
-	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
-	for _, gem := range subs.TpInstance[subs.TestConfig.TpIDList[0]].UpstreamGemPortAttributeList {
-		gemPortIDs = append(gemPortIDs, gem.GemportID)
-	}
+		var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocId
+		for _, gem := range subs.TpInstance[subs.TestConfig.TpIDList[0]].UpstreamGemPortAttributeList {
+			gemPortIDs = append(gemPortIDs, gem.GemportId)
+		}
 
-	for idx, gemID := range gemPortIDs {
-		pBitMap := subs.TpInstance[subs.TestConfig.TpIDList[0]].UpstreamGemPortAttributeList[idx].PbitMap
-		for pos, pbitSet := range strings.TrimPrefix(pBitMap, "0b") {
-			if pbitSet == '1' {
-				pcp := uint32(len(strings.TrimPrefix(pBitMap, "0b"))) - 1 - uint32(pos)
-				pbitToGem[pcp] = gemID
+		for idx, gemID := range gemPortIDs {
+			pBitMap := subs.TpInstance[subs.TestConfig.TpIDList[0]].UpstreamGemPortAttributeList[idx].PbitMap
+			for pos, pbitSet := range strings.TrimPrefix(pBitMap, "0b") {
+				if pbitSet == '1' {
+					pcp := uint32(len(strings.TrimPrefix(pBitMap, "0b"))) - 1 - uint32(pos)
+					pbitToGem[pcp] = gemID
+				}
 			}
 		}
-	}
 
-	// This flowID is not the BAL flow ID now, it is the voltha-flow-id
-	if flowID, err = subs.RsrMgr.GetFlowID(context.Background(), uint32(subs.PonIntf)); err != nil {
-		return errors.New(ReasonCodeToReasonString(FLOW_ID_GENERATION_FAILED))
-	}
-	if err := AddFlow(subs, DhcpFlowIPV6, Upstream, flowID, allocID, 0, 0xff,
-		true, 0, pbitToGem); err != nil {
-		return err
-	}
+		// This flowID is not the BAL flow ID now, it is the voltha-flow-id
+		if flowID, err = subs.RsrMgr.GetFlowID(context.Background(), uint32(subs.PonIntf)); err != nil {
+			return errors.New(ReasonCodeToReasonString(FLOW_ID_GENERATION_FAILED))
+		}
+		if err := AddFlow(subs, DhcpFlowIPV6, Upstream, flowID, allocID, 0, 0xff,
+			true, 0, pbitToGem); err != nil {
+			return err
+		}
+
+	*/
 	return nil
 }
 
@@ -296,9 +304,9 @@
 	var gemPortIDs []uint32
 	pbitToGem := make(map[uint32]uint32)
 
-	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
+	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocId
 	for _, gem := range subs.TpInstance[subs.TestConfig.TpIDList[0]].UpstreamGemPortAttributeList {
-		gemPortIDs = append(gemPortIDs, gem.GemportID)
+		gemPortIDs = append(gemPortIDs, gem.GemportId)
 	}
 
 	for idx, gemID := range gemPortIDs {
diff --git a/core/common.go b/core/common.go
index b03022e..7230b40 100644
--- a/core/common.go
+++ b/core/common.go
@@ -18,7 +18,7 @@
 package core
 
 import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/core/dt_workflow.go b/core/dt_workflow.go
index 97870ee..4aa3bdb 100644
--- a/core/dt_workflow.go
+++ b/core/dt_workflow.go
@@ -21,9 +21,9 @@
 	"strings"
 
 	"github.com/opencord/openolt-scale-tester/config"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	oop "github.com/opencord/voltha-protos/v4/go/openolt"
-	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	oop "github.com/opencord/voltha-protos/v5/go/openolt"
+	tp_pb "github.com/opencord/voltha-protos/v5/go/tech_profile"
 	"golang.org/x/net/context"
 )
 
@@ -142,9 +142,9 @@
 	var gemPortIDs []uint32
 	pbitToGem := make(map[uint32]uint32)
 
-	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
+	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocId
 	for _, gem := range subs.TpInstance[subs.TestConfig.TpIDList[0]].UpstreamGemPortAttributeList {
-		gemPortIDs = append(gemPortIDs, gem.GemportID)
+		gemPortIDs = append(gemPortIDs, gem.GemportId)
 	}
 
 	for idx, gemID := range gemPortIDs {
diff --git a/core/olt_manager.go b/core/olt_manager.go
index ab171ef..9264e22 100644
--- a/core/olt_manager.go
+++ b/core/olt_manager.go
@@ -22,8 +22,10 @@
 	"encoding/json"
 	"errors"
 	"fmt"
+	"github.com/golang/protobuf/jsonpb"
+	ponrmgr "github.com/opencord/voltha-lib-go/v7/pkg/ponresourcemanager"
+	"github.com/opencord/voltha-protos/v5/go/tech_profile"
 	"io"
-	"io/ioutil"
 	"os"
 	"strconv"
 	"sync"
@@ -32,10 +34,9 @@
 
 	"github.com/cenkalti/backoff/v3"
 	"github.com/opencord/openolt-scale-tester/config"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	"github.com/opencord/voltha-lib-go/v4/pkg/techprofile"
-	oop "github.com/opencord/voltha-protos/v4/go/openolt"
+	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	oop "github.com/opencord/voltha-protos/v5/go/openolt"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -55,11 +56,10 @@
 	ipPort             string
 	deviceInfo         *oop.DeviceInfo
 	OnuDeviceMap       map[OnuDeviceKey]*OnuDevice `json:"onuDeviceMap"`
-	TechProfile        map[uint32]*techprofile.TechProfileIf
 	clientConn         *grpc.ClientConn
 	openOltClient      oop.OpenoltClient
 	testConfig         *config.OpenOltScaleTesterConfig
-	rsrMgr             *OpenOltResourceMgr
+	rsrMgr             []*OpenOltResourceMgr
 	lockRsrAlloc       sync.RWMutex
 	lockOpenOltManager sync.RWMutex
 }
@@ -75,7 +75,6 @@
 }
 
 func (om *OpenOltManager) readAndLoadTPsToEtcd() {
-	var byteValue []byte
 	var err error
 	// Verify that etcd is up before starting the application.
 	etcdIpPort := "http://" + om.testConfig.KVStoreHost + ":" + strconv.Itoa(om.testConfig.KVStorePort)
@@ -98,14 +97,17 @@
 		}
 		logger.Debugw(nil, "tp-file-opened-successfully", log.Fields{"tpFile": tpFilePath})
 
-		// read our opened json file as a byte array.
-		if byteValue, err = ioutil.ReadAll(jsonFile); err != nil {
-			logger.Fatalw(nil, "could-not-read-tp-file", log.Fields{"err": err, "tpFile": tpFilePath})
-		}
+		/*
+			// read our opened json file as a byte array.
+			if byteValue, err = ioutil.ReadAll(jsonFile); err != nil {
+				logger.Fatalw(nil, "could-not-read-tp-file", log.Fields{"err": err, "tpFile": tpFilePath})
+			}
 
-		var tp techprofile.TechProfile
+		*/
 
-		if err = json.Unmarshal(byteValue, &tp); err != nil {
+		var tp tech_profile.TechProfile
+
+		if err = jsonpb.Unmarshal(jsonFile, &tp); err != nil {
 			logger.Fatalw(nil, "could-not-unmarshal-tp", log.Fields{"err": err, "tpFile": tpFilePath})
 		} else {
 			logger.Infow(nil, "tp-read-from-file", log.Fields{"tp": tp, "tpFile": tpFilePath})
@@ -121,8 +123,8 @@
 		if kvResult == nil {
 			logger.Fatal(nil, "tp-not-found-on-kv-after-load", log.Fields{"key": kvPath, "err": err})
 		} else {
-			var KvTpIns techprofile.TechProfile
-			var resPtr = &KvTpIns
+			var KvTp tech_profile.TechProfile
+			var resPtr = &KvTp
 			if value, err := kvstore.ToByte(kvResult.Value); err == nil {
 				if err = json.Unmarshal(value, resPtr); err != nil {
 					logger.Fatal(nil, "error-unmarshal-kv-result", log.Fields{"err": err, "key": kvPath, "value": value})
@@ -157,24 +159,20 @@
 	om.readAndLoadTPsToEtcd()
 
 	logger.Info(nil, "etcd-up-and-running--tp-loaded-successfully")
-
-	if om.rsrMgr = NewResourceMgr("ABCD", om.testConfig.KVStoreHost+":"+strconv.Itoa(om.testConfig.KVStorePort),
-		"etcd", "openolt", om.deviceInfo); om.rsrMgr == nil {
-		logger.Error(nil, "Error while instantiating resource manager")
-		return errors.New("instantiating resource manager failed")
-	}
-
-	om.TechProfile = make(map[uint32]*techprofile.TechProfileIf)
-	if err = om.populateTechProfilePerPonPort(); err != nil {
-		logger.Error(nil, "Error while populating tech profile mgr\n")
-		return errors.New("error-loading-tech-profile-per-ponPort")
+	om.rsrMgr = make([]*OpenOltResourceMgr, om.deviceInfo.PonPorts)
+	for ponIdx := range om.rsrMgr {
+		if om.rsrMgr[ponIdx] = NewResourceMgr("ABCD", om.testConfig.KVStoreHost+":"+strconv.Itoa(om.testConfig.KVStorePort),
+			"etcd", "openolt", uint32(ponIdx), om.deviceInfo); om.rsrMgr[ponIdx] == nil {
+			logger.Error(nil, "Error while instantiating resource manager")
+			return errors.New("instantiating resource manager failed")
+		}
 	}
 
 	// Start reading indications
 	go om.readIndications()
 
 	// Provision OLT NNI Trap flows as needed by the Workflow
-	if err = ProvisionNniTrapFlow(om.openOltClient, om.testConfig, om.rsrMgr); err != nil {
+	if err = ProvisionNniTrapFlow(om.openOltClient, om.testConfig, om.rsrMgr[0]); err != nil {
 		logger.Error(nil, "failed-to-add-nni-trap-flow", log.Fields{"err": err})
 	}
 
@@ -205,8 +203,7 @@
 
 func (om *OpenOltManager) provisionONUs() {
 	var numOfONUsPerPon uint
-	var i, j, k, onuID uint32
-	var err error
+	var i, j, k uint32
 	var onuWg sync.WaitGroup
 
 	defer func() {
@@ -255,14 +252,17 @@
 				sn := GenerateNextONUSerialNumber()
 				om.lockRsrAlloc.Unlock()
 				logger.Debugw(nil, "provisioning onu", log.Fields{"onuID": j, "ponPort": i, "serialNum": sn})
-				if onuID, err = om.rsrMgr.GetONUID(j); err != nil {
+				ctx := context.Background()
+				newCtx := context.WithValue(ctx, "ponIf", i)
+				onuIDSl, err := om.rsrMgr[i].TechprofileRef.GetResourceID(newCtx, j, ponrmgr.ONU_ID, 1)
+				if err != nil {
 					logger.Errorw(nil, "error getting onu id", log.Fields{"err": err})
 					continue
 				}
-				logger.Infow(nil, "onu-provision-started-from-olt-manager", log.Fields{"onuId": onuID, "ponIntf": i})
+				logger.Infow(nil, "onu-provision-started-from-olt-manager", log.Fields{"onuId": onuIDSl[0], "ponIntf": i})
 
 				onuWg.Add(1)
-				go om.activateONU(j, onuID, sn, om.stringifySerialNumber(sn), &onuWg)
+				go om.activateONU(j, onuIDSl[0], sn, om.stringifySerialNumber(sn), &onuWg)
 			}
 		}
 		// Wait for the group of ONUs to complete processing before going to next batch of ONUs
@@ -295,7 +295,7 @@
 		PonIntf:       intfID,
 		openOltClient: om.openOltClient,
 		testConfig:    om.testConfig,
-		rsrMgr:        om.rsrMgr,
+		rsrMgr:        om.rsrMgr[intfID],
 		onuWg:         onuWg,
 	}
 	var err error
@@ -452,26 +452,6 @@
 	}
 }
 
-func (om *OpenOltManager) populateTechProfilePerPonPort() error {
-	var tpCount int
-	for _, techRange := range om.deviceInfo.Ranges {
-		for _, intfID := range techRange.IntfIds {
-			om.TechProfile[intfID] = &(om.rsrMgr.ResourceMgrs[intfID].TechProfileMgr)
-			tpCount++
-			logger.Debugw(nil, "Init tech profile done", log.Fields{"intfID": intfID})
-		}
-	}
-	//Make sure we have as many tech_profiles as there are pon ports on the device
-	if tpCount != int(om.deviceInfo.GetPonPorts()) {
-		logger.Errorw(nil, "Error while populating techprofile",
-			log.Fields{"numofTech": tpCount, "numPonPorts": om.deviceInfo.GetPonPorts()})
-		return errors.New("error while populating techprofile mgrs")
-	}
-	logger.Infow(nil, "Populated techprofile for ponports successfully",
-		log.Fields{"numofTech": tpCount, "numPonPorts": om.deviceInfo.GetPonPorts()})
-	return nil
-}
-
 func isPowerOfTwo(numOfOnus uint) bool {
 	return (numOfOnus & (numOfOnus - 1)) == 0
 }
diff --git a/core/onu_manager.go b/core/onu_manager.go
index 5b44b28..3a8aef3 100644
--- a/core/onu_manager.go
+++ b/core/onu_manager.go
@@ -22,8 +22,8 @@
 	"time"
 
 	"github.com/opencord/openolt-scale-tester/config"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	oop "github.com/opencord/voltha-protos/v4/go/openolt"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	oop "github.com/opencord/voltha-protos/v5/go/openolt"
 )
 
 type SubscriberKey struct {
diff --git a/core/resource_manager.go b/core/resource_manager.go
index 255e1be..bfadf5f 100644
--- a/core/resource_manager.go
+++ b/core/resource_manager.go
@@ -18,29 +18,29 @@
 package core
 
 import (
-	"sync"
-
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	ponrmgr "github.com/opencord/voltha-lib-go/v4/pkg/ponresourcemanager"
-	"github.com/opencord/voltha-protos/v4/go/openolt"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	ponrmgr "github.com/opencord/voltha-lib-go/v7/pkg/ponresourcemanager"
+	"github.com/opencord/voltha-lib-go/v7/pkg/techprofile"
+	"github.com/opencord/voltha-protos/v5/go/openolt"
 	"golang.org/x/net/context"
+	"sync"
+)
+
+const (
+	basePathKvStore = "service/voltha"
 )
 
 // OpenOltResourceMgr holds resource related information as provided below for each field
 type OpenOltResourceMgr struct {
 	deviceInfo *openolt.DeviceInfo
 
-	// This protects concurrent onu_id allocate/delete calls on a per PON port basis
-	OnuIDMgmtLock []sync.RWMutex
-	// This protects concurrent flow_id allocate/delete calls. We do not need this on a
-	// per PON port basis as flow IDs are unique across the OLT.
-	FlowIDMgmtLock sync.RWMutex
-
-	// This protects concurrent GemID and AllocID allocate/delete calls on a per PON port basis
-	GemIDAllocIDLock []sync.RWMutex
+	intfID uint32 // pon interface id
 
 	// array of pon resource managers per interface technology
-	ResourceMgrs map[uint32]*ponrmgr.PONResourceManager
+	PonRsrMgr      *ponrmgr.PONResourceManager
+	TechprofileRef techprofile.TechProfileIf
+
+	FlowIDMgmtLock sync.RWMutex
 
 	flow_id uint64
 }
@@ -48,98 +48,45 @@
 // NewResourceMgr init a New resource manager instance which in turn instantiates pon resource manager
 // instances according to technology. Initializes the default resource ranges for all
 // the resources.
-func NewResourceMgr(deviceID string, KVStoreHostPort string, kvStoreType string, deviceType string, devInfo *openolt.DeviceInfo) *OpenOltResourceMgr {
+func NewResourceMgr(deviceID string, KVStoreHostPort string, kvStoreType string, deviceType string, ponID uint32, devInfo *openolt.DeviceInfo) *OpenOltResourceMgr {
 	var ResourceMgr OpenOltResourceMgr
 	logger.Debugf(nil, "Init new resource manager")
 
 	ResourceMgr.deviceInfo = devInfo
-	NumPONPorts := devInfo.GetPonPorts()
-
-	ResourceMgr.OnuIDMgmtLock = make([]sync.RWMutex, NumPONPorts)
-	ResourceMgr.GemIDAllocIDLock = make([]sync.RWMutex, NumPONPorts)
-	ResourceMgr.FlowIDMgmtLock = sync.RWMutex{}
-
-	Ranges := make(map[string]*openolt.DeviceInfo_DeviceResourceRanges)
-	RsrcMgrsByTech := make(map[string]*ponrmgr.PONResourceManager)
-	ResourceMgr.ResourceMgrs = make(map[uint32]*ponrmgr.PONResourceManager)
-
-	// TODO self.args = registry('main').get_args()
-
-	/*
-	   If a legacy driver returns protobuf without any ranges,s synthesize one from
-	   the legacy global per-device information. This, in theory, is temporary until
-	   the legacy drivers are upgrade to support pool ranges.
-	*/
-	if devInfo.Ranges == nil {
-		var ranges openolt.DeviceInfo_DeviceResourceRanges
-		ranges.Technology = devInfo.GetTechnology()
-
-		var index uint32
-		for index = 0; index < NumPONPorts; index++ {
-			ranges.IntfIds = append(ranges.IntfIds, index)
-		}
-
-		var Pool openolt.DeviceInfo_DeviceResourceRanges_Pool
-		Pool.Type = openolt.DeviceInfo_DeviceResourceRanges_Pool_ONU_ID
-		Pool.Start = devInfo.OnuIdStart
-		Pool.End = devInfo.OnuIdEnd
-		Pool.Sharing = openolt.DeviceInfo_DeviceResourceRanges_Pool_DEDICATED_PER_INTF
-		onuPool := Pool
-		ranges.Pools = append(ranges.Pools, &onuPool)
-
-		Pool.Type = openolt.DeviceInfo_DeviceResourceRanges_Pool_ALLOC_ID
-		Pool.Start = devInfo.AllocIdStart
-		Pool.End = devInfo.AllocIdEnd
-		Pool.Sharing = openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH
-		allocPool := Pool
-		ranges.Pools = append(ranges.Pools, &allocPool)
-
-		Pool.Type = openolt.DeviceInfo_DeviceResourceRanges_Pool_GEMPORT_ID
-		Pool.Start = devInfo.GemportIdStart
-		Pool.End = devInfo.GemportIdEnd
-		Pool.Sharing = openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH
-		gemPool := Pool
-		ranges.Pools = append(ranges.Pools, &gemPool)
-
-		Pool.Type = openolt.DeviceInfo_DeviceResourceRanges_Pool_FLOW_ID
-		Pool.Start = devInfo.FlowIdStart
-		Pool.End = devInfo.FlowIdEnd
-		Pool.Sharing = openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH
-		ranges.Pools = append(ranges.Pools, &Pool)
-		// Add to device info
-		devInfo.Ranges = append(devInfo.Ranges, &ranges)
-	}
-
+	ResourceMgr.intfID = ponID
+	ctx := context.Background()
+	newCtx := context.WithValue(ctx, "ponIf", ponID)
 	// Create a separate Resource Manager instance for each range. This assumes that
 	// each technology is represented by only a single range
-	var GlobalPONRsrcMgr *ponrmgr.PONResourceManager
-	var err error
 	for _, TechRange := range devInfo.Ranges {
-		technology := TechRange.Technology
-		logger.Debugf(nil, "Device info technology %s", technology)
-		Ranges[technology] = TechRange
-		RsrcMgrsByTech[technology], err = ponrmgr.NewPONResourceManager(nil, technology, deviceType, deviceID,
-			kvStoreType, KVStoreHostPort)
-		if err != nil {
-			logger.Errorf(nil, "Failed to create pon resource manager instance for technology %s", technology)
-			return nil
+		for _, intfID := range TechRange.IntfIds {
+			if intfID == ponID {
+				technology := TechRange.Technology
+				logger.Debugf(context.Background(), "Device info technology %s, intf-id %v", technology, ponID)
+				rsrMgr, err := ponrmgr.NewPONResourceManager(newCtx, technology, deviceType, deviceID,
+					kvStoreType, KVStoreHostPort, basePathKvStore)
+				if err != nil {
+					logger.Errorf(context.Background(), "Failed to create pon resource manager instance for technology %s", technology)
+					return nil
+				}
+				ResourceMgr.PonRsrMgr = rsrMgr
+				// self.initialize_device_resource_range_and_pool(resource_mgr, global_resource_mgr, arange)
+				InitializeDeviceResourceRangeAndPool(rsrMgr, TechRange, devInfo)
+				if err := ResourceMgr.PonRsrMgr.InitDeviceResourcePoolForIntf(context.Background(), intfID); err != nil {
+					logger.Fatalf(context.Background(), "failed-to-initialize-device-resource-pool-intf-id-%v-device-id", ResourceMgr.intfID)
+					return nil
+				}
+			}
 		}
-		// resource_mgrs_by_tech[technology] = resource_mgr
-		if GlobalPONRsrcMgr == nil {
-			GlobalPONRsrcMgr = RsrcMgrsByTech[technology]
-		}
-		for _, IntfID := range TechRange.IntfIds {
-			ResourceMgr.ResourceMgrs[(IntfID)] = RsrcMgrsByTech[technology]
-		}
-		// self.initialize_device_resource_range_and_pool(resource_mgr, global_resource_mgr, arange)
-		InitializeDeviceResourceRangeAndPool(RsrcMgrsByTech[technology], GlobalPONRsrcMgr,
-			TechRange, devInfo)
 	}
-	// After we have initialized resource ranges, initialize the
-	// resource pools accordingly.
-	for _, PONRMgr := range RsrcMgrsByTech {
-		_ = PONRMgr.InitDeviceResourcePool(context.Background())
+	var err error
+	ResourceMgr.TechprofileRef, err = techprofile.NewTechProfile(newCtx, ResourceMgr.PonRsrMgr, ResourceMgr.PonRsrMgr.Backend,
+		ResourceMgr.PonRsrMgr.Address, basePathKvStore)
+	if err != nil || ResourceMgr.TechprofileRef == nil {
+		logger.Errorw(nil, "failed-to-allocate-to-techprofile-for-pon-port", log.Fields{"intfID": ponID, "err": err})
+		return nil
 	}
+
 	logger.Info(nil, "Initialization of  resource manager success!")
 	return &ResourceMgr
 }
@@ -148,147 +95,55 @@
 // device specific information. If KV doesn't exist
 // or is broader than the device, the device's information will
 // dictate the range limits
-func InitializeDeviceResourceRangeAndPool(ponRMgr *ponrmgr.PONResourceManager, globalPONRMgr *ponrmgr.PONResourceManager,
+func InitializeDeviceResourceRangeAndPool(ponRMgr *ponrmgr.PONResourceManager,
 	techRange *openolt.DeviceInfo_DeviceResourceRanges, devInfo *openolt.DeviceInfo) {
+	// var ONUIDShared, AllocIDShared, GEMPortIDShared openolt.DeviceInfo_DeviceResourceRanges_Pool_SharingType
+	var ONUIDStart, ONUIDEnd, AllocIDStart, AllocIDEnd, GEMPortIDStart, GEMPortIDEnd uint32
+	var ONUIDShared, AllocIDShared, GEMPortIDShared, FlowIDShared uint32
+
+	// The below variables are just dummy and needed to pass as arguments to InitDefaultPONResourceRanges function.
+	// The openolt adapter does not need flowIDs to be managed as it is managed on the OLT device
+	// The UNI IDs are dynamically generated by openonu adapter for every discovered UNI.
+	var flowIDDummyStart, flowIDDummyEnd uint32 = 1, 2
+	var uniIDDummyStart, uniIDDummyEnd uint32 = 0, 1
 
 	// init the resource range pool according to the sharing type
-
-	logger.Debugf(nil, "Resource range pool init for technology %s", ponRMgr.Technology)
-	// first load from KV profiles
-	status := ponRMgr.InitResourceRangesFromKVStore(context.Background())
-	if !status {
-		logger.Debugf(nil, "Failed to load resource ranges from KV store for tech %s", ponRMgr.Technology)
-	}
-
-	/*
-	   Then apply device specific information. If KV doesn't exist
-	   or is broader than the device, the device's information will
-	   dictate the range limits
-	*/
-	logger.Debugf(nil, "Using device info to init pon resource ranges for tech", ponRMgr.Technology)
-
-	ONUIDStart := devInfo.OnuIdStart
-	ONUIDEnd := devInfo.OnuIdEnd
-	ONUIDShared := openolt.DeviceInfo_DeviceResourceRanges_Pool_DEDICATED_PER_INTF
-	ONUIDSharedPoolID := uint32(0)
-	AllocIDStart := devInfo.AllocIdStart
-	AllocIDEnd := devInfo.AllocIdEnd
-	AllocIDShared := openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH // TODO EdgeCore/BAL limitation
-	AllocIDSharedPoolID := uint32(0)
-	GEMPortIDStart := devInfo.GemportIdStart
-	GEMPortIDEnd := devInfo.GemportIdEnd
-	GEMPortIDShared := openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH // TODO EdgeCore/BAL limitation
-	GEMPortIDSharedPoolID := uint32(0)
-	FlowIDStart := devInfo.FlowIdStart
-	FlowIDEnd := devInfo.FlowIdEnd
-	FlowIDShared := openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH // TODO EdgeCore/BAL limitation
-	FlowIDSharedPoolID := uint32(0)
-
-	var FirstIntfPoolID uint32
-	var SharedPoolID uint32
-
-	/*
-	 * As a zero check is made against SharedPoolID to check whether the resources are shared across all intfs
-	 * if resources are shared across interfaces then SharedPoolID is given a positive number.
-	 */
-	for _, FirstIntfPoolID = range techRange.IntfIds {
-		// skip the intf id 0
-		if FirstIntfPoolID == 0 {
-			continue
-		}
-		break
-	}
-
+	logger.Debugw(nil, "Device info init", log.Fields{"technology": techRange.Technology,
+		"onu_id_start": ONUIDStart, "onu_id_end": ONUIDEnd,
+		"alloc_id_start": AllocIDStart, "alloc_id_end": AllocIDEnd,
+		"gemport_id_start": GEMPortIDStart, "gemport_id_end": GEMPortIDEnd,
+		"intf_ids": techRange.IntfIds,
+	})
 	for _, RangePool := range techRange.Pools {
-		if RangePool.Sharing == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
-			SharedPoolID = FirstIntfPoolID
-		} else if RangePool.Sharing == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_SAME_TECH {
-			SharedPoolID = FirstIntfPoolID
-		} else {
-			SharedPoolID = 0
-		}
+		// FIXME: Remove hardcoding
 		if RangePool.Type == openolt.DeviceInfo_DeviceResourceRanges_Pool_ONU_ID {
 			ONUIDStart = RangePool.Start
 			ONUIDEnd = RangePool.End
-			ONUIDShared = RangePool.Sharing
-			ONUIDSharedPoolID = SharedPoolID
+			ONUIDShared = uint32(RangePool.Sharing)
 		} else if RangePool.Type == openolt.DeviceInfo_DeviceResourceRanges_Pool_ALLOC_ID {
 			AllocIDStart = RangePool.Start
 			AllocIDEnd = RangePool.End
-			AllocIDShared = RangePool.Sharing
-			AllocIDSharedPoolID = SharedPoolID
+			AllocIDShared = uint32(RangePool.Sharing)
 		} else if RangePool.Type == openolt.DeviceInfo_DeviceResourceRanges_Pool_GEMPORT_ID {
 			GEMPortIDStart = RangePool.Start
 			GEMPortIDEnd = RangePool.End
-			GEMPortIDShared = RangePool.Sharing
-			GEMPortIDSharedPoolID = SharedPoolID
-		} else if RangePool.Type == openolt.DeviceInfo_DeviceResourceRanges_Pool_FLOW_ID {
-			FlowIDStart = RangePool.Start
-			FlowIDEnd = RangePool.End
-			FlowIDShared = RangePool.Sharing
-			FlowIDSharedPoolID = SharedPoolID
+			GEMPortIDShared = uint32(RangePool.Sharing)
 		}
 	}
 
-	logger.Debugw(nil, "Device info init", log.Fields{"technology": techRange.Technology,
-		"onu_id_start": ONUIDStart, "onu_id_end": ONUIDEnd, "onu_id_shared_pool_id": ONUIDSharedPoolID,
-		"alloc_id_start": AllocIDStart, "alloc_id_end": AllocIDEnd,
-		"alloc_id_shared_pool_id": AllocIDSharedPoolID,
-		"gemport_id_start":        GEMPortIDStart, "gemport_id_end": GEMPortIDEnd,
-		"gemport_id_shared_pool_id": GEMPortIDSharedPoolID,
-		"flow_id_start":             FlowIDStart,
-		"flow_id_end_idx":           FlowIDEnd,
-		"flow_id_shared_pool_id":    FlowIDSharedPoolID,
-		"intf_ids":                  techRange.IntfIds,
-		"uni_id_start":              0,
-		"uni_id_end_idx":            1, /*MaxUNIIDperONU()*/
-	})
-
-	ponRMgr.InitDefaultPONResourceRanges(nil, ONUIDStart, ONUIDEnd, ONUIDSharedPoolID,
-		AllocIDStart, AllocIDEnd, AllocIDSharedPoolID,
-		GEMPortIDStart, GEMPortIDEnd, GEMPortIDSharedPoolID,
-		FlowIDStart, FlowIDEnd, FlowIDSharedPoolID, 0, 1,
+	ponRMgr.InitDefaultPONResourceRanges(nil, ONUIDStart, ONUIDEnd, ONUIDShared,
+		AllocIDStart, AllocIDEnd, AllocIDShared,
+		GEMPortIDStart, GEMPortIDEnd, GEMPortIDShared,
+		flowIDDummyStart, flowIDDummyEnd, FlowIDShared, uniIDDummyStart, uniIDDummyEnd,
 		devInfo.PonPorts, techRange.IntfIds)
 
-	// For global sharing, make sure to refresh both local and global resource manager instances' range
-
-	if ONUIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
-		globalPONRMgr.UpdateRanges(nil, ponrmgr.ONU_ID_START_IDX, ONUIDStart, ponrmgr.ONU_ID_END_IDX, ONUIDEnd,
-			"", 0, nil)
-		ponRMgr.UpdateRanges(nil, ponrmgr.ONU_ID_START_IDX, ONUIDStart, ponrmgr.ONU_ID_END_IDX, ONUIDEnd,
-			"", 0, globalPONRMgr)
-	}
-	if AllocIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
-		globalPONRMgr.UpdateRanges(nil, ponrmgr.ALLOC_ID_START_IDX, AllocIDStart, ponrmgr.ALLOC_ID_END_IDX, AllocIDEnd,
-			"", 0, nil)
-
-		ponRMgr.UpdateRanges(nil, ponrmgr.ALLOC_ID_START_IDX, AllocIDStart, ponrmgr.ALLOC_ID_END_IDX, AllocIDEnd,
-			"", 0, globalPONRMgr)
-	}
-	if GEMPortIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
-		globalPONRMgr.UpdateRanges(nil, ponrmgr.GEMPORT_ID_START_IDX, GEMPortIDStart, ponrmgr.GEMPORT_ID_END_IDX, GEMPortIDEnd,
-			"", 0, nil)
-		ponRMgr.UpdateRanges(nil, ponrmgr.GEMPORT_ID_START_IDX, GEMPortIDStart, ponrmgr.GEMPORT_ID_END_IDX, GEMPortIDEnd,
-			"", 0, globalPONRMgr)
-	}
-	if FlowIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
-		globalPONRMgr.UpdateRanges(nil, ponrmgr.FLOW_ID_START_IDX, FlowIDStart, ponrmgr.FLOW_ID_END_IDX, FlowIDEnd,
-			"", 0, nil)
-		ponRMgr.UpdateRanges(nil, ponrmgr.FLOW_ID_START_IDX, FlowIDStart, ponrmgr.FLOW_ID_END_IDX, FlowIDEnd,
-			"", 0, globalPONRMgr)
-	}
-
-	// Make sure loaded range fits the platform bit encoding ranges
-	ponRMgr.UpdateRanges(nil, ponrmgr.UNI_ID_START_IDX, 0, ponrmgr.UNI_ID_END_IDX /* TODO =OpenOltPlatform.MAX_UNIS_PER_ONU-1*/, 1, "", 0, nil)
 }
 
 // Delete clears used resources for the particular olt device being deleted
 func (RsrcMgr *OpenOltResourceMgr) Delete() error {
-	for _, rsrcMgr := range RsrcMgr.ResourceMgrs {
-		if err := rsrcMgr.ClearDeviceResourcePool(context.Background()); err != nil {
-			logger.Debug(nil, "Failed to clear device resource pool")
-			return err
-		}
+	if err := RsrcMgr.PonRsrMgr.ClearDeviceResourcePool(context.Background()); err != nil {
+		logger.Debug(nil, "Failed to clear device resource pool")
+		return err
 	}
 	logger.Debug(nil, "Cleared device resource pool")
 	return nil
@@ -296,17 +151,21 @@
 
 // GetONUID returns the available OnuID for the given pon-port
 func (RsrcMgr *OpenOltResourceMgr) GetONUID(ponIntfID uint32) (uint32, error) {
-	RsrcMgr.OnuIDMgmtLock[ponIntfID].Lock()
-	defer RsrcMgr.OnuIDMgmtLock[ponIntfID].Unlock()
-	// Check if Pon Interface ID is present in Resource-manager-map
-	ONUIDs, err := RsrcMgr.ResourceMgrs[ponIntfID].GetResourceID(context.Background(), ponIntfID,
+	ctx := context.Background()
+	newCtx := context.WithValue(ctx, "ponIf", ponIntfID)
+	// Get ONU id for a provided pon interface ID.
+	onuID, err := RsrcMgr.TechprofileRef.GetResourceID(newCtx, ponIntfID,
 		ponrmgr.ONU_ID, 1)
 	if err != nil {
 		logger.Errorf(nil, "Failed to get resource for interface %d for type %s",
 			ponIntfID, ponrmgr.ONU_ID)
-		return uint32(0), err
+		return 0, err
 	}
-	return ONUIDs[0], err
+	if len(onuID) > 0 {
+		return onuID[0], err
+	}
+
+	return 0, err // return onuID 0 on error
 }
 
 // GetFlowID return flow ID for a given pon interface id, onu id and uni id
diff --git a/core/subscriber_manager.go b/core/subscriber_manager.go
index 9abec13..8c92b56 100644
--- a/core/subscriber_manager.go
+++ b/core/subscriber_manager.go
@@ -18,12 +18,12 @@
 
 import (
 	"fmt"
+	"github.com/opencord/voltha-protos/v5/go/tech_profile"
 	"sync"
 
 	"github.com/opencord/openolt-scale-tester/config"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	"github.com/opencord/voltha-lib-go/v4/pkg/techprofile"
-	oop "github.com/opencord/voltha-protos/v4/go/openolt"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	oop "github.com/opencord/voltha-protos/v5/go/openolt"
 	"golang.org/x/net/context"
 )
 
@@ -79,7 +79,7 @@
 	FailedScheds []oop.TrafficScheduler `json:"failedScheds"`
 	FailedQueues []oop.TrafficQueue     `json:"failedQueues"`
 
-	TpInstance    map[int]*techprofile.TechProfile
+	TpInstance    map[int]*tech_profile.TechProfileInstance
 	OpenOltClient oop.OpenoltClient
 	TestConfig    *config.OpenOltScaleTesterConfig
 	RsrMgr        *OpenOltResourceMgr
@@ -90,16 +90,14 @@
 
 	logger.Infow(nil, "workflow-deploy-started-for-subscriber", log.Fields{"subsName": subs.SubscriberName})
 
-	subs.TpInstance = make(map[int]*techprofile.TechProfile)
+	subs.TpInstance = make(map[int]*tech_profile.TechProfileInstance)
 
 	for _, tpID := range subs.TestConfig.TpIDList {
 		uniPortName := fmt.Sprintf(UniPortName, subs.PonIntf, subs.OnuID, subs.UniID)
-		subs.RsrMgr.GemIDAllocIDLock[subs.PonIntf].Lock()
-		tpInstInterface, err := subs.RsrMgr.ResourceMgrs[subs.PonIntf].TechProfileMgr.CreateTechProfInstance(context.Background(), uint32(tpID), uniPortName, subs.PonIntf)
+		tpInstInterface, err := subs.RsrMgr.TechprofileRef.CreateTechProfileInstance(context.Background(), uint32(tpID), uniPortName, subs.PonIntf)
 		// TODO: Assumes the techprofile is of type TechProfile (XGPON, GPON). But it could also be EPON TechProfile type. But we do not support that at the moment, so it is OK.
-		subs.TpInstance[tpID] = tpInstInterface.(*techprofile.TechProfile)
+		subs.TpInstance[tpID] = tpInstInterface.(*tech_profile.TechProfileInstance)
 
-		subs.RsrMgr.GemIDAllocIDLock[subs.PonIntf].Unlock()
 		if err != nil {
 			logger.Errorw(nil, "error-creating-tp-instance-for-subs",
 				log.Fields{"subsName": subs.SubscriberName, "onuID": subs.OnuID, "tpID": tpID})
diff --git a/core/tt_workflow.go b/core/tt_workflow.go
index 783dfe6..455a58b 100644
--- a/core/tt_workflow.go
+++ b/core/tt_workflow.go
@@ -22,9 +22,9 @@
 	"sync/atomic"
 
 	"github.com/opencord/openolt-scale-tester/config"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	oop "github.com/opencord/voltha-protos/v4/go/openolt"
-	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	oop "github.com/opencord/voltha-protos/v5/go/openolt"
+	tp_pb "github.com/opencord/voltha-protos/v5/go/tech_profile"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -198,8 +198,7 @@
 func AddTtFlow(subs *Subscriber, flowType string, direction string, flowID uint64,
 	allocID uint32, gemID uint32, pcp uint32, replicateFlow bool, symmetricFlowID uint64,
 	pbitToGem map[uint32]uint32) error {
-	logger.Infow(nil, "add-flow", log.Fields{"WorkFlow": subs.TestConfig.WorkflowName, "FlowType": flowType,
-		"direction": direction, "flowID": flowID})
+
 	var err error
 
 	flowClassifier, actionInfo := FormatTtClassfierAction(flowType, direction, subs)
@@ -215,6 +214,8 @@
 		SymmetricFlowId: symmetricFlowID,
 		ReplicateFlow:   replicateFlow, PbitToGemport: pbitToGem}
 
+	logger.Infow(nil, "adding-flow", log.Fields{"flow": flow})
+
 	_, err = subs.OpenOltClient.FlowAdd(context.Background(), &flow)
 
 	st, _ := status.FromError(err)
@@ -337,9 +338,9 @@
 	pbitToGem := make(map[uint32]uint32)
 	var pcp uint32
 
-	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
+	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocId
 	for _, gem := range subs.TpInstance[subs.TestConfig.TpIDList[0]].UpstreamGemPortAttributeList {
-		gemPortIDs = append(gemPortIDs, gem.GemportID)
+		gemPortIDs = append(gemPortIDs, gem.GemportId)
 	}
 
 	for idx, gemID := range gemPortIDs {
@@ -386,9 +387,9 @@
 	pbitToGem := make(map[uint32]uint32)
 	var pcp uint32
 
-	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
+	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocId
 	for _, gem := range subs.TpInstance[subs.TestConfig.TpIDList[0]].UpstreamGemPortAttributeList {
-		gemPortIDs = append(gemPortIDs, gem.GemportID)
+		gemPortIDs = append(gemPortIDs, gem.GemportId)
 	}
 
 	for idx, gemID := range gemPortIDs {
@@ -445,9 +446,9 @@
 	pbitToGem := make(map[uint32]uint32)
 	var pcp uint32
 
-	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
+	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocId
 	for _, gem := range subs.TpInstance[subs.TestConfig.TpIDList[0]].UpstreamGemPortAttributeList {
-		gemPortIDs = append(gemPortIDs, gem.GemportID)
+		gemPortIDs = append(gemPortIDs, gem.GemportId)
 	}
 
 	for idx, gemID := range gemPortIDs {
@@ -513,9 +514,9 @@
 	var gemPortIDs []uint32
 	var pcp uint32
 
-	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
+	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocId
 	for _, gem := range subs.TpInstance[subs.TestConfig.TpIDList[0]].UpstreamGemPortAttributeList {
-		gemPortIDs = append(gemPortIDs, gem.GemportID)
+		gemPortIDs = append(gemPortIDs, gem.GemportId)
 	}
 
 	for idx, gemID := range gemPortIDs {
diff --git a/core/utils.go b/core/utils.go
index 1bcdedd..eca4f20 100644
--- a/core/utils.go
+++ b/core/utils.go
@@ -18,9 +18,10 @@
 
 import (
 	"fmt"
+	"sync"
 
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	"github.com/opencord/voltha-protos/v4/go/openolt"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	"github.com/opencord/voltha-protos/v5/go/openolt"
 )
 
 type DtStagKey struct {
@@ -32,6 +33,7 @@
 var DtCtag map[uint32]uint32
 var AttCtag map[uint32]uint32
 var TtCtag map[uint32]uint32
+var mutex sync.RWMutex
 
 func init() {
 	AttCtag = make(map[uint32]uint32)
@@ -77,6 +79,8 @@
 func GetAttCtag(ponIntf uint32) uint32 {
 	var currCtag uint32
 	var ok bool
+	mutex.Lock()
+	defer mutex.Unlock()
 	if currCtag, ok = AttCtag[ponIntf]; !ok {
 		// Start with ctag 2
 		AttCtag[ponIntf] = 2
@@ -89,6 +93,8 @@
 func GetDtCtag(ponIntf uint32) uint32 {
 	var currCtag uint32
 	var ok bool
+	mutex.Lock()
+	defer mutex.Unlock()
 	if currCtag, ok = DtCtag[ponIntf]; !ok {
 		// Start with ctag 1
 		DtCtag[ponIntf] = 1
@@ -101,6 +107,8 @@
 func GetTtCtag(ponIntf uint32) uint32 {
 	var currCtag uint32
 	var ok bool
+	mutex.Lock()
+	defer mutex.Unlock()
 	if currCtag, ok = TtCtag[ponIntf]; !ok {
 		// Start with ctag 1
 		TtCtag[ponIntf] = 1
@@ -118,7 +126,8 @@
 func GetDtStag(ponIntf uint32, onuID uint32, uniID uint32) uint32 {
 	// Dt workflow requires unique stag for each subscriber
 	key := DtStagKey{ponIntf: ponIntf, onuID: onuID, uniID: uniID}
-
+	mutex.Lock()
+	defer mutex.Unlock()
 	if value, ok := DtStag[key]; ok {
 		return value
 	} else {
diff --git a/core/workflow_manager.go b/core/workflow_manager.go
index f642eb5..0a961ab 100644
--- a/core/workflow_manager.go
+++ b/core/workflow_manager.go
@@ -20,8 +20,8 @@
 	"errors"
 
 	"github.com/opencord/openolt-scale-tester/config"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	oop "github.com/opencord/voltha-protos/v4/go/openolt"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	oop "github.com/opencord/voltha-protos/v5/go/openolt"
 )
 
 type WorkFlow interface {
diff --git a/core/workflow_utils.go b/core/workflow_utils.go
index 818b3ec..8cbc7ea 100644
--- a/core/workflow_utils.go
+++ b/core/workflow_utils.go
@@ -22,9 +22,9 @@
 	"time"
 
 	"github.com/opencord/openolt-scale-tester/config"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	oop "github.com/opencord/voltha-protos/v4/go/openolt"
-	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	oop "github.com/opencord/voltha-protos/v5/go/openolt"
+	tp_pb "github.com/opencord/voltha-protos/v5/go/tech_profile"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -97,18 +97,17 @@
 
 func getTrafficSched(subs *Subscriber, direction tp_pb.Direction) []*tp_pb.TrafficScheduler {
 	var SchedCfg *tp_pb.SchedulerConfig
-	var err error
 
 	if direction == tp_pb.Direction_DOWNSTREAM {
-		SchedCfg, err = subs.RsrMgr.ResourceMgrs[subs.PonIntf].TechProfileMgr.
-			GetDsScheduler(nil, subs.TpInstance[subs.TestConfig.TpIDList[0]])
+		SchedCfg = subs.RsrMgr.TechprofileRef.
+			GetDsScheduler(subs.TpInstance[subs.TestConfig.TpIDList[0]])
 	} else {
-		SchedCfg, err = subs.RsrMgr.ResourceMgrs[subs.PonIntf].TechProfileMgr.
-			GetUsScheduler(nil, subs.TpInstance[subs.TestConfig.TpIDList[0]])
+		SchedCfg = subs.RsrMgr.TechprofileRef.
+			GetUsScheduler(subs.TpInstance[subs.TestConfig.TpIDList[0]])
 	}
 
-	if err != nil {
-		logger.Errorw(nil, "Failed to create traffic schedulers", log.Fields{"direction": direction, "error": err})
+	if SchedCfg == nil {
+		logger.Errorw(nil, "Failed to create traffic schedulers", log.Fields{"direction": direction})
 		return nil
 	}
 
@@ -122,7 +121,7 @@
 
 	TrafficShaping := &tp_pb.TrafficShapingInfo{Cir: uint32(cir), Cbs: uint32(cbs), Pir: uint32(pir), Pbs: uint32(pbs)}
 
-	TrafficSched := []*tp_pb.TrafficScheduler{subs.RsrMgr.ResourceMgrs[subs.PonIntf].TechProfileMgr.
+	TrafficSched := []*tp_pb.TrafficScheduler{subs.RsrMgr.TechprofileRef.
 		GetTrafficScheduler(subs.TpInstance[subs.TestConfig.TpIDList[0]], SchedCfg, TrafficShaping)}
 
 	return TrafficSched
@@ -130,7 +129,7 @@
 
 func getTrafficQueues(subs *Subscriber, direction tp_pb.Direction) []*tp_pb.TrafficQueue {
 
-	trafficQueues, err := subs.RsrMgr.ResourceMgrs[subs.PonIntf].TechProfileMgr.
+	trafficQueues, err := subs.RsrMgr.TechprofileRef.
 		GetTrafficQueues(nil, subs.TpInstance[subs.TestConfig.TpIDList[0]], direction)
 
 	if err == nil {
@@ -441,7 +440,7 @@
 
 	TfShInfo := &tp_pb.TrafficShapingInfo{Cir: uint32(cir), Cbs: uint32(cbs), Pir: uint32(pir), Pbs: uint32(pbs)}
 
-	TrafficSched := []*tp_pb.TrafficScheduler{grp.Subs.RsrMgr.ResourceMgrs[grp.Subs.PonIntf].TechProfileMgr.
+	TrafficSched := []*tp_pb.TrafficScheduler{grp.Subs.RsrMgr.TechprofileRef.
 		GetTrafficScheduler(grp.Subs.TpInstance[grp.Subs.TestConfig.TpIDList[0]], SchedCfg, TfShInfo)}
 
 	if TrafficSched == nil {